#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 14:49:24 2021
@author: cghiaus
Tutorial 03: Cube with 2 walls and feed-back
https://unicode-table.com/en/
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import dm4bem
# Physical values
# ===============
# P-controler gain
Kp = 1e4 # almost perfect controller Kp -> ∞
Kp = 1e-3 # no controller Kp -> 0
# Geometry
# --------
l = 3 # m length of the cubic room
Va = l**3 # m³ volume of air
ACH = 1 # air changes per hour
Va_dot = ACH * Va / 3600 # m³/s air infiltration
# Thermophysical properties
# ------------------------
air = {'Density': 1.2, # kg/m³
'Specific heat': 1000} # J/kg.K
""" Incropera et al. (2011) Fundamantals of heat and mass transfer, 7 ed,
Table A3,
concrete (stone mix) p. 993
glass plate p.993
insulation polystyrene extruded (R-12) p.990"""
wall = {'Conductivity': [1.4, 0.027, 1.4], # W/m.K
'Density': [2300, 55, 2500], # kg/m³
'Specific heat': [880, 1210, 750], # J/kg.K
'Width': [0.2, 0.08, 0.004], # m
'Surface': [5 * l**2, 5 * l**2, l**2], # m²
'Slice': [4, 2, 1]} # number of meshes
wall = pd.DataFrame(wall, index=['Concrete', 'Insulation', 'Glass'])
# Radiative properties
# --------------------
""" concrete EngToolbox Emissivity Coefficient Materials """
ε_wLW = 0.7 # long wave wall emissivity
""" grey to dark surface EngToolbox,
Absorbed Solar Radiation by Surface Color """
α_wSW = 0.1 # short wave absorptivity, white surface
""" Glass, pyrex EngToolbox Absorbed Solar Radiation by Surface Color """
ε_gLW = 0.7 # long wave glass emissivity
""" EngToolbox Optical properties of some typical glazing mat
Window glass
https://www.nationalglass.com.au/wp-content/uploads/2019/06/Glass-Data_v4-Low-Res.pdf
Conduction and Radiation of Thermal Energy
https://www.glewengineering.com/window-energy-efficiency-solar-heat-gain-and-visible-transmittance/
"""
τ_gSW = 0.30 # short wave glass transmittance
α_gSW = 0.05 # short wave glass absorptivity
σ = 5.67e-8 # W/m².K⁴ Stefan-Boltzmann constant
Fwg = 1 / 5 # view factor wall - glass
Tm = 22 + 273 # mean temp for radiative exchange
# convection coefficients, W/m² K
h = pd.DataFrame([{'in': 4., 'out': 10}])
# Thermal circuit
# ===============
# Thermal conductances
# Conduction
G_cd = wall['Conductivity'] / wall['Width'] * wall['Surface']
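# G_cd = λ·S/w is the conductance of a whole layer; the circuit below uses
# 2·G_cd for the half-layers on each side of a node placed mid-layer.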
# Convection
Gw = h * wall['Surface'][0] # wall
Gg = h * wall['Surface'][2] # glass
# Long-wave radiation exchange
GLW1 = ε_wLW / (1 - ε_wLW) * wall['Surface']['Insulation'] * 4 * σ * Tm**3
GLW2 = Fwg * wall['Surface']['Insulation'] * 4 * σ * Tm**3
GLW3 = ε_gLW / (1 - ε_gLW) * wall['Surface']['Glass'] * 4 * σ * Tm**3
# long-wave exchange wall-glass
GLW = 1 / (1 / GLW1 + 1 / GLW2 + 1 / GLW3)
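# Long-wave exchange linearized around Tm (h_r = 4·σ·Tm³): the emissivity
# terms ε/(1 - ε)·S·h_r of wall and glass and the geometric term Fwg·S·h_r
# act as three conductances in series, combined above into GLW.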
# ventilation & advection
Gv = Va_dot * air['Density'] * air['Specific heat']
# glass: convection outdoor & conduction
Ggs = float(1 / (1 / Gg['out'] + 1 / (2 * G_cd['Glass'])))
# Thermal capacities
C = wall['Density'] * wall['Specific heat'] * wall['Surface'] * wall['Width']
C['Air'] = air['Density'] * air['Specific heat'] * Va
# Incidence matrix
A = np.zeros([12, 8])
A[0, 0] = 1
A[1, 0], A[1, 1] = -1, 1
A[2, 1], A[2, 2] = -1, 1
A[3, 2], A[3, 3] = -1, 1
A[4, 3], A[4, 4] = -1, 1
A[5, 4], A[5, 5] = -1, 1
A[6, 4], A[6, 6] = -1, 1
A[7, 5], A[7, 6] = -1, 1
A[8, 7] = 1
A[9, 5], A[9, 7] = 1, -1
A[10, 6] = 1
A[11, 6] = 1
G = np.diag([Gw.iloc[0]['out'], 2 * G_cd['Concrete'], 2 * G_cd['Concrete'],
2 * G_cd['Insulation'], 2 * G_cd['Insulation'],
GLW, Gw.iloc[0]['in'], Gg.iloc[0]['in'], Ggs,
2 * G_cd['Glass'], Gv, Kp])
C = np.diag([0, C['Concrete'], 0, C['Insulation'], 0, 0,
C['Air'], C['Glass']])
# C = np.diag([0, C['Concrete'], 0, C['Insulation'], 0, 0, 0, 0])
b = np.zeros(12)
b[[0, 8, 10, 11]] = 10 + np.array([0, 80, 100, 110])
f = np.zeros(8)
f[[0, 4, 6, 7]] = 1000 + np.array([0, 4000, 6000, 7000])
y = np.ones(8)
u = np.hstack([b[np.nonzero(b)], f[np.nonzero(f)]])
# Thermal circuit -> state-space
# ==============================
[As, Bs, Cs, Ds] = dm4bem.tc2ss(A, G, b, C, f, y)
# Test: comparison steady-state of thermal circuit and state-space
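# Steady state: with dθ/dt = 0, the state-space model gives
# yss = (-Cs·As⁻¹·Bs + Ds)·u, while the thermal circuit gives
# θ = (Aᵀ·G·A)⁻¹·(Aᵀ·G·b + f); the two should agree up to round-off.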
ytc = np.linalg.inv(A.T @ G @ A) @ (A.T @ G @ b + f)
yss = (-Cs @ np.linalg.inv(As) @ Bs + Ds) @ u
print(np.array_str(yss, precision=3, suppress_small=True))
print(np.array_str(ytc, precision=3, suppress_small=True))
print(f'Max error in steady-state between thermal circuit and state-space:\
{max(abs(yss - ytc)):.2e}')
# Dynamic simulation
# ==================
# Thermal circuit -> state-space with 1 for b, f, y
b = np.zeros(12)
b[[0, 8, 10, 11]] = 1
f = np.zeros(8)
f[[0, 4, 6, 7]] = 1
y = np.zeros(8)
y[[6]] = 1
[As, Bs, Cs, Ds] = dm4bem.tc2ss(A, G, b, C, f, y)
# Maximum time-step
dtmax = min(-2. / np.linalg.eig(As)[0])
print(f'Maximum time step: {dtmax:.2f} s')
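# Explicit Euler is stable only if |1 + dt·λ| < 1 for every eigenvalue λ of
# As (real and negative here), i.e. dt < -2/λ, hence dtmax = min(-2/λ).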
dt = 5
dt = 360
print(f'Time step: {dt:.2f} s')
# Step response
# -------------
duration = 3600 * 24 * 1 # [s]
# number of steps
n = int(np.floor(duration / dt))
t = np.arange(0, n * dt, dt) # time
# Vectors of state and input (in time)
n_tC = As.shape[0] # no of state variables (temps with capacity)
# u = [To To To Tsp Phio Phii Qaux Phia]
u = np.zeros([8, n])
u[0:3, :] = np.ones([3, n])
# initial values for temperatures obtained by explicit and implicit Euler
temp_exp = np.zeros([n_tC, t.shape[0]])
temp_imp = np.zeros([n_tC, t.shape[0]])
I = np.eye(n_tC)
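# Explicit Euler: θ_{k+1} = (I + dt·As)·θ_k + dt·Bs·u_k
# Implicit Euler: θ_{k+1} = (I - dt·As)⁻¹·(θ_k + dt·Bs·u_k)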
for k in range(n - 1):
temp_exp[:, k + 1] = (I + dt * As) @\
temp_exp[:, k] + dt * Bs @ u[:, k]
temp_imp[:, k + 1] = np.linalg.inv(I - dt * As) @\
(temp_imp[:, k] + dt * Bs @ u[:, k])
y_exp = Cs @ temp_exp + Ds @ u
y_imp = Cs @ temp_imp + Ds @ u
fig, axs = plt.subplots(3, 1)
axs[0].plot(t / 3600, y_exp.T, t / 3600, y_imp.T)
axs[0].set(ylabel='$T_i$ [°C]', title='Step input: To = 1°C')
# Simulation with weather data
# ----------------------------
filename = 'FRA_Lyon.074810_IWEC.epw'
start_date = '2000-01-03 12:00:00'
end_date = '2000-01-04 18:00:00'
start_date = '2000-07-01 12:00:00'
end_date = '2000-07-15 18:00:00'
# Read weather data from Energyplus .epw file
[data, meta] = dm4bem.read_epw(filename, coerce_year=None)
weather = data[["temp_air", "dir_n_rad", "dif_h_rad"]]
del data
weather.index = weather.index.map(lambda t: t.replace(year=2000))
weather = weather[(weather.index >= start_date) & (
weather.index < end_date)]
# Solar radiation on a tilted surface
surface_orientation = {'slope': 90,
'azimuth': 0,
'latitude': 45}
albedo = 0.2
rad_surf1 = dm4bem.sol_rad_tilt_surf(weather, surface_orientation, albedo)
rad_surf1['Φt1'] = rad_surf1.sum(axis=1)
# Interpolate weather data for time step dt
data = pd.concat([weather['temp_air'], rad_surf1['Φt1']], axis=1)
data = data.resample(str(dt) + 'S').interpolate(method='linear')
data = data.rename(columns={'temp_air': 'To'})
# Indoor temperature set-point
data['Ti'] = 20 * np.ones(data.shape[0])
# Indoor auxiliary heat flow rate
data['Qa'] = 0 * np.ones(data.shape[0])
# Flow-rate sources for SW radiation
S = np.array([[wall['Surface']['Insulation'], 0],
[0, wall['Surface']['Glass']]])
# view factor
F = np.array([[1 - Fwg, Fwg],
[1, 0]])
ρSW = np.array([[1 - α_wSW, 0],
[0, 1 - α_gSW - τ_gSW]])
Eow = τ_gSW * wall['Surface'][
'Glass'] / wall['Surface']['Concrete'] * data['Φt1']
Eog = np.zeros_like(Eow)
Eo = np.array([Eow, Eog])
E = np.linalg.inv(np.eye(np.shape(Eo)[0]) - ρSW @ F) @ Eo
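# The line above solves the multiple-reflection balance E = Eo + ρSW·F·E,
# i.e. E = (I - ρSW·F)⁻¹·Eo, where Eo is the transmitted solar radiation
# reaching each surface before any reflection.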
Φ = S @ E
Φi = pd.Series(α_wSW * Φ[0], index=data.index)
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
from sklearn.inspection import partial_dependence
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from sklearn import svm
from sklearn.datasets import load_boston
from articles.pd.support import load_rent, load_bulldozer, load_flights, \
toy_weather_data, toy_weight_data, \
df_cat_to_catcode, df_split_dates, \
df_string_to_cat, synthetic_interaction_data
from stratx import plot_stratpd, plot_catstratpd, \
plot_stratpd_gridsearch, plot_catstratpd_gridsearch
from stratx.partdep import partial_dependence
from stratx.plot import marginal_plot_, plot_ice, plot_catice
from stratx.ice import predict_ice, predict_catice, friedman_partial_dependence
import inspect
import matplotlib.patches as mpatches
from collections import OrderedDict
import matplotlib.pyplot as plt
import os
import shap
import xgboost as xgb
from colour import rgb2hex, Color
from dtreeviz.trees import tree, ShadowDecTree
figsize = (2.5, 2)
figsize2 = (3.8, 3.2)
GREY = '#444443'
# This genfigs.py code is just demonstration code to generate figures for the paper.
# There are lots of programming sins committed here; do not take this to be
# our idea of good code. ;)
# For data sources, please see notebooks/examples.ipynb
def addnoise(df, n=1, c=0.5, prefix=''):
if n == 1:
df[f'{prefix}noise'] = np.random.random(len(df)) * c
return
for i in range(n):
df[f'{prefix}noise{i + 1}'] = np.random.random(len(df)) * c
def fix_missing_num(df, colname):
df[colname + '_na'] = pd.isnull(df[colname])
df[colname].fillna(df[colname].median(), inplace=True)
def savefig(filename, pad=0):
plt.tight_layout(pad=pad, w_pad=0, h_pad=0)
plt.savefig(f"images/{filename}.pdf", bbox_inches="tight", pad_inches=0)
# plt.savefig(f"images/{filename}.png", dpi=150)
plt.tight_layout()
plt.show()
plt.close()
def rent():
print(f"----------- {inspect.stack()[0][3]} -----------")
np.random.seed(1) # pick seed for reproducible article images
X,y = load_rent(n=10_000)
df_rent = X.copy()
df_rent['price'] = y
colname = 'bedrooms'
colname = 'bathrooms'
TUNE_RF = False
TUNE_XGB = False
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
if TUNE_RF:
rf, bestparams = tune_RF(X, y) # does CV on entire data set to tune
# bedrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 125}
# validation R^2 0.7873724127323822
# bathrooms
# RF best: {'max_features': 0.3, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.8066593395345907
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=.3,
oob_score=True, n_jobs=-1)
rf.fit(X_train, y_train) # Use training set for plotting
print("RF OOB R^2", rf.oob_score_)
rf_score = rf.score(X_test, y_test)
print("RF validation R^2", rf_score)
if TUNE_XGB:
tuned_parameters = {'n_estimators': [400, 450, 500, 600, 1000],
'learning_rate': [0.008, 0.01, 0.02, 0.05, 0.08, 0.1, 0.11],
'max_depth': [3, 4, 5, 6, 7, 8, 9]}
grid = GridSearchCV(
xgb.XGBRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=2
)
grid.fit(X, y) # does CV on entire data set to tune
print("XGB best:", grid.best_params_)
b = grid.best_estimator_
# bedrooms
# XGB best: {'max_depth': 7, 'n_estimators': 250}
# XGB validation R^2 0.7945797751555217
# bathrooms
# XGB best: {'learning_rate': 0.11, 'max_depth': 6, 'n_estimators': 1000}
# XGB train R^2 0.9834399795800324
# XGB validation R^2 0.8244958014380593
else:
b = xgb.XGBRegressor(n_estimators=1000,
max_depth=6,
learning_rate=.11,
verbose=2,
n_jobs=8)
b.fit(X_train, y_train)
xgb_score = b.score(X_test, y_test)
print("XGB validation R^2", xgb_score)
lm = LinearRegression()
lm.fit(X_train, y_train)
lm_score = lm.score(X_test, y_test)
print("OLS validation R^2", lm_score)
lm.fit(X, y)
model, r2_keras = rent_deep_learning_model(X_train, y_train, X_test, y_test)
fig, axes = plt.subplots(1, 6, figsize=(10, 1.8),
gridspec_kw = {'wspace':0.15})
for i in range(len(axes)):
axes[i].set_xlim(0-.3,4+.3)
axes[i].set_xticks([0,1,2,3,4])
axes[i].set_ylim(1800, 9000)
axes[i].set_yticks([2000,4000,6000,8000])
axes[1].get_yaxis().set_visible(False)
axes[2].get_yaxis().set_visible(False)
axes[3].get_yaxis().set_visible(False)
axes[4].get_yaxis().set_visible(False)
axes[0].set_title("(a) Marginal", fontsize=10)
axes[1].set_title("(b) RF", fontsize=10)
axes[1].text(2,8000, f"$R^2=${rf_score:.3f}", horizontalalignment='center', fontsize=9)
axes[2].set_title("(c) XGBoost", fontsize=10)
axes[2].text(2,8000, f"$R^2=${xgb_score:.3f}", horizontalalignment='center', fontsize=9)
axes[3].set_title("(d) OLS", fontsize=10)
axes[3].text(2,8000, f"$R^2=${lm_score:.3f}", horizontalalignment='center', fontsize=9)
axes[4].set_title("(e) Keras", fontsize=10)
axes[4].text(2,8000, f"$R^2=${r2_keras:.3f}", horizontalalignment='center', fontsize=9)
axes[5].set_title("(f) StratPD", fontsize=10)
avg_per_baths = df_rent.groupby(colname).mean()['price']
axes[0].scatter(df_rent[colname], df_rent['price'], alpha=0.07, s=5)
axes[0].scatter(np.unique(df_rent[colname]), avg_per_baths, s=6, c='black',
label="average price/{colname}")
axes[0].set_ylabel("price") # , fontsize=12)
axes[0].set_xlabel("bathrooms")
axes[0].spines['right'].set_visible(False)
axes[0].spines['top'].set_visible(False)
ice = predict_ice(rf, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[1], show_xlabel=True,
show_ylabel=False)
ice = predict_ice(b, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[2], show_ylabel=False)
ice = predict_ice(lm, X, colname, 'price', numx=30, nlines=100)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[3], show_ylabel=False)
scaler = StandardScaler()
X_train_ = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns)
# y_pred = model.predict(X_)
# print("Keras training R^2", r2_score(y, y_pred)) # y_test in y
ice = predict_ice(model, X_train_, colname, 'price', numx=30, nlines=100)
# replace normalized unique X with unnormalized
ice.iloc[0, :] = np.linspace(np.min(X_train[colname]), np.max(X_train[colname]), 30, endpoint=True)
plot_ice(ice, colname, 'price', alpha=.3, ax=axes[4], show_ylabel=True)
pdpx, pdpy, ignored = \
plot_stratpd(X, y, colname, 'price', ax=axes[5],
pdp_marker_size=6,
show_x_counts=False,
hide_top_right_axes=False,
show_xlabel=True, show_ylabel=False)
print(f"StratPD ignored {ignored} records")
axes[5].yaxis.tick_right()
axes[5].yaxis.set_label_position('right')
axes[5].set_ylim(-250,2250)
axes[5].set_yticks([0,1000,2000])
axes[5].set_ylabel("price")
savefig(f"{colname}_vs_price")
def tune_RF(X, y, verbose=2):
tuned_parameters = {'n_estimators': [50, 100, 125, 150, 200],
'min_samples_leaf': [1, 3, 5, 7],
'max_features': [.1, .3, .5, .7, .9]}
grid = GridSearchCV(
RandomForestRegressor(), tuned_parameters, scoring='r2',
cv=5,
n_jobs=-1,
verbose=verbose
)
grid.fit(X, y) # does CV on entire data set
rf = grid.best_estimator_
print("RF best:", grid.best_params_)
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# rf.fit(X_train, y_train)
# print("validation R^2", rf.score(X_test, y_test))
return rf, grid.best_params_
def plot_with_noise_col(df, colname):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_noise = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_noise']
type = "noise"
fig, axes = plt.subplots(2, 2, figsize=(5, 5), sharey=True, sharex=True)
df = df.copy()
addnoise(df, n=1, c=50, prefix=colname + '_')
X = df[features]
y = df['price']
# STRATPD ON ROW 1
X = df[features]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15, show_xlabel=True,
show_ylabel=False)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_noise]
y = df['price']
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15,
show_ylabel=False)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x, pdp_curve = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(2):
axes[i, j].set_xlim(0, 6)
X = df[features_with_noise]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
uniq_x_, pdp_curve_ = \
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True,
show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def plot_with_dup_col(df, colname, min_samples_leaf):
features = ['bedrooms', 'bathrooms', 'latitude', 'longitude']
features_with_dup = ['bedrooms', 'bathrooms', 'latitude', 'longitude',
colname + '_dup']
fig, axes = plt.subplots(2, 3, figsize=(7.5, 5), sharey=True, sharex=True)
type = "dup"
verbose = False
df = df.copy()
df[colname + '_dup'] = df[colname]
# df_rent[colname+'_dupdup'] = df_rent[colname]
# STRATPD ON ROW 1
X = df[features]
y = df['price']
print(f"shape is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 0], slope_line_alpha=.15,
show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=True,
verbose=verbose)
axes[0, 0].set_ylim(-1000, 5000)
axes[0, 0].set_title(f"StratPD")
X = df[features_with_dup]
y = df['price']
print(f"shape with dup is {X.shape}")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 1], slope_line_alpha=.15, show_ylabel=False,
min_samples_leaf=min_samples_leaf,
verbose=verbose)
axes[0, 1].set_ylim(-1000, 5000)
axes[0, 1].set_title(f"StratPD w/{type} col")
plot_stratpd(X, y, colname, 'price', ax=axes[0, 2], slope_line_alpha=.15, show_xlabel=True,
min_samples_leaf=min_samples_leaf,
show_ylabel=False,
n_trees=15,
max_features=1,
bootstrap=False,
verbose=verbose
)
axes[0, 2].set_ylim(-1000, 5000)
axes[0, 2].set_title(f"StratPD w/{type} col")
axes[0, 2].text(.2, 4000, "ntrees=15")
axes[0, 2].text(.2, 3500, "max features per split=1")
# ICE ON ROW 2
X = df[features]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
# do it w/o dup'd column
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 0], show_xlabel=True)
axes[1, 0].set_ylim(-1000, 5000)
axes[1, 0].set_title(f"FPD/ICE")
for i in range(2):
for j in range(3):
axes[i, j].set_xlim(0, 6)
# with dup'd column
X = df[features_with_dup]
y = df['price']
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True,
n_jobs=-1)
rf.fit(X, y)
ice = predict_ice(rf, X, colname, 'price', nlines=1000)
plot_ice(ice, colname, 'price', alpha=.05, ax=axes[1, 1], show_xlabel=True, show_ylabel=False)
axes[1, 1].set_ylim(-1000, 5000)
axes[1, 1].set_title(f"FPD/ICE w/{type} col")
# print(f"max ICE curve {np.max(pdp_curve):.0f}, max curve with dup {np.max(pdp_curve_):.0f}")
axes[1, 2].set_title(f"FPD/ICE w/{type} col")
axes[1, 2].text(.2, 4000, "Cannot compensate")
axes[1, 2].set_xlabel(colname)
# print(f"max curve {np.max(curve):.0f}, max curve with dup {np.max(curve_):.0f}")
axes[0, 0].get_xaxis().set_visible(False)
axes[0, 1].get_xaxis().set_visible(False)
def rent_ntrees():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
trees = [1, 5, 10, 30]
supervised = True
def onevar(colname, row, yrange=None):
alphas = [.1,.08,.05,.04]
for i, t in enumerate(trees):
plot_stratpd(X, y, colname, 'price', ax=axes[row, i], slope_line_alpha=alphas[i],
# min_samples_leaf=20,
yrange=yrange,
supervised=supervised,
show_ylabel=t == 1,
pdp_marker_size=2 if row==2 else 8,
n_trees=t,
max_features='auto',
bootstrap=True,
verbose=False)
fig, axes = plt.subplots(3, 4, figsize=(8, 6), sharey=True)
for i in range(1, 4):
axes[0, i].get_yaxis().set_visible(False)
axes[1, i].get_yaxis().set_visible(False)
axes[2, i].get_yaxis().set_visible(False)
for i in range(0, 4):
axes[0, i].set_title(f"{trees[i]} trees")
onevar('bedrooms', row=0, yrange=(-500, 4000))
onevar('bathrooms', row=1, yrange=(-500, 4000))
onevar('latitude', row=2, yrange=(-500, 4000))
savefig(f"rent_ntrees")
plt.close()
def meta_boston():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
boston = load_boston()
print(len(boston.data))
df = pd.DataFrame(boston.data, columns=boston.feature_names)
df['MEDV'] = boston.target
X = df.drop('MEDV', axis=1)
y = df['MEDV']
plot_stratpd_gridsearch(X, y, 'AGE', 'MEDV',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10))
# yranges = [(-30, 0), (0, 30), (-8, 8), (-11, 0)]
# for nbins in range(6):
# plot_meta_multivar(X, y, colnames=['LSTAT', 'RM', 'CRIM', 'DIS'], targetname='MEDV',
# nbins=nbins,
# yranges=yranges)
savefig(f"meta_boston_age_medv")
def plot_meta_multivar(X, y, colnames, targetname, nbins, yranges=None):
np.random.seed(1) # pick seed for reproducible article images
min_samples_leaf_values = [2, 5, 10, 30, 50, 100, 200]
nrows = len(colnames)
ncols = len(min_samples_leaf_values)
fig, axes = plt.subplots(nrows, ncols + 2, figsize=((ncols + 2) * 2.5, nrows * 2.5))
if yranges is None:
yranges = [None] * len(colnames)
row = 0
for i, colname in enumerate(colnames):
marginal_plot_(X, y, colname, targetname, ax=axes[row, 0])
col = 2
for msl in min_samples_leaf_values:
print(
f"---------- min_samples_leaf={msl}, nbins={nbins:.2f} ----------- ")
plot_stratpd(X, y, colname, targetname, ax=axes[row, col],
min_samples_leaf=msl,
yrange=yranges[i],
n_trees=1)
axes[row, col].set_title(
f"leafsz={msl}, nbins={nbins:.2f}",
fontsize=9)
col += 1
row += 1
rf = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, oob_score=True)
rf.fit(X, y)
row = 0
for i, colname in enumerate(colnames):
ice = predict_ice(rf, X, colname, targetname)
plot_ice(ice, colname, targetname, ax=axes[row, 1])
row += 1
def unsup_rent():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y = load_rent(n=10_000)
fig, axes = plt.subplots(4, 2, figsize=(4, 8))
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bedrooms', 'price', ax=axes[0, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 0], yrange=(-500,4000),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'bathrooms', 'price', ax=axes[1, 1], yrange=(-500,4000),
slope_line_alpha=.2, supervised=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 0], yrange=(-500,2000),
slope_line_alpha=.2, supervised=False, verbose=True)
plot_stratpd(X, y, 'latitude', 'price', ax=axes[2, 1], yrange=(-500,2000),
slope_line_alpha=.2, supervised=True, verbose=True)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 0], yrange=(-500,500),
slope_line_alpha=.2, supervised=False)
plot_stratpd(X, y, 'longitude', 'price', ax=axes[3, 1], yrange=(-500,500),
slope_line_alpha=.2, supervised=True)
axes[0, 0].set_title("Unsupervised")
axes[0, 1].set_title("Supervised")
for i in range(3):
axes[i, 1].get_yaxis().set_visible(False)
savefig(f"rent_unsup")
plt.close()
def weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
TUNE_RF = False
df_raw = toy_weather_data()
df = df_raw.copy()
df_string_to_cat(df)
names = np.unique(df['state'])
catnames = OrderedDict()
for i,v in enumerate(names):
catnames[i+1] = v
df_cat_to_catcode(df)
X = df.drop('temperature', axis=1)
y = df['temperature']
# cats = catencoders['state'].values
# cats = np.insert(cats, 0, None) # prepend a None for catcode 0
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 5, 'n_estimators': 150}
# validation R^2 0.9500072628270099
else:
rf = RandomForestRegressor(n_estimators=150, min_samples_leaf=5, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
fig, ax = plt.subplots(1, 1, figsize=figsize)
df = df_raw.copy()
avgtmp = df.groupby(['state', 'dayofyear'])[['temperature']].mean()
avgtmp = avgtmp.reset_index()
ca = avgtmp.query('state=="CA"')
co = avgtmp.query('state=="CO"')
az = avgtmp.query('state=="AZ"')
wa = avgtmp.query('state=="WA"')
nv = avgtmp.query('state=="NV"')
ax.plot(ca['dayofyear'], ca['temperature'], lw=.5, c='#fdae61', label="CA")
ax.plot(co['dayofyear'], co['temperature'], lw=.5, c='#225ea8', label="CO")
ax.plot(az['dayofyear'], az['temperature'], lw=.5, c='#41b6c4', label="AZ")
ax.plot(wa['dayofyear'], wa['temperature'], lw=.5, c='#a1dab4', label="WA")
ax.plot(nv['dayofyear'], nv['temperature'], lw=.5, c='#a1dab4', label="NV")
ax.legend(loc='upper left', borderpad=0, labelspacing=0)
ax.set_xlabel("dayofyear")
ax.set_ylabel("temperature")
ax.set_title("(a) State/day vs temp")
savefig(f"dayofyear_vs_temp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'dayofyear', 'temperature', ax=ax,
show_x_counts=False,
yrange=(-10, 10),
pdp_marker_size=2, slope_line_alpha=.5, n_trials=1)
ax.set_title("(b) StratPD")
savefig(f"dayofyear_vs_temp_stratpd")
plt.close()
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_catstratpd(X, y, 'state', 'temperature', catnames=catnames,
show_x_counts=False,
# min_samples_leaf=30,
min_y_shifted_to_zero=True,
# alpha=.3,
ax=ax,
yrange=(-1, 55))
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(d) CatStratPD")
savefig(f"state_vs_temp_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'dayofyear', 'temperature')
plot_ice(ice, 'dayofyear', 'temperature', ax=ax)
ax.set_title("(c) FPD/ICE")
savefig(f"dayofyear_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_catice(rf, X, 'state', 'temperature')
plot_catice(ice, 'state', 'temperature', catnames=catnames, ax=ax,
pdp_marker_size=15,
min_y_shifted_to_zero = True,
yrange=(-2, 50)
)
ax.set_yticks([0,10,20,30,40,50])
ax.set_title("(b) FPD/ICE")
savefig(f"state_vs_temp_pdp")
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.scatter(X['state'], y, alpha=.05, s=15)
ax.set_xticks(range(1,len(catnames)+1))
ax.set_xticklabels(catnames.values())
ax.set_xlabel("state")
ax.set_ylabel("temperature")
ax.set_title("(a) Marginal")
savefig(f"state_vs_temp")
plt.close()
def meta_weather():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
# np.random.seed(66)
nyears = 5
years = []
for y in range(1980, 1980 + nyears):
df_ = toy_weather_data()
df_['year'] = y
years.append(df_)
df_raw = pd.concat(years, axis=0)
# df_raw.drop('year', axis=1, inplace=True)
df = df_raw.copy()
print(df.head(5))
names = {'CO': 5, 'CA': 10, 'AZ': 15, 'WA': 20}
df['state'] = df['state'].map(names)
catnames = {v:k for k,v in names.items()}
X = df.drop('temperature', axis=1)
y = df['temperature']
plot_catstratpd_gridsearch(X, y, 'state', 'temp',
min_samples_leaf_values=[2, 5, 20, 40, 60],
catnames=catnames,
yrange=(-5,60),
cellwidth=2
)
savefig(f"state_temp_meta")
plot_stratpd_gridsearch(X, y, 'dayofyear', 'temp',
show_slope_lines=True,
min_samples_leaf_values=[2,5,10,20,30],
yrange=(-10,10),
slope_line_alpha=.15)
savefig(f"dayofyear_temp_meta")
def weight():
np.random.seed(1) # pick seed for reproducible article images
print(f"----------- {inspect.stack()[0][3]} -----------")
X, y, df_raw, eqn = toy_weight_data(2000)
TUNE_RF = False
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'education', 'weight', ax=ax,
show_x_counts=False,
pdp_marker_size=5,
yrange=(-12, 0.05), slope_line_alpha=.1, show_ylabel=True)
# ax.get_yaxis().set_visible(False)
ax.set_title("StratPD", fontsize=10)
ax.set_xlim(10,18)
ax.set_xticks([10,12,14,16,18])
savefig(f"education_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=figsize)
plot_stratpd(X, y, 'height', 'weight', ax=ax,
pdp_marker_size=.2,
show_x_counts=False,
yrange=(0, 160), show_ylabel=False)
# ax.get_yaxis().set_visible(False)
ax.set_title("StratPD", fontsize=10)
ax.set_xticks([60,65,70,75])
savefig(f"height_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=(1.3,2))
plot_catstratpd(X, y, 'sex', 'weight', ax=ax,
show_x_counts=False,
catnames={0:'M',1:'F'},
yrange=(-1, 35),
)
ax.set_title("CatStratPD", fontsize=10)
savefig(f"sex_vs_weight_stratpd")
fig, ax = plt.subplots(1, 1, figsize=(1.5,1.8))
plot_catstratpd(X, y, 'pregnant', 'weight', ax=ax,
show_x_counts=False,
catnames={0:False, 1:True},
yrange=(-1, 45),
)
ax.set_title("CatStratPD", fontsize=10)
savefig(f"pregnant_vs_weight_stratpd")
if TUNE_RF:
rf, bestparams = tune_RF(X, y)
# RF best: {'max_features': 0.9, 'min_samples_leaf': 1, 'n_estimators': 200}
# validation R^2 0.9996343699640691
else:
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1, max_features=0.9, oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
# show pregnant female at max range drops going taller
X_test = np.array([[1, 1, 70, 10]])
y_pred = rf.predict(X_test)
print("pregnant female at max range", X_test, "predicts", y_pred)
X_test = np.array([[1, 1, 72, 10]]) # make them taller
y_pred = rf.predict(X_test)
print("pregnant female in male height range", X_test, "predicts", y_pred)
fig, ax = plt.subplots(1, 1, figsize=figsize)
ice = predict_ice(rf, X, 'education', 'weight')
plot_ice(ice, 'education', 'weight', ax=ax, yrange=(-12, 0), min_y_shifted_to_zero=True)
ax.set_xlim(10,18)
ax.set_xticks([10,12,14,16,18])
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"education_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(2.4, 2.2))
ice = predict_ice(rf, X, 'height', 'weight')
plot_ice(ice, 'height', 'weight', ax=ax, pdp_linewidth=2, yrange=(100, 250),
min_y_shifted_to_zero=False)
ax.set_xlabel("height\n(a)", fontsize=10)
ax.set_ylabel("weight", fontsize=10)
ax.set_title("FPD/ICE", fontsize=10)
ax.set_xticks([60,65,70,75])
savefig(f"height_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(1.3,2))
ice = predict_catice(rf, X, 'sex', 'weight')
plot_catice(ice, 'sex', 'weight', catnames={0:'M',1:'F'}, ax=ax, yrange=(0, 35),
pdp_marker_size=15)
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"sex_vs_weight_pdp")
fig, ax = plt.subplots(1, 1, figsize=(1.3,1.8))
ice = predict_catice(rf, X, 'pregnant', 'weight', cats=df_raw['pregnant'].unique())
plot_catice(ice, 'pregnant', 'weight', catnames={0:'M',1:'F'}, ax=ax,
min_y_shifted_to_zero=True,
yrange=(-5, 45), pdp_marker_size=20)
ax.set_title("FPD/ICE", fontsize=10)
savefig(f"pregnant_vs_weight_pdp")
def shap_pregnant():
np.random.seed(1) # pick seed for reproducible article images
n = 2000
shap_test_size = 300
X, y, df_raw, eqn = toy_weight_data(n=n)
df = df_raw.copy()
df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
# parameters from tune_RF() called in weight()
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1,
max_features=0.9,
oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100),
feature_perturbation='interventional')
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
GREY = '#444443'
fig, ax = plt.subplots(1, 1, figsize=(1.3,1.8))
preg_shap_values = shap_values[:, 1]
avg_not_preg_weight = np.mean(preg_shap_values[np.where(shap_sample['pregnant']==0)])
avg_preg_weight = np.mean(preg_shap_values[np.where(shap_sample['pregnant']==1)])
ax.bar([0, 1], [avg_not_preg_weight-avg_not_preg_weight, avg_preg_weight-avg_not_preg_weight],
color='#1E88E5')
ax.set_title("SHAP", fontsize=10)
ax.set_xlabel("pregnant")
ax.set_xticks([0,1])
ax.set_xticklabels(['False','True'])
ax.set_ylabel("weight")
ax.set_ylim(-1,45)
ax.set_yticks([0,10,20,30,40])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
savefig('pregnant_vs_weight_shap')
def shap_weight(feature_perturbation, twin=False):
np.random.seed(1) # pick seed for reproducible article images
n = 2000
shap_test_size = 2000
X, y, df_raw, eqn = toy_weight_data(n=n)
df = df_raw.copy()
df_string_to_cat(df)
df_cat_to_catcode(df)
df['pregnant'] = df['pregnant'].astype(int)
X = df.drop('weight', axis=1)
y = df['weight']
# parameters from tune_RF() called in weight()
rf = RandomForestRegressor(n_estimators=200, min_samples_leaf=1,
max_features=0.9,
oob_score=True)
rf.fit(X, y) # Use full data set for plotting
print("RF OOB R^2", rf.oob_score_)
if feature_perturbation=='interventional':
explainer = shap.TreeExplainer(rf, data=shap.sample(X, 100), feature_perturbation='interventional')
xlabel = "height\n(c)"
ylabel = None
yticks = []
figsize = (2.2, 2.2)
else:
explainer = shap.TreeExplainer(rf, feature_perturbation='tree_path_dependent')
xlabel = "height\n(b)"
ylabel = "SHAP height"
yticks = [-75, -60, -40, -20, 0, 20, 40, 60, 75]
figsize = (2.6, 2.2)
shap_sample = X.sample(shap_test_size, replace=False)
shap_values = explainer.shap_values(shap_sample, check_additivity=False)
df_shap = pd.DataFrame()
import MetaTrader5 as mt5
from datetime import datetime, timedelta
import pandas as pd
import time
import schedule
import pytz
import talib as ta
class getData():
def connect(self, account_id: int):
"""connect to the specific MT5 account
Args:
account_id (int): the MT5 account you want to connect to
"""
mt5.initialize()
authorized=mt5.login(account_id)
if authorized:
print("Connected: Connecting to MT5 Client")
else:
print("Failed to connect at account #{}, error code: {}".format(account_id, mt5.last_error()))
def open_position(self, pair: str, order_type: str, size: float, tp_distance: int = None, stop_distance: int = None, comment: str = ""):
"""Open a position on the connected current metatrader account
Args:
pair (str): The pair you want to exchange (EURUSD for instance)
order_type (str): the type of order you want to place (BUY for instance)
size (float): volume of the order
tp_distance (int, optional): Number of pips before taking profit. Defaults to None.
stop_distance (int, optional): Number of pips before stop loss. Defaults to None.
comment (str, optional): A comment you want to put on your order. Defaults to empty
"""
symbol_info = mt5.symbol_info(pair)
if symbol_info is None:
print(pair, "not found")
return
if not symbol_info.visible:
print(pair, "is not visible, trying to switch on")
if not mt5.symbol_select(pair, True):
print("symbol_select({}}) failed, exit",pair)
return
print(pair, "found!")
point = symbol_info.point
sl = tp = 0.0 # 0.0 means "not set" in an MT5 request; avoids NameError when no distances are given
if(order_type == "BUY"):
order = mt5.ORDER_TYPE_BUY
price = mt5.symbol_info_tick(pair).ask
if(stop_distance):
sl = price - (stop_distance * point)
if(tp_distance):
tp = price + (tp_distance * point)
if(order_type == "SELL"):
order = mt5.ORDER_TYPE_SELL
price = mt5.symbol_info_tick(pair).bid
if(stop_distance):
sl = price + (stop_distance * point)
if(tp_distance):
tp = price - (tp_distance * point)
request = {
"action": mt5.TRADE_ACTION_DEAL,
"symbol": pair,
"volume": size,
"type": order,
"price": price,
"sl": sl,
"tp": tp,
"magic": 234000,
"comment": comment,
"type_time": mt5.ORDER_TIME_GTC,
"type_filling": mt5.ORDER_FILLING_IOC,
}
result = mt5.order_send(request)
print(result)
if result.retcode != mt5.TRADE_RETCODE_DONE:
print("Failed to send order :(")
else:
print ("Order successfully placed!")
def positions_get(self, symbol: str = None):
"""Return all your open positions corresponding to the pair you gave (all open positions by default)
Args:
symbol (str, optional): the specific pair you want to have positions from (leave empty if you want all your positions ).
Returns:
pd.DataFrame: the matching open positions (an empty DataFrame if there are none)
"""
if symbol is None:
positions = mt5.positions_get()
else:
positions = mt5.positions_get(symbol=symbol)
print(positions)
if(positions is not None and positions != ()):
df = pd.DataFrame(list(positions),columns=positions[0]._asdict().keys())
df['time'] = pd.to_datetime(df['time'], unit='s')
return df
return pd.DataFrame()
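# --- Hypothetical usage sketch (not part of the original class) ---
# Assumes a running MetaTrader 5 terminal and a valid (demo) account number:
# trader = getData()
# trader.connect(12345678) # placeholder account id
# trader.open_position("EURUSD", "BUY", 0.01, tp_distance=200, stop_distance=100)
# print(trader.positions_get("EURUSD"))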
# from collections import namedtuple
import warnings
import numpy as np
import pandas as pd
import mikeio
# class PfsSection(dict):
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.__dict__ = self
# def __getattr__(self, key):
# return self.get(key.upper())
class Pfs:
def __init__(self, pfs_file=None) -> None:
self.d = None
self._sections = None
self._model_errors = None
self._measurements = None
self._diagnostics = None
if pfs_file:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
pfs = mikeio.Pfs(pfs_file)
self.data = pfs.data # NestedNamespace
self.d = pfs._data # dictionary
@property
def dda(self):
"""Dictionary of settings in DATA_ASSIMILATION_MODULE"""
return self.d["DATA_ASSIMILATION_MODULE"]
@property
def sections(self):
"""List of the DA Pfs sections"""
if self._sections is None:
self._sections = self._get_DA_sections()
return self._sections
@property
def model_errors(self):
"""DataFrame with model errors"""
if self._model_errors is None:
self._model_errors = self._get_model_errors_df()
return self._model_errors
@property
def measurements(self):
"""DataFrame with measurements"""
if self._measurements is None:
self._measurements = self._get_measurements_df()
return self._measurements
@property
def measurement_positions(self):
"""DataFrame with measurement positions"""
df = self.measurements.copy()
df[["x", "y"]] = df.position.to_list()
return df[["name", "x", "y"]]
@classmethod
def validate_positions(cls, mesh, df):
"""Determine if positions are inside mesh and find nearest cell centers"""
# TODO: handle empty positions
assert isinstance(mesh, (mikeio.Mesh, mikeio.Dfsu))
if ("x" in df) and ("y" in df):
xy = df[["x", "y"]].to_numpy()
elif "position" in df:
n = len(df)
xy = np.concatenate(df.position.to_numpy()).reshape(n, 2)
else:
raise ValueError(
"Could not find 'x', 'y' or 'position' columns in DataFrame"
)
inside = mesh.contains(xy)
elemid, dist = mesh.find_nearest_elements(xy, return_distances=True)
new_positions = mesh.element_coordinates[elemid, :2]
df2 = pd.DataFrame(index=df.index)
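# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes "model.pfs" contains a DATA_ASSIMILATION_MODULE section and
# "area.mesh" is the matching MIKE mesh file:
# pfs = Pfs("model.pfs")
# print(pfs.measurement_positions)
# Pfs.validate_positions(mikeio.Mesh("area.mesh"), pfs.measurement_positions)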
"""
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import pandas as pd
from pandas.core.base import PandasObject
from pandas import compat
from pandas.compat import range
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import (
ABCSparseArray, ABCSparseSeries)
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
is_integer_dtype,
is_bool_dtype,
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
from pandas.core.dtypes.cast import (
maybe_convert_platform, maybe_promote,
astype_nansafe, find_common_type)
from pandas.core.dtypes.missing import isnull, notnull, na_value_for_dtype
import pandas._libs.sparse as splib
from pandas._libs.sparse import SparseIndex, BlockIndex, IntIndex
from pandas._libs import index as libindex
import pandas.core.algorithms as algos
import pandas.core.ops as ops
import pandas.io.formats.printing as printing
from pandas.util._decorators import Appender
from pandas.core.indexes.base import _index_shared_docs
_sparray_doc_kwargs = dict(klass='SparseArray')
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: %d vs. %d" %
(len(self), len(other)))
if not isinstance(other, ABCSparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
def _get_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
try:
return np.asarray(arr.fill_value, dtype=arr.dtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name, series=False):
if series and is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if name in ('floordiv', 'mod') and (right.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
elif name in ('rfloordiv', 'rmod') and (left.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
# dtype used to find corresponding sparse method
if not is_dtype_equal(left.dtype, right.dtype):
dtype = find_common_type([left.dtype, right.dtype])
left = left.astype(dtype)
right = right.astype(dtype)
else:
dtype = left.dtype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name, dtype=dtype)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(left_sp_values, left.sp_index,
left.fill_value, right_sp_values,
right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
""" wrap op result to have correct dtype """
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype)
class SparseArray(PandasObject, np.ndarray):
"""Data structure for labeled, sparse floating point 1-D data
Parameters
----------
data : {array-like (1-D), Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Default depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseArray objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
fill_value=None, dtype=None, copy=False):
if index is not None:
if data is None:
data = np.nan
if not is_scalar(data):
import pytest
import pandas as pd
from pandas.testing import assert_frame_equal
from pathlib import Path
from data_check.sql import DataCheckSql, LoadMode # noqa E402
from data_check.config import DataCheckConfig # noqa E402
@pytest.fixture(scope="module", params=["csv", "xlsx"])
def file_type(request):
return request.param
@pytest.fixture
def sql() -> DataCheckSql:
dc_config = DataCheckConfig().load_config().set_connection("test")
_sql = DataCheckSql(dc_config.connection)
return _sql
def test_load_from_dataframe_append(sql: DataCheckSql):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
sql.get_connection().execute("create table test (id number(10), data varchar2(10))")
sql.table_loader.load_table("test", data, LoadMode.APPEND)
df = sql.run_query("select id, data from test")
assert_frame_equal(data, df)
def test_load_from_dataframe_append_creates_table_if_no_table_exists(sql: DataCheckSql):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
sql.table_loader.load_table("test", data, LoadMode.APPEND)
df = sql.run_query("select id, data from test")
assert_frame_equal(data, df)
def test_load_from_dataframe_append_adds_data(sql: DataCheckSql):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
data2 = pd.DataFrame.from_dict({"id": [3, 4, 5], "data": ["d", "e", "f"]})
full_data = pd.DataFrame.from_dict(
{"id": [0, 1, 2, 3, 4, 5], "data": ["a", "b", "c", "d", "e", "f"]}
)
sql.get_connection().execute("create table test (id number(10), data varchar2(10))")
sql.table_loader.load_table("test", data, LoadMode.APPEND)
sql.table_loader.load_table("test", data2, LoadMode.APPEND)
df = sql.run_query("select id, data from test")
assert_frame_equal(full_data, df)
def test_load_from_dataframe_truncate(sql: DataCheckSql):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
sql.get_connection().execute("create table test (id number(10), data varchar2(10))")
sql.table_loader.load_table("test", data, LoadMode.TRUNCATE)
df = sql.run_query("select id, data from test")
assert_frame_equal(data, df)
def test_load_from_dataframe_truncate_deletes_data(sql: DataCheckSql):
sql.get_connection().execute("create table test (id number(10), data varchar2(10))")
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
sql.table_loader.load_table("test", data, LoadMode.TRUNCATE)
data2 = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "d"]})
sql.table_loader.load_table("test", data2, LoadMode.TRUNCATE)
df = sql.run_query("select id, data from test")
assert_frame_equal(data2, df)
def test_load_from_dataframe_truncate_creates_table_if_no_table_exists(
sql: DataCheckSql,
):
data = pd.DataFrame.from_dict({"id": [0, 1, 2], "data": ["a", "b", "c"]})
from unittest import TestCase
from unittest.mock import ANY, Mock, call, patch
import pandas as pd
from mlblocks import MLPipeline
from orion import benchmark
from orion.evaluation import CONTEXTUAL_METRICS as METRICS
from orion.evaluation import contextual_confusion_matrix
def test__sort_leaderboard_rank():
rank = 'f1'
metrics = METRICS
score = pd.DataFrame({
'pipeline': range(5),
'f1': range(5),
})
expected_return = pd.DataFrame({
'pipeline': range(5)[::-1],
'rank': range(1, 6),
'f1': range(5)[::-1],
})
returned = benchmark._sort_leaderboard(score, rank, metrics)
pd.testing.assert_frame_equal(returned, expected_return)
def test__sort_leaderboard_rank_does_not_exist():
rank = 'does not exist'
metrics = {'f1': METRICS['f1']}
score = pd.DataFrame({
'pipeline': range(5),
'f1': range(5),
})
expected_return = pd.DataFrame({
'pipeline': range(5)[::-1],
'rank': range(1, 6),
'f1': range(5)[::-1],
})
returned = benchmark._sort_leaderboard(score, rank, metrics)
pd.testing.assert_frame_equal(returned, expected_return)
def test__sort_leaderboard_no_rank():
rank = None
metrics = METRICS
score = {k: range(5) for k in metrics.keys()}
score['pipeline'] = range(5)
score = pd.DataFrame(score)
expected_return = score.iloc[::-1].reset_index(drop=True)
expected_return['rank'] = range(1, 6)
returned = benchmark._sort_leaderboard(score, rank, metrics)
assert len(returned.columns) == len(expected_return.columns)
assert sorted(returned.columns) == sorted(expected_return.columns)
pd.testing.assert_frame_equal(returned, expected_return[returned.columns])
def test__detrend_signal_trend():
df = pd.DataFrame({
'timestamp': range(5),
'value': range(5)
})
expected_return = pd.DataFrame({
'timestamp': range(5),
'value': [0.0] * 5,
})
returned = benchmark._detrend_signal(df, 'value')
pd.testing.assert_frame_equal(returned, expected_return)
def test__detrend_signal_no_trend():
df = pd.DataFrame({
'timestamp': range(5),
'value': [0.0] * 5
})
expected_return = df.copy()
returned = benchmark._detrend_signal(df, 'value')
pd.testing.assert_frame_equal(returned, expected_return)
def test__get_parameter_pipeline():
hyperparameters = {
"pipeline1": "pipeline1.json",
"pipeline2": "pipeline2.json",
}
pipeline = "pipeline1"
expected_return = "pipeline1.json"
returned = benchmark._get_parameter(hyperparameters, pipeline)
assert returned == expected_return
def test__get_parameter_dataset():
hyperparameters = {
"dataset1": {
"pipeline1": "pipeline1.json",
"pipeline2": "pipeline2.json",
}
}
dataset = "dataset1"
expected_return = {
"pipeline1": "pipeline1.json",
"pipeline2": "pipeline2.json",
}
returned = benchmark._get_parameter(hyperparameters, dataset)
assert returned == expected_return
def test__get_parameter_does_not_exist():
hyperparameters = None
pipeline = "pipeline1"
expected_return = None
returned = benchmark._get_parameter(hyperparameters, pipeline)
assert returned == expected_return
@patch('orion.benchmark.load_signal')
def test__load_signal_test_split_true(load_signal_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.return_value = (train, test)
test_split = True
returned = benchmark._load_signal('signal-name', test_split)
assert isinstance(returned, tuple)
assert len(returned) == 2
expected_calls = [
call('signal-name-train'),
call('signal-name-test')
]
assert load_signal_mock.call_args_list == expected_calls
@patch('orion.benchmark.load_signal')
def test__load_signal_test_split_false(load_signal_mock):
df = pd.DataFrame({
'timestamp': list(range(10)),
'value': list(range(10, 20))
})
load_signal_mock.return_value = df
test_split = False
returned = benchmark._load_signal('signal-name', test_split)
assert isinstance(returned, tuple)
assert len(returned) == 2
train, test = returned
pd.testing.assert_frame_equal(train, test)
expected_calls = [
call('signal-name'),
]
assert load_signal_mock.call_args_list == expected_calls
@patch('orion.benchmark.load_signal')
def test__load_signal_test_split_float(load_signal_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.return_value = (train, test)
test_split = 0.2
returned = benchmark._load_signal('signal-name', test_split)
assert isinstance(returned, tuple)
assert len(returned) == 2
expected_calls = [
call('signal-name', test_size=test_split),
]
assert load_signal_mock.call_args_list == expected_calls
class TestBenchmark(TestCase):
@classmethod
def setup_class(cls):
cls.pipeline = Mock(autospec=MLPipeline)
cls.name = 'pipeline-name'
cls.dataset = 'dataset-name'
cls.signal = 'signal-name'
cls.hyper = None
cls.distributed = False
cls.rank = 'metric-name'
cls.metrics = {
'metric-name': Mock(autospec=METRICS['f1'], return_value=1)
}
def set_score(self, metric, elapsed, test_split):
return {
'metric-name': metric,
'elapsed': elapsed,
'pipeline': self.name,
'split': test_split,
'dataset': self.dataset,
'signal': self.signal,
'status': 'OK'
}
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal(
self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
anomalies = Mock(autospec=pd.DataFrame)
analyze_mock.return_value = anomalies
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics, True)
expected_return = self.set_score(1, ANY, ANY)
assert returned == expected_return
expected_calls = [
call('signal-name-train'),
call('signal-name-test')
]
assert load_signal_mock.call_args_list == expected_calls
load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
analyze_mock.assert_called_once_with(self.pipeline, train, test)
load_anomalies_mock.assert_called_once_with(self.signal)
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_exception(
self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
analyze_mock.side_effect = Exception("failed analyze.")
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics, True)
expected_return = self.set_score(0, ANY, ANY)
expected_return['status'] = 'ERROR'
assert returned == expected_return
expected_calls = [
call('signal-name-train'),
call('signal-name-test')
]
assert load_signal_mock.call_args_list == expected_calls
load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
analyze_mock.assert_called_once_with(self.pipeline, train, test)
assert load_anomalies_mock.called
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_exception_confusion_matrix(
self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
anomalies = pd.DataFrame({
'start': [10, 35],
'end': [20, 40]
})
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
load_anomalies_mock.return_value = anomalies
analyze_mock.side_effect = Exception("failed analyze.")
metrics = {'confusion_matrix': Mock(autospec=contextual_confusion_matrix)}
metrics = {**metrics, **self.metrics}
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, metrics, True)
expected_return = self.set_score(0, ANY, ANY)
expected_return['status'] = 'ERROR'
expected_return['confusion_matrix'] = (None, 0, 2, 0)
assert returned == expected_return
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_test_split(
self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
test_split = True
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics,
test_split=test_split)
expected_return = self.set_score(1, ANY, test_split)
assert returned == expected_return
expected_calls = [
call('signal-name-train'),
call('signal-name-test')
]
assert load_signal_mock.call_args_list == expected_calls
load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
analyze_mock.assert_called_once_with(self.pipeline, train, test)
load_anomalies_mock.assert_called_once_with(self.signal)
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_no_test_split(
self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
train = test = Mock(autospec=pd.DataFrame)
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
test_split = False
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics,
test_split=test_split)
expected_return = self.set_score(1, ANY, test_split)
assert returned == expected_return
expected_calls = [
call('signal-name')
]
assert load_signal_mock.call_args_list == expected_calls
load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
analyze_mock.assert_called_once_with(self.pipeline, train, test)
load_anomalies_mock.assert_called_once_with(self.signal)
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
def test__evaluate_signal_no_detrend(
self, load_signal_mock, load_pipeline_mock, analyze_mock, load_anomalies_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
detrend = False
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics,
test_split=True, detrend=detrend)
expected_return = self.set_score(1, ANY, ANY)
assert returned == expected_return
expected_calls = [
call('signal-name-train'),
call('signal-name-test')
]
assert load_signal_mock.call_args_list == expected_calls
load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
analyze_mock.assert_called_once_with(self.pipeline, train, test)
load_anomalies_mock.assert_called_once_with(self.signal)
@patch('orion.benchmark.load_anomalies')
@patch('orion.benchmark.analyze')
@patch('orion.benchmark._load_pipeline')
@patch('orion.benchmark.load_signal')
@patch('orion.benchmark._detrend_signal')
def test__evaluate_signal_detrend(self, detrend_signal_mock, load_signal_mock,
load_pipeline_mock, analyze_mock, load_anomalies_mock):
train = Mock(autospec=pd.DataFrame)
test = Mock(autospec=pd.DataFrame)
detrend_signal_mock.side_effect = [train, test]
load_signal_mock.side_effect = [train, test]
load_pipeline_mock.return_value = self.pipeline
detrend = True
returned = benchmark._evaluate_signal(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics,
test_split=True, detrend=detrend)
expected_return = self.set_score(1, ANY, ANY)
assert returned == expected_return
expected_calls = [
call('signal-name-train'),
call('signal-name-test')
]
assert load_signal_mock.call_args_list == expected_calls
expected_calls = [
call(train, 'value'),
call(test, 'value')
]
assert detrend_signal_mock.call_args_list == expected_calls
load_pipeline_mock.assert_called_once_with(self.pipeline, self.hyper)
analyze_mock.assert_called_once_with(self.pipeline, train, test)
load_anomalies_mock.assert_called_once_with(self.signal)
@patch('orion.benchmark._evaluate_signal')
def test__evaluate_pipeline(self, evaluate_signal_mock):
test_split = (True, False)
detrend = False
signals = [self.signal]
score = self.set_score(1, ANY, ANY)
evaluate_signal_mock.return_value = score
benchmark._evaluate_pipeline(
self.pipeline, self.name, self.dataset, signals, self.hyper, self.metrics,
self.distributed, test_split, detrend)
expected_calls = [
call(self.pipeline, self.name, self.dataset, self.signal,
self.hyper, self.metrics, True, detrend),
call(self.pipeline, self.name, self.dataset, self.signal,
self.hyper, self.metrics, False, detrend)
]
assert evaluate_signal_mock.call_args_list == expected_calls
@patch('orion.benchmark._evaluate_signal')
def test__evaluate_pipeline_test_split_none(self, evaluate_signal_mock):
test_split = None
detrend = False
signals = [self.signal]
score = self.set_score(1, ANY, ANY)
evaluate_signal_mock.return_value = score
returned = benchmark._evaluate_pipeline(
self.pipeline, self.name, self.dataset, signals, self.hyper, self.metrics,
self.distributed, test_split, detrend)
expected_return = [
self.set_score(1, ANY, True),
self.set_score(1, ANY, False)
]
assert returned == expected_return
expected_calls = [
call(self.pipeline, self.name, self.dataset, self.signal,
self.hyper, self.metrics, True, detrend),
call(self.pipeline, self.name, self.dataset, self.signal,
self.hyper, self.metrics, False, detrend)
]
assert evaluate_signal_mock.call_args_list == expected_calls
@patch('orion.benchmark._evaluate_signal')
def test__evaluate_pipeline_test_split(self, evaluate_signal_mock):
test_split = True
detrend = False
signals = [self.signal]
score = self.set_score(1, ANY, test_split)
evaluate_signal_mock.return_value = score
expected_return = [score]
returned = benchmark._evaluate_pipeline(
self.pipeline, self.name, self.dataset, signals, self.hyper, self.metrics,
self.distributed, test_split, detrend)
assert returned == expected_return
evaluate_signal_mock.assert_called_once_with(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics,
test_split, detrend)
@patch('orion.benchmark._evaluate_signal')
def test__evaluate_pipeline_no_test_split(self, evaluate_signal_mock):
test_split = False
detrend = False
signals = [self.signal]
score = self.set_score(1, ANY, test_split)
evaluate_signal_mock.return_value = score
expected_return = [score]
returned = benchmark._evaluate_pipeline(
self.pipeline, self.name, self.dataset, signals, self.hyper, self.metrics,
self.distributed, test_split, detrend)
assert returned == expected_return
evaluate_signal_mock.assert_called_once_with(
self.pipeline, self.name, self.dataset, self.signal, self.hyper, self.metrics,
test_split, detrend)
@patch('orion.benchmark._evaluate_pipeline')
def test__evaluate_pipelines(self, evaluate_pipeline_mock):
test_split = False
detrend = False
signals = [self.signal]
pipelines = {self.name: self.pipeline}
score = self.set_score(1, ANY, test_split)
evaluate_pipeline_mock.return_value = [score]
expected_return = [score]
returned = benchmark._evaluate_pipelines(pipelines, self.dataset, signals, self.hyper,
self.metrics, self.distributed, test_split,
detrend)
assert returned == expected_return
evaluate_pipeline_mock.assert_called_once_with(
self.pipeline, self.name, self.dataset, signals, self.hyper, self.metrics,
self.distributed, test_split, detrend)
@patch('orion.benchmark._evaluate_pipeline')
def test__evaluate_pipelines_hyperparameter(self, evaluate_pipeline_mock):
test_split = False
detrend = False
signals = [self.signal]
pipelines = {self.name: self.pipeline}
hyperparameter = Mock(autospec=dict)
hyperparameters = {self.name: hyperparameter}
score = self.set_score(1, ANY, test_split)
evaluate_pipeline_mock.return_value = [score]
expected_return = [score]
returned = benchmark._evaluate_pipelines(pipelines, self.dataset, signals, hyperparameters,
self.metrics, self.distributed, test_split,
detrend)
assert returned == expected_return
evaluate_pipeline_mock.assert_called_once_with(
self.pipeline, self.name, self.dataset, signals, hyperparameter, self.metrics,
self.distributed, test_split, detrend)
@patch('orion.benchmark._evaluate_pipelines')
def test__evaluate_datasets(self, evaluate_pipelines_mock):
test_split = False
detrend = False
signals = [self.signal]
datasets = {self.dataset: signals}
pipelines = {self.name, self.pipeline}
score = self.set_score(1, ANY, test_split)
evaluate_pipelines_mock.return_value = [score]
order = ['dataset', 'elapsed', 'metric-name', 'pipeline', 'signal', 'split', 'status']
expected_return = pd.DataFrame.from_records([{
'metric-name': 1,
'elapsed': ANY,
'split': test_split,
'pipeline': self.name,
'dataset': self.dataset,
'signal': self.signal,
'status': 'OK'
}])[order]
returned = benchmark._evaluate_datasets(
pipelines, datasets, self.hyper, self.metrics, self.distributed, test_split, detrend)
pd.testing.assert_frame_equal(returned, expected_return)
evaluate_pipelines_mock.assert_called_once_with(
pipelines, self.dataset, signals, self.hyper, self.metrics,
self.distributed, test_split, detrend)
@patch('orion.benchmark._evaluate_datasets')
def test_benchmark(self, evaluate_datasets_mock):
signals = [self.signal]
datasets = {self.dataset: signals}
pipelines = {self.name, self.pipeline}
score = self.set_score(1, ANY, ANY)
evaluate_datasets_mock.return_value = pd.DataFrame.from_records([score])
order = [
'pipeline',
'rank',
'dataset',
'elapsed',
'metric-name',
'signal',
'split',
'status']
expected_return = pd.DataFrame.from_records([{
'rank': 1,
'metric-name': 1,
'elapsed': ANY,
'split': ANY,
'pipeline': self.name,
'dataset': self.dataset,
'signal': self.signal,
'status': 'OK'
}])[order]
returned = benchmark.benchmark(
pipelines, datasets, self.hyper, self.metrics, self.rank, self.distributed)
pd.testing.assert_frame_equal(returned, expected_return)
evaluate_datasets_mock.assert_called_once_with(
pipelines, datasets, self.hyper, self.metrics, self.distributed, False, False)
@patch('orion.benchmark._evaluate_datasets')
def test_benchmark_metrics_list(self, evaluate_datasets_mock):
test_split = False
detrend = False
signals = [self.signal]
datasets = {self.dataset: signals}
pipelines = {self.name: self.pipeline}
metric = Mock(autospec=METRICS['f1'], return_value=1)
metric.__name__ = 'metric-name'
metrics = [metric]
metrics_ = {metric.__name__: metric}
score = self.set_score(1, ANY, test_split)
score[metric.__name__] = metric
evaluate_datasets_mock.return_value = pd.DataFrame.from_records([score])
order = [
'pipeline',
'rank',
'dataset',
'elapsed',
'metric-name',
'signal',
'split',
'status']
expected_return = pd.DataFrame.from_records([{
'rank': 1,
'metric-name': metric,
'elapsed': ANY,
'split': test_split,
'pipeline': self.name,
'dataset': self.dataset,
'signal': self.signal,
'status': 'OK'
}])[order]
returned = benchmark.benchmark(pipelines, datasets, self.hyper, metrics, self.rank,
self.distributed, test_split, detrend)
pd.testing.assert_frame_equal(returned, expected_return)
evaluate_datasets_mock.assert_called_once_with(
pipelines, datasets, self.hyper, metrics_, self.distributed, test_split, detrend)
@patch('orion.benchmark._evaluate_datasets')
def test_benchmark_metrics_exception(self, evaluate_datasets_mock):
test_split = False
detrend = False
signals = [self.signal]
datasets = {self.dataset: signals}
pipelines = {self.name: self.pipeline}
metric = 'does-not-exist'
metrics = [metric]
score = self.set_score(1, ANY, test_split)
evaluate_datasets_mock.return_value = pd.DataFrame.from_records([score])
with self.assertRaises(ValueError) as ex:
benchmark.benchmark(pipelines, datasets, self.hyper, metrics, self.rank,
self.distributed, test_split, detrend)
            self.assertTrue(metric in str(ex.exception))
@patch('orion.benchmark._evaluate_datasets')
def test_benchmark_pipelines_list(self, evaluate_datasets_mock):
test_split = False
detrend = False
signals = [self.signal]
datasets = {self.dataset: signals}
pipelines = [self.pipeline]
pipelines_ = {self.pipeline: self.pipeline}
score = self.set_score(1, ANY, test_split)
score['pipeline'] = self.pipeline
        evaluate_datasets_mock.return_value = pd.DataFrame.from_records([score])
import numpy as np
import pandas as pd
import io
import re
import sys
import warnings
from scipy.stats import skew, skewtest
from scipy.stats import rankdata
from .plot_1var import *
# from plot_1var import * # for local testing only
from IPython.display import HTML
def print_list(l, br=', '):
o = ''
for e in l:
o += str(e) + br
return o[:-len(br)]
def summary(s, max_lev=10, br_way=', ', sum_num_like_cat_if_nunique_small=5):
'''
a function that takes a series and returns a summary string
'''
if s.nunique(dropna=False) == 1:
return(f'all the same: {s.unique()[0]}')
elif s.notnull().sum() == 0:
return(f'all are NaNs')
if s.dtype.name in ['object', 'bool', 'category'] or \
(('float' in s.dtype.name or 'int' in s.dtype.name) \
and s.nunique() <= sum_num_like_cat_if_nunique_small):
if len(s.unique()) <= max_lev:
# consider drop na?
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
for name, v in zip(vc.index, vc.values):
s += f'{name} {v*100:>2.0f}%' + br_way
return s[:-len(br_way)]
else:
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
i = 0
cur_sum_perc = 0
for name, v in zip(vc.index, vc.values):
                if i == max_lev or \
                        (i >= 5 and cur_sum_perc >= 0.8) or \
                        (i == 0 and v < 0.05):
                    # stop at max_lev, once at least 5 levels already cover 80% of
                    # the data, or when even the most frequent level is below 5%
                    break
s += f'{name} {v*100:>2.0f}%' + br_way
i += 1
cur_sum_perc += v
s += f'other {(1-cur_sum_perc)*100:>2.0f}%'
# return s[:-len(br_way)]
return s
elif 'float' in s.dtype.name or 'int' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values.tolist()
cv = round(s.std()/s.mean(), 2) if s.mean() != 0 else 'nan'
sk = round(skew(s[s.notnull()]), 2) if len(s[s.notnull()]) > 0 else 'nan'
o = f'{qs}{br_way}\
mean: {s.mean():.2f} std: {s.std():.2f}{br_way}\
cv: {cv} skew: {sk}'
if sum(s.notnull()) > 8: # requirement of skewtest
p = skewtest(s[s.notnull()]).pvalue
o += f'*' if p <= 0.05 else ''
if min(s[s!=0]) > 0 and len(s[s!=0]) > 8: # take log
o += f'{br_way}log skew: {skew(np.log(s[s>0])):.2f}'
p = skewtest(np.log(s[s!=0])).pvalue
            o += '*' if p <= 0.05 else ''  # flag significant skewness of the logged values
return o
elif 'datetime' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values
dt_range = (qs[-1]-qs[0]).astype('timedelta64[D]')
if dt_range > np.timedelta64(1, 'D'):
to_print = [np.datetime_as_string(q, unit='D') for q in qs]
else:
to_print = [np.datetime_as_string(q, unit='s') for q in qs]
return print_list(to_print, br=br_way)
else:
return ''
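# Illustrative usage sketch (not part of the original module): shows the kind of
# string summary() produces for a categorical and a numeric Series. The sample
# values below are made up for demonstration.
def _summary_example():
    cat = pd.Series(['a', 'a', 'b', None])
    num = pd.Series([0.3, 1.0, 2.5, 3.7, 4.4, 5.1, 6.6, 7.2, 10.2])
    # e.g. ('a 50%, b 25%, nan 25%', '[0.3, ...], mean: ... std: ..., cv: ... skew: ...')
    return summary(cat), summary(num)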
def possible_dup_lev(series, threshold=0.9, truncate=False):
try:
from fuzzywuzzy import fuzz
except ImportError:
sys.exit("""Please install fuzzywuzzy first
install it using: pip install fuzzywuzzy
if installing the dependency python-levenshtein is failed and you are using Anaconda, try
conda install -c conda-forge python-levenshtein""")
if series.dtype.name not in ['category', 'object']:
return ''
if series.nunique() > 100 and series.dtype.name == 'object' and truncate: # maybe should adjust
# warnings.warn('Checking duplicates on a long list will take a long time', RuntimeWarning)
# simplified = series.str.lower().replace(r'\W', '')
# if simplified.nunique() < series.nunique():
# return f"too many levls, didn't check, but didn't pass a quick check"
# else:
# return ''
return ''
threshold *= 100
l = series.unique().tolist()
l = [y for y in l if type(y) == str] # remove nan, True, False
candidate = []
for i in range(len(l)):
for j in range(i+1, len(l)):
if l[i].isdigit() or l[j].isdigit():
continue
if any([fuzz.ratio(l[i], l[j]) > threshold,
fuzz.partial_ratio(l[i], l[j]) > threshold,
fuzz.token_sort_ratio(l[i], l[j]) > threshold,
fuzz.token_set_ratio(l[i], l[j]) > threshold]):
candidate.append((l[i], l[j]))
o = '; '.join(['('+', '.join(can)+')' for can in candidate])
if truncate and len(o) > 1000:
o = o[:1000] + f'...truncated, call TEF.possible_dup_lev({series.name}) for a full result'
return o
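# Illustrative usage sketch (assumes fuzzywuzzy is installed; the city names are
# made-up sample data). Case/whitespace variants such as 'New York' vs 'new york '
# are expected to be flagged as possible duplicate levels.
def _possible_dup_lev_example():
    s = pd.Series(['New York', 'new york ', 'Boston', 'BOSTON', 'Chicago'], name='city')
    return possible_dup_lev(s, threshold=0.9)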
def dfmeta(df, description=None, max_lev=10, transpose=True, sample=True,
style=True, color_bg_by_type=True, highlight_nan=0.5, in_cell_next_line=True,
drop=None,
check_possible_error=True, dup_lev_prop=0.9,
fitted_feat_imp=None,
plot=True,
standard=False):
# validation
assert max_lev > 2, 'max_lev should > 2'
assert sample < df.shape[0], 'sample should < nrows'
if sample == True and df.shape[0] < 3:
sample = df.shape[0]
assert drop is None or 'NaNs' not in drop, 'Cannot drop NaNs for now'
assert drop is None or 'dtype' not in drop, 'Cannot drop dtype for now'
warnings.simplefilter('ignore', RuntimeWarning) # caused from skewtest, unknown
    if standard:  # overwrite these args
check_possible_error = False
sample = False
# drop=['unique levs']
# the first line, shape, dtypes, memory
buffer = io.StringIO()
df.info(verbose=False, buf=buffer)
s = buffer.getvalue()
if style == False:
print(f'shape: {df.shape}')
print(s.split('\n')[-3])
print(s.split('\n')[-2])
color_bg_by_type, highlight_nan, in_cell_next_line = False, False, False
br_way = "<br/> " if in_cell_next_line else ", " # notice a space here
o = pd.DataFrame(columns=df.columns)
o.loc['idx'] = list(range(df.shape[1]))
o.loc['dtype'] = df.dtypes
if description is not None:
o.loc['description'] = ''
for col, des in description.items():
if col in df.columns.tolist():
o.loc['description', col] = des
o.loc['NaNs'] = df.apply(lambda x: f'{sum(x.isnull())}{br_way}{sum(x.isnull())/df.shape[0]*100:.0f}%')
o.loc['unique counts'] = df.apply(lambda x: f'{len(x.unique())}{br_way}{len(x.unique())/df.shape[0]*100:.0f}%')
# def unique_index(s):
# if len(s.unique()) <= max_lev:
# o = ''
# for i in s.value_counts(dropna=False).index.tolist():
# o += str(i) + br_way
# return o[:-len(br_way)]
# else:
# return ''
# o.loc['unique levs'] = df.apply(unique_index, result_type='expand')
o.loc['summary'] = df.apply(summary, result_type='expand', max_lev=max_lev, br_way=br_way) # need result_type='true' or it will all convert to object dtype
# maybe us args=(arg1, ) or sth?
if plot and style:
o.loc['summary plot'] = ['__TO_PLOT_TO_FILL__'] * df.shape[1]
if fitted_feat_imp is not None:
def print_fitted_feat_imp(fitted_feat_imp, indices):
fitted_feat_imp = fitted_feat_imp[fitted_feat_imp.notnull()]
            o = pd.Series(index=indices)
import sys
from typing import List, Tuple
import numpy as np
import pandas as pd
def get_valid_gene_info(
genes: List[str],
release=102,
species='homo sapiens'
) -> Tuple[List[str], List[int], List[int], List[int]]:
"""Returns gene locations for all genes in ensembl release 93 --S Markson 3 June 2020
Parameters
----------
genes : A list of genes
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes : List[str] :
genes: List[str] :
Returns
-------
"""
from pyensembl import EnsemblRelease
assembly = EnsemblRelease(release, species=species)
gene_names = []
gene_contigs = []
gene_starts = []
gene_ends = []
for gene in np.intersect1d(genes, [
gene.gene_name for gene in assembly.genes()
if gene.contig.isnumeric() or gene.contig == 'X'
]): # Toss genes not in hg38 release 93
gene_info = assembly.genes_by_name(gene)
gene_info = gene_info[0]
gene_names.append(gene)
gene_contigs.append(gene_info.contig)
gene_starts.append(gene_info.start)
gene_ends.append(gene_info.end)
return gene_names, gene_contigs, gene_starts, gene_ends
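# Illustrative usage sketch only: assumes the Ensembl annotation for the requested
# release has already been downloaded (e.g. via `pyensembl install --release 102
# --species homo_sapiens`); TP53/BRCA1 are sample gene names.
def _get_valid_gene_info_example():
    names, contigs, starts, ends = get_valid_gene_info(['TP53', 'BRCA1'])
    return pd.DataFrame({'gene': names, 'contig': contigs,
                         'start': starts, 'end': ends})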
def seurat_to_loom(seuratrds, patient_id_column, celltype_column,
complexity_column, loomfile):
"""
Parameters
----------
seuratrds :
patient_id_column :
celltype_column :
complexity_column :
loomfile :
Returns
-------
"""
import rpy2.robjects as robjects
from scipy import sparse
from rpy2.robjects import pandas2ri
import loompy
robjects.r('''
library(Seurat)
seurat2rawandmeta <- function(seuratrds) {
seuratobj <- readRDS(seuratrds)
        return(list(genes=rownames(seuratobj@data), metadata=seuratobj@meta.data, data=as.data.frame(summary(seuratobj@data))))
}
''')
seurat_grab = robjects.r['seurat2rawandmeta'](seuratrds)
genes = pd.DataFrame(np.array(seurat_grab.rx2('genes')))
genes.columns = ['gene']
metadata = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('metadata'))
    if patient_id_column != 'patient_ID':
        metadata['patient_ID'] = metadata[patient_id_column]
        metadata.drop(columns=patient_id_column, inplace=True)
    if celltype_column != 'cell_type':
        metadata['cell_type'] = metadata[celltype_column]
        metadata.drop(columns=celltype_column, inplace=True)
    if complexity_column != 'complexity':
        metadata['complexity'] = metadata[complexity_column]
        metadata.drop(columns=complexity_column, inplace=True)
data_df = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('data'))
sparsedata = sparse.coo_matrix(
(data_df['x'], (data_df['i'] - 1, data_df['j'] - 1))).tocsc()
sparsedata.resize((genes.shape[0], metadata.shape[0]))
loompy.create(loomfile, sparsedata, genes.to_dict("list"),
metadata.to_dict("list"))
def intify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
for col in df.columns:
if col.endswith('_ad'):
raise Exception(
"Don't append you column names with _ad! -- Samuel")
df[col] = df[col].apply(
lambda x: int(binascii.hexlify(x.encode()), 16))
while np.sum(df.max() > sys.maxsize) > 0:
for col in df.columns:
if df[col].max() > sys.maxsize:
df[col + '_ad'] = df[col] // sys.maxsize
df[col] = df[col] % sys.maxsize
return df.astype(np.int64)
def deintify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
while np.sum([x.endswith('_ad') for x in df.columns]) > 0:
for col in df.columns:
if col.endswith('_ad') and col + '_ad' not in df.columns:
df[col[0:-3]] = df[col[0:-3]].astype(object)
df[col] = df[col].astype(object)
df[col[0:-3]] = df[col[0:-3]] + sys.maxsize * df[col]
df.drop(col, axis=1, inplace=True)
for col in df.columns:
try:
df[col] = df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode())
except:
print(df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode()))
raise Exception("whoops")
return df
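# Illustrative round-trip sketch (not part of the original module): intify()
# hex-encodes string cells into integers (splitting values larger than
# sys.maxsize into an extra '_ad' column) and deintify() reverses it.
def _intify_roundtrip_example():
    df = pd.DataFrame({'cell_type': ['T cell', 'B cell'], 'patient_ID': ['p1', 'p2']})
    return deintify(intify(df)).equals(df)  # expected: True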
def recover_meta(db, do_deint=False):
"""
Parameters
----------
db :
do_deint :
(Default value = False)
Returns
-------
"""
colmeta = None
for key in db.ca.keys():
if colmeta is None:
            colmeta = pd.DataFrame(db.ca[key])
import numpy as np
import pandas as pd
###############################################################################
#Template Based Checkers
###############################################################################
def check_and_assign_param_values(core_model, parameters):
'''
Checks if parameters are valid and returns a DataFrame.
Accepts parameters as:
1. dict of floats/np.float where keys are parameter names
2. dict of arrays where keys are parameter names
3. Series where where indices are parameter names
4. DataFrame where columns are parameter names
Parameter names may be suffixed with an underscore i.e. param1_1_1 will
be treated as param1_1
Raises an error if parameter names cannot be matched.
Returns a DataFrame of parameters with columns in order.
'''
if type(parameters) == pd.DataFrame:
parameter_df = parameters
elif type(parameters) == pd.Series:
parameter_df = pd.DataFrame(parameters).T
    elif type(parameters) == dict:
        try:
            parameter_df = pd.DataFrame(parameters)
        except ValueError:
            # dict of scalars: wrap in a list to build a single-row DataFrame
            parameter_df = pd.DataFrame([parameters])
else:
raise Exception('Error in parameters. Parameters must be dict, DataFrame or Series.')
parameter_df.reset_index(drop=True, inplace=True)
df_columns = list(parameter_df.columns)
core_model_params = core_model['parameters'] + core_model['inputs']
#Check that parameter names match
if set(df_columns).difference(core_model_params):#Not match
#Assume parameters have been suffixed
#Remove the suffix, reassign and rearrange
df_columns_ = ['_'.join(p.split('_')[:-1]) for p in df_columns]
if set(df_columns_).difference(core_model_params):
raise Exception('Could not match parameter names with that of core_model')
else:
parameter_df.columns = df_columns_
parameter_df = parameter_df[core_model_params]
return parameter_df
else:#Match
#Ensure order is correct
parameter_df = parameter_df[core_model_params]
return parameter_df
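# Illustrative sketch with a made-up core_model: only the 'parameters' and
# 'inputs' keys are needed by the checker above. A plain dict of floats goes
# through the single-row fallback; a Series is transposed into one row.
def _check_and_assign_param_values_example():
    core_model = {'parameters': ['k1', 'k2'], 'inputs': ['u1']}
    from_dict = check_and_assign_param_values(core_model, {'k1': 0.1, 'k2': 0.2, 'u1': 1.0})
    from_series = check_and_assign_param_values(core_model, pd.Series({'k1': 0.1, 'k2': 0.2, 'u1': 1.0}))
    return from_dict, from_series  # both are single-row DataFrames with columns ['k1', 'k2', 'u1']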
def check_and_assign_solver_args(solver_args):
'''
Accepts a dict of solver_args and checks if it is valid against a template.
Raises an exception if
1. Unexpected keys are found.
2. Value indexed by tcrit cannot be converted into a list
3. Value indexed by other keys cannot be converted in a float
Returns the template but with new values from solver_args.
'''
template = {'rtol' : 1.49012e-8,
'atol' : 1.49012e-8,
'tcrit' : [],
'h0' : 0.0,
'hmax' : 0.0,
'hmin' : 0.0,
'mxstep' : 0
}
if not solver_args:
return template
for key in solver_args:
if key not in template:
raise Exception('Error in solver_args. Unexpected key found: ' + str(key))
elif key == 'tcrit':
try:
template[key] = list(solver_args[key])
except:
raise Exception('Error in solver_args. value indexed by key ' + str(key) + ' must be list-like.')
else:
try:
template[key] = float(solver_args[key])
except:
raise Exception('Error in solver_args. value indexed by key ' + str(key) + ' must be a number.')
return template
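# Illustrative sketch: override two solver settings and keep the remaining
# template defaults (the values here are arbitrary examples).
def _check_and_assign_solver_args_example():
    return check_and_assign_solver_args({'rtol': 1e-6, 'tcrit': (10, 20)})
    # -> {'rtol': 1e-06, 'atol': 1.49012e-08, 'tcrit': [10, 20], 'h0': 0.0, ...}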
def check_and_assign_init(states, init, init_orient='scenario'):
'''
Accepts init in the form of {scenario_num : array} if init_orient is "scenario".
Accepts init in the form of {state : array} if init_orient is "state".
Formats init in the form required by the models datastructure.
Accepts init in the form:
1. {scenario_num : array} if init_orient is scenario
2. {state : array} if init_orient is "state"
3. A DataFrame where the columns are the states if init_orient is "state"
4. A Series where the indices are the states if init_orient is "state"
5. A numpy array where each column corresponds the states in order
Raises an exception in (1) if the length of any array does not match the length
of states.
Raises an exception in (2) if
'''
def check_valid_scenario_num(scenario_num):
if type(scenario_num) != int:
raise Exception('Error in init argument. Only positive integers can be used as scenario_num.')
elif scenario_num < 1:
raise Exception('Error in init argument. Only positive integers can be used as scenario_num.')
else:
return
def check_valid_num_states(states, row):
if len(states) == len(row):
return
else:
raise Exception('Error in init. Number of states does not match core model. Expected:\n' + str(states) + '\nDetected:\n' + str(row))
def check_valid_states(states, columns):
if set(states).difference(columns):
raise Exception('Error in init. Unexpected states found. Expected:\n' + str(states) + '\nDetected:\n' + str(columns))
if init is not None:
if init_orient == 'scenario' and type(init) == dict:
for key in init:
check_valid_num_states(states, init[key])
check_valid_scenario_num(key)
init1 = pd.DataFrame.from_dict(init, orient='index')
return init1
else:
if type(init) == dict:
                init1 = pd.DataFrame.from_dict(init, orient='columns')
import numpy as np
import pandas as pd
import threading
import time
import pickle
import tsfresh
from psutil import cpu_percent
from tsfresh import extract_features
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import scipy as sp
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler,normalize
from scipy import io
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial.distance import pdist, cdist, squareform
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import recall_score, f1_score, precision_score
from tsfresh.feature_extraction import extract_features, ComprehensiveFCParameters
from tsfresh.feature_extraction.settings import from_columns
from tsfresh.feature_extraction import feature_calculators
from tsfresh.feature_extraction import EfficientFCParameters
import os
import glob
from tsfresh.feature_extraction import extract_features, EfficientFCParameters
def Recovery (DataName): #Recovery function
#Changing Work Folder
add_path1 = "/PCA_Analyses/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd ()
working_path = os.getcwd() + '/Model'
PCA_Analyses_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
if DataName == 'D_S_parameters':
try:
# Now change to Kernel directory
os.chdir( Kernel_path )
Final_Target = np.genfromtxt('FinalTarget.csv', delimiter = ',')
# Now change to Recovery directory
os.chdir( Recovery_path )
P_N_groups = int(np.load('M_N_groups.npy'))
Output_Id = int(np.load('ID.npy'))
P_N_Ids = int(np.load('N_IDs.npy'))
# Now change to base directory
os.chdir( base_path )
Output = {'FinalTarget': Final_Target,
'M_N_groups': P_N_groups,
'ID': Output_Id,
'N_IDs': P_N_Ids}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("D_S_parameters Recovered!")
return Output
except:
print("D_S_parameters not recovered =(" + '\033[0m')
elif DataName == 'ExtractedNames':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
extracted_names = np.load('extracted_names.npy')
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("ExtractedNames recovered!")
return extracted_names
except:
print('\033[93m' + "ExtractedNames not recovered =(" + '\033[0m')
elif DataName == 'SelectedFeatures':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
Output_Id = int(np.load('ID.npy'))
# Now change to Kernel directory
os.chdir( Kernel_path )
features_filtered_1 = pd.read_csv('features_filtered_' + str(Output_Id) + '.csv')
# Now change to base directory
os.chdir( base_path )
Output = {'FeaturesFiltered': features_filtered_1,
'ID': Output_Id}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("SelectedFeatures recovered!")
return Output
except:
print('\033[93m' + "SelectedFeatures not recovered =(" + '\033[0m')
elif DataName == 'ReducedFeatures':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
Output_Id = int(np.load('ID.npy'))
# Now change to PCA Analyses directory
os.chdir( PCA_Analyses_path )
features_reduzidas = np.genfromtxt("features_reduzidas_" + str(Output_Id) + ".csv", delimiter=',')
# Now change to base directory
os.chdir( base_path )
Output = {'ReducedFeatures': features_reduzidas,
'ID': Output_Id}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("ReducedFeatures recovered!")
return Output
except:
print('\033[93m' + "ReducedFeatures not recovered =(" + '\033[0m')
elif DataName == 'SODA_parameters_processing_parameters':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
Output_Id = int(np.load('ID.npy'))
processing_parameters = np.load(('processing_parameters.npy'), allow_pickle=True)
processing_parameters = processing_parameters.tolist()
distances = np.load(('distances.npy'), allow_pickle=True)
distances = distances.tolist()
min_granularity = np.load('Min_g.npy')
max_granularity = np.load('Max_g.npy')
pace = np.load('Pace.npy')
Output = {'Distances': distances,
'Min_g': min_granularity,
'Max_g': max_granularity,
'Pace': pace,
'ID': Output_Id}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("SODA_parameters_processing_parameters recovered!")
return Output, processing_parameters
except:
print('\033[93m' + "SODA_parameters_processing_parameters not recovered =(" + '\033[0m')
elif DataName == 'ClassificationPar':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
Output_Id = int(np.load('ID.npy'))
pace = np.load("Pace.npy")
distances = np.load(('distances.npy'), allow_pickle=True)
distances = distances.tolist()
define_percent = np.load('define_percent.npy')
Output = {'Percent': define_percent,
'Distances': distances,
'Pace': pace,
'ID': Output_Id}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("ClassificationPar recovered!")
return Output
except:
print('\033[93m' + "ClassificationPar not recovered =(" + '\033[0m')
elif DataName == 'ModelPar':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
# load the model from disk
model = pickle.load(open("Model.sav", 'rb'))
X_test = np.load('X_test.npy')
y_test = np.load('y_test.npy')
Output = {'Model': model,
'X': X_test,
'Y': y_test}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("ModelPar recovered!")
return Output
except:
print('\033[93m' + "ModelPar not recovered =(" + '\033[0m')
else:
print('\033[93m' + "Wrong name lad/lass, please check de Recovery input" + '\033[0m')
def scale(X, x_min, x_max): #Normalization
nom = (X-X.min(axis=0))*(x_max-x_min)
denom = X.max(axis=0) - X.min(axis=0)
if denom==0:
denom = 1
return x_min + nom/denom
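# Illustrative sketch: scale() maps an array linearly onto [x_min, x_max].
def _scale_example():
    return scale(np.array([0.0, 5.0, 10.0]), -1, 1)  # -> array([-1., 0., 1.])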
def format_func(value, tick_number): #Plot Formater
# find number of multiples of pi/2
N = int(value)
if N == 0:
return "X1"
elif N == 50:
return "X50"
elif N == 100:
return "X100"
elif N == 150:
return "X150"
elif N == 200:
return "X200"
elif N == 250:
return "X250"
elif N == 300:
return "X300"
elif N == 350:
return "X350"
elif N == 400:
return "X400"
elif N == 450:
return "X450"
elif N == 500:
return "X500"
elif N == 550:
return "X550"
elif N == 600:
return "X600"
elif N == 650:
return "X650"
elif N == 700:
return "X700"
elif N == 750:
return "X750"
elif N == 800:
return "X800"
elif N == 850:
return "X850"
def DataSlicer (Output_Id, id_per_group=20, Choice='All'):
''' Function to Slice a time series dataset into several datasets
for save RAM during model execution
Parameters:
------
Output_Id : int
identifier for the dataset
id_per_group: int, optional
number of time series per division (default is 20)
Choice : str, optional
option of data, can be ['Main Data', 'Eminence Data', 'All'] (default is 'All')
Returns:
-------
dictionary, with the following items
'FinalTarget': np.array
targets of the entire dataset
'M_N_groups': int
number of groups
'ID': int
identifier for the dataset
'N_IDs': int
number of time series
'''
print('Data Slicer Control Output')
print('----------------------------------')
#Changing Work Folder
add_path1 = "/Input/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Input_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Now change to Input directory
os.chdir( Input_path )
# Loading the required input
Full_data = np.genfromtxt('Output_' + str(int(Output_Id)) + '.csv', delimiter=',')
#E_data = np.genfromtxt('Eminence_Data_' + str(Output_Id) + '.csv', delimiter=',')
columns = Full_data.shape[1]
data = Full_data[:,2:columns-1]
info = Full_data[:,0:2]
    #center the data and scale it to standard deviation = 1
#scaler = MinMaxScaler(feature_range=(-1,1)).fit(data)
#data = scaler.transform(data)
P_data = np.concatenate((info,data), axis=1)
Target = Full_data[:,columns-1]
print('Full Matrix: ' + str(Full_data.shape))
print('Main Data: ' + str(P_data.shape))
print('Labels: ' + str(Target.shape))
#print('Eminence Data: ' + str(E_data.shape))
# Now change to Kernel directory
os.chdir( Kernel_path )
#pickle.dump(scaler, open('norm.sav', 'wb'))
###______________________________________________________________________###
### ProDiMes Slicing Parameters ###
P_N_Ids = int(np.amax(P_data,axis=0)[0])
P_N_voos = int(np.amax(P_data,axis=0)[1])
P_last_group = int(P_N_Ids % id_per_group)
if P_last_group != 0:
P_N_groups = int((P_N_Ids / id_per_group) + 1)
else:
P_N_groups = int (P_N_Ids / id_per_group)
### Formating Final Target ###
Final_Target = np.zeros((P_N_Ids))
p_n_good = 0
p_n_bad = 0
aquired_time = P_N_Ids*P_N_voos/1000
for i in range (P_N_Ids):
if Target [i*P_N_voos] == 0:
p_n_good += 1
else:
p_n_bad += 1
Final_Target[i] = Target [i*P_N_voos]
print ('Total Number of Ids: ' + str(P_N_Ids))
print ('Number of healthy Ids: ' + str(p_n_good))
    print ('Number of faulty Ids: ' + str(p_n_bad))
    print ('Total lifetime: ' + str(aquired_time) + ' s')
    print ('Main data Number of measures: ' + str(P_N_voos))
print ('Main data Number of groups: ' + str(P_N_groups ))
print ('Main data Last group: ' + str(P_last_group ))
print ('___________________________________________')
###______________________________________________________________________###
### Eminences Slicing Parameters ###
#E_N_Ids = int(np.amax(E_data,axis=0)[0] - np.amax(P_data,axis=0)[0])
#E_N_voos = int(np.amax(E_data,axis=0)[1]) + 1
#E_last_group = int(E_N_Ids % id_per_group)
#if (E_last_group != 0):
# E_N_groups = int((E_N_Ids / id_per_group) + 1)
#else:
# E_N_groups = int (E_N_Ids / id_per_group)
#print ('Eminences Number of Ids: ' + str(E_N_Ids ))
#print ('Eminences Number of flights: ' + str(E_N_voos ))
#print ('Eminences Number of groups: ' + str(E_N_groups ))
#print ('Eminences Last group: ' + str(E_last_group ))
#np.savetxt(('Target_' + str(int(Output_Id)) + '.csv'), Final_Target, delimiter = ',')
###______________________________________________________________________###
### Slicing Prodimes Data ###
if (Choice =='Main Data') or (Choice =='All'):
for i in range (P_N_groups):
Data = np.zeros(((id_per_group * P_N_voos),columns-1))
for j in range (id_per_group):
for k in range (P_N_voos):
if (i < (P_N_groups - 1)):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
elif (P_last_group == 0) and (i == (P_N_groups - 1)):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
if (P_last_group != 0) and (i == (P_N_groups - 1)):
Data = np.zeros(((P_last_group * P_N_voos),columns-1))
for j in range (P_last_group):
for k in range (P_N_voos):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
np.savetxt(('Data_' + str(i) + '.csv'), Data, delimiter = ',')
###______________________________________________________________________###
### Slicing Eminences ###
'''
if (Choice == 'Eminence Data') or (Choice =='All'):
for i in range (E_N_groups):
Data = np.zeros(((id_per_group * E_N_voos),columns-3))
for j in range (id_per_group):
for k in range (E_N_voos):
if (i < (E_N_groups - 1)):
Data[(j * E_N_voos) + k,:] = E_data [(((i * id_per_group + j) * E_N_voos) + k ) ,:]
if (E_last_group != 0) and (i == (E_N_groups - 1)):
Data = np.zeros(((E_last_group * E_N_voos),columns-3))
for j in range (E_last_group):
for k in range (E_N_voos):
Data[(j * E_N_voos) + k,:] = E_data [(((i * id_per_group + j) * E_N_voos) + k ) ,:]
np.savetxt(('Eminence_' + str(i) + '.csv'), Data, delimiter = ',')
'''
np.savetxt(('FinalTarget.csv'), Final_Target, delimiter = ',')
# Now change to Recovery directory
os.chdir( Recovery_path )
np.save(('M_N_groups.npy'), P_N_groups)
np.save(('ID.npy'), Output_Id)
np.save(('N_IDs.npy'), P_N_Ids)
# Now change back to Base directory
os.chdir( base_path )
Output = {'FinalTarget': Final_Target,
'M_N_groups': P_N_groups,
'ID': Output_Id,
'N_IDs': P_N_Ids}
return Output
def TSFRESH_Extraction(D_S_parameters):
''' Function to extract features of the time series using
TSFRESH method
Parameters:
------
D_S_parameters : dictionary, with the following items
'FinalTarget': np.array
targets of the entire dataset
'M_N_groups': int
number of groups
'ID': int
identifier for the dataset
'N_IDs': int
number of time series
Returns:
-------
list
a list of string with the name of the extracted features by TSFRESH
'''
print(' ')
print('TSFRESH Control Output')
print('----------------------------------')
#Changing Work Folder
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Now change to Kernel directory
os.chdir( Kernel_path )
###______________________________________________________________________###
### Feature Extraction ###
#E_N_groups = np.load('E_N_groups.npy')
P_N_groups = D_S_parameters['M_N_groups']
for i in range(P_N_groups):
Data = np.genfromtxt('Data_' + str(i) + '.csv', delimiter=',')
data = pd.DataFrame(Data, columns= ['id','time'] + ['Sensor_' + str(x) for x in range(1,(Data.shape[1]-1))])
Data_extracted_features = extract_features(data,column_id = "id", column_sort="time",n_jobs=4,disable_progressbar=True)
extracted_names = list(Data_extracted_features.columns)
np.savetxt('Data_Features_' + str(i) + '.csv', Data_extracted_features.values, delimiter=',')
#for i in range(E_N_groups):
# data = pd.DataFrame(np.genfromtxt('Eminence_' + str(i) + '.csv', delimiter=','),
# columns= ['id','time','sensor_1','sensor_2','sensor_3','sensor_4',
# 'sensor_5','sensor_6','sensor_7'])
# extracted_features = extract_features(data, column_id = "id", column_sort="time")
# np.savetxt('Eminence_Features_' + str(i) + '.csv', extracted_features, delimiter=',')
# Now change to Recovery directory
os.chdir( Recovery_path )
np.save('extracted_names.npy',extracted_names)
# Now change back to base directory
os.chdir( base_path )
print("Number of Extracted Features: {}".format(len(extracted_names)))
return extracted_names
def tsfresh_chucksize(full_data,output_id):
# Loading the required input
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
n_measures = int(max(info[:,1]))
target = full_data[::n_measures,-1]
u, idx = np.unique(info[:,0], return_index=True)
df = pd.DataFrame(np.concatenate((info,data), axis=1), columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
with open('Kernel/valid_features_dict.pkl', 'rb') as f:
kind_to_fc_parameters = pickle.load(f)
columns = []
for i,x in enumerate(kind_to_fc_parameters):
aux = pd.DataFrame(np.hstack((df.loc[:,:'time'].values,
df.loc[:,x].values.reshape((-1,1)))),
columns=['id','time',x])
aux2 = tsfresh.extract_features(aux, column_id="id", column_sort="time",
default_fc_parameters=kind_to_fc_parameters[x],
#chunksize=3*24000,
n_jobs=4,
disable_progressbar=False)
for j in range(len(aux2.columns.tolist())):columns.append(aux2.columns.tolist()[j])
if i == 0:
extracted_features = np.array(aux2.values)
else:
extracted_features = np.hstack((extracted_features,aux2.values))
final_features = pd.DataFrame(extracted_features,columns=columns)
filtered_features = select_features(final_features, target,n_jobs=4)
filtered_features.sort_index(inplace = True)
with open('Kernel/final_target_' + output_id + '.pkl', 'wb') as f:
pickle.dump(target, f)
# Extracting the selected features dictionary from pandas data frame
kind_to_fc_parameters = tsfresh.feature_extraction.settings.from_columns(filtered_features)
# Saving dictionary for the on-line phase
with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
pickle.dump(kind_to_fc_parameters, f)
with open('Kernel/columns.pkl', 'wb') as f:
pickle.dump(filtered_features.columns.to_list(), f)
Output = {'FeaturesFiltered': filtered_features,
'FinalTarget': target,
'ID': int(output_id)}
return Output
def tsfresh_chucksize_test(output_id):
# Loading the required input
full_data = np.genfromtxt('Input/Output_' + output_id + '.csv',
delimiter=',')
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
n_measures = int(max(info[:,1]))
target = full_data[::n_measures,-1]
u, idx = np.unique(info[:,0], return_index=True)
df = pd.DataFrame(np.concatenate((info,data), axis=1), columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=4,default_fc_parameters=EfficientFCParameters())
return extracted_features
def tsfresh_NaN_filter(output_id,fft=False):
"""
Given an output_id, this function
withdraw all NaN features from the
TSFRESH extraction;
Inputs:
-output_id: str() -> the given id
-fft: True or False -> filter fft features
Outputs:
- Saves via picklen in ./Kernel/
an extraction dictonary without
features that generates NaN
"""
df = tsfresh_chucksize_test(output_id)
features = df.columns
nan_columns = []
for col in features:
data = df.loc[:,col].values
nan_test = np.isnan(data)
aux = col.split('__')[1].split('_')[0]
if aux == 'fft' and fft == True:
nan_columns.append(col)
elif any(nan == True for nan in nan_test):
nan_columns.append(col)
print('Percentage of invalid features: ', len(nan_columns)*100/len(features))
valid_features = []
for i in range(len(features)):
if features[i] not in nan_columns:
valid_features.append(features[i])
print('Percentage of valid features: ', len(valid_features)*100/len(features))
valid_features_dict = from_columns(valid_features)
with open('Kernel/valid_features_dict.pkl', 'wb') as f:
pickle.dump(valid_features_dict, f)
return
def tsfresh_ensemble(output_id):
# Loading the required input
full_data = np.genfromtxt('Input/Output_{}.csv'.format(output_id),
delimiter=',')
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
n_measures = int(max(info[:,1]))
n_timeseries = int(max(info[:,0]))
label = full_data[::n_measures,-1]
scaler = MinMaxScaler(feature_range=(-1,1)).fit(data)
data = scaler.transform(data)
with open('Kernel/scaler.pkl', 'wb') as f:
pickle.dump(scaler, f)
full_data = np.concatenate((info,data), axis=1)
divisions = 1
idx = np.random.choice(range(n_timeseries),n_timeseries,replace=False)
idx_division = np.array_split(idx,divisions)
for i,div in enumerate(idx_division):
div.sort()
indices = [d2 for d1 in div for d2 in range(d1*n_measures,(d1+1)*n_measures)]
ensemble_data = full_data[indices,:]
ensemble_label = label[div]
df = pd.DataFrame(ensemble_data, columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=0)
features = extracted_features.columns
nan_columns = []
for col in features:
nan_test = np.isnan(extracted_features.loc[:,col].values)
if any(nan == True for nan in nan_test):
nan_columns.append(col)
print(' - Percentage of invalid features: ', len(nan_columns)*100/len(features))
cleaned_features = features.drop(nan_columns)
cleaned_df = extracted_features[cleaned_features]
filtered_df, relevance_table = selection.select_features(cleaned_df, ensemble_label, n_jobs=0)
        relevance_table = relevance_table.fillna(value=100)
if i == 0:
relevance_table_final = relevance_table.copy()
extracted_features_final = extracted_features.copy()
else:
relevance_table_final.p_value = relevance_table_final.p_value + relevance_table.p_value
extracted_features_final = pd.concat([extracted_features_final,extracted_features], axis=0)
extracted_features_final = extracted_features_final.sort_index()
relevance_table_final.p_value = relevance_table_final.p_value/divisions
relevance_table_final.relevant = relevance_table_final.p_value < 0.0029
relevant_features = relevance_table_final[relevance_table_final.relevant].feature
extracted_features_final = extracted_features_final[relevant_features]
kind_to_fc_parameters = from_columns(relevant_features)
with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
pickle.dump(kind_to_fc_parameters, f)
with open('Kernel/columns.pkl', 'wb') as f:
pickle.dump(relevant_features.keys().tolist(), f)
with open('Kernel/final_target_{}.pkl'.format(output_id), 'wb') as f:
pickle.dump(label, f)
Output = {'FeaturesFiltered': extracted_features_final,
'FinalTarget': label,
'ID': int(output_id)}
return Output
def dynamic_tsfresh (total_data, mode='prototype'):
''' Function for ONLINE mode
This function read the data from the acquisition module and executes a
dynamic and lighter version of TSFRESH.
    Parameters:
    ------
    total_data : np.array
        raw acquisition data (id, time, sensor columns and label)
    mode : str, optional
        execution mode (default is 'prototype')
    Returns:
    -------
    tuple
        (imputed DataFrame of the selected features, raw extracted feature matrix)
    '''
data = total_data[:,2:-1]
info = total_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
total_data = np.concatenate((info,data), axis=1)
# ----------------------------------------------------------------- #
df = pd.DataFrame(total_data, columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,(total_data.shape[1]-1))])
# Loading feature dictionary
with open('Kernel/kind_to_fc_parameters.pkl', 'rb') as f:
kind_to_fc_parameters = pickle.load(f)
# Loading column names
with open('Kernel/columns.pkl', 'rb') as f:
original_columns = pickle.load(f)
columns = []
for i,x in enumerate(kind_to_fc_parameters):
aux = pd.DataFrame(np.hstack((df.loc[:,:'time'].values,
df.loc[:,x].values.reshape((-1,1)))),
columns=['id','time',x])
aux2 = tsfresh.extract_features(aux, column_id="id", column_sort="time",
default_fc_parameters=kind_to_fc_parameters[x],#chunksize=24000,
n_jobs=0
#disable_progressbar=True
)
for j in range(len(aux2.columns.tolist())):columns.append(aux2.columns.tolist()[j])
if i == 0:
extracted_features = np.array(aux2.values)
else:
extracted_features = np.hstack((extracted_features,aux2.values))
final_features = pd.DataFrame(extracted_features,columns=columns)
final_features = final_features[original_columns]
return impute(final_features), extracted_features
def test_tsfresh (SelectedFeatures,extracted_features):
tsf_offline = SelectedFeatures['FeaturesFiltered'].values
tsf_online = extracted_features.values
equal = np.equal(tsf_offline,tsf_online)
n_errors = 0
error_size = []
for i in range(equal.shape[0]):
for j in range(equal.shape[1]):
if equal[i,j]== False:
n_errors += 1
error_size.append(100*(tsf_offline[i,j]-tsf_online[i,j])/tsf_online[i,j])
    error_size = pd.DataFrame(error_size)
# coding: utf-8
"""
"""
import pandas as pd
import numpy as np
import re
import csv
import io
import time
import traceback
import logging
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG)
def cal_jlr_zengzhanglv(data, col1, col2):
kuisun_count = 0
if not (data.iat[0] > 0 and data.iat[-1] > 0):
fhzzl = np.nan
else:
        # compound growth rate
fhzzl = ((data.iat[0] / data.iat[-1]) ** (1.0 / (len(data) - 1)) - 1) * 100
for d in data[:-1]:
if d < 0:
kuisun_count += 1
return pd.Series({col1: fhzzl, col2: kuisun_count})
def cal_PEG(data, col1, col2):
# data.iat[0] is PE
if not (data.iat[0] > 0 and data.iat[1] > 0 and data.iat[-1] > 0):
peg = np.nan
fhzzl = np.nan
else:
        # compound growth rate
fhzzl = ((data.iat[1] / data.iat[-1]) ** (1.0 / (len(data) - 2)) - 1) * 100
if fhzzl == 0:
peg = np.nan
else:
peg = data.iat[0] / fhzzl
return pd.Series({col1: fhzzl, col2: peg})
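# Illustrative sketch (made-up numbers): net profit ordered from most recent to
# oldest; 8 -> 1 over three periods gives a compound growth rate of about 100%
# and no loss-making years.
def _cal_jlr_zengzhanglv_example():
    data = pd.Series([8.0, 4.0, 2.0, 1.0])
    return cal_jlr_zengzhanglv(data, 'cagr_pct', 'loss_years')  # cagr_pct ≈ 100.0, loss_years = 0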
def generate_date_label(start_label):
dongtai_label = start_label
if pd.Timestamp(dongtai_label).is_year_end:
jingtai_label = dongtai_label
else:
        jingtai_label = (pd.Timestamp(dongtai_label) - pd.offsets.YearEnd(1))
# -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2022/1/7 17:19
Desc: Eastmoney Data Center - New Stock Data - Registration-based IPO review
http://data.eastmoney.com/kcb/?type=nsb
"""
import pandas as pd
import requests
def stock_register_kcb() -> pd.DataFrame:
"""
    Eastmoney Data Center - New Stock Data - Registration-based review - STAR Market (科创板)
    http://data.eastmoney.com/kcb/?type=nsb
    :return: STAR Market registration-based review results
:rtype: pandas.DataFrame
"""
url = "https://datacenter.eastmoney.com/securities/api/data/get"
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': '1',
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '<KEY>',
'client': 'WEB',
'filter': '(TOLIST_MARKET="科创板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': page,
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '<KEY>',
'client': 'WEB',
'filter': '(TOLIST_MARKET="科创板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = range(1, len(big_df) + 1)
big_df.columns = [
"序号",
"_",
"_",
"发行人全称",
"审核状态",
"_",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
"_",
]
big_df = big_df[
[
"序号",
"发行人全称",
"审核状态",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
]
]
big_df['更新日期'] = pd.to_datetime(big_df['更新日期']).dt.date
big_df['受理日期'] = pd.to_datetime(big_df['受理日期']).dt.date
return big_df
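# Illustrative usage sketch: requires network access and returns whatever the
# live Eastmoney endpoint currently reports.
def _stock_register_kcb_example():
    df = stock_register_kcb()
    return df.head()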
def stock_register_cyb() -> pd.DataFrame:
"""
    Eastmoney Data Center - New Stock Data - Registration-based review - ChiNext (创业板)
    http://data.eastmoney.com/xg/cyb/
    :return: ChiNext registration-based review results
:rtype: pandas.DataFrame
"""
url = "https://datacenter.eastmoney.com/securities/api/data/get"
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': '1',
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '<KEY>',
'client': 'WEB',
'filter': '(TOLIST_MARKET="创业板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
page_num = data_json['result']['pages']
big_df = pd.DataFrame()
for page in range(1, page_num+1):
params = {
'st': 'UPDATE_DATE',
'sr': '-1',
'ps': '5000',
'p': page,
'type': 'RPT_REGISTERED_INFO',
'sty': 'ORG_CODE,ORG_CODE_OLD,ISSUER_NAME,CHECK_STATUS,CHECK_STATUS_CODE,REG_ADDRESS,CSRC_INDUSTRY,RECOMMEND_ORG,LAW_FIRM,ACCOUNT_FIRM,UPDATE_DATE,ACCEPT_DATE,TOLIST_MARKET,SECURITY_CODE',
'token': '<KEY>',
'client': 'WEB',
'filter': '(TOLIST_MARKET="创业板")',
}
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'
}
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']["data"])
big_df = big_df.append(temp_df, ignore_index=True)
big_df.reset_index(inplace=True)
big_df['index'] = big_df.index + 1
big_df.columns = [
"序号",
"_",
"_",
"发行人全称",
"审核状态",
"_",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
"_",
]
big_df = big_df[
[
"序号",
"发行人全称",
"审核状态",
"注册地",
"证监会行业",
"保荐机构",
"律师事务所",
"会计师事务所",
"更新日期",
"受理日期",
"拟上市地点",
]
]
    big_df['更新日期'] = pd.to_datetime(big_df['更新日期'])
import pandas as pd
import numpy as np
from sklearn.decomposition import NMF
class BaseOnRandom:
def __init__(self):
self.rating_df = None
self.id_index_dic = None
self.index_rating_dic = None
return
def fit(self, unique_id, rating_min, rating_max, seed=None):
#
unique_id_num = len(unique_id)
#
np.random.seed(seed)
random_rt = np.random.rand(unique_id_num) * (rating_max - rating_min) + rating_min
#
self.rating_df = pd.concat([pd.DataFrame(unique_id), pd.DataFrame(random_rt)], axis=1)
self.rating_df.columns = ['id', 'rating']
#
tempdf = self.rating_df['id'].reset_index() # index, id
tempdf.index = tempdf['id'] # id, index, id
self.id_index_dic = tempdf['index'].to_dict() # key=id, value=index
#
self.index_rating_dic = self.rating_df['rating'].to_dict() # key=index, value=rating
return
def predict(self, id):
def search(_id):
_idx = self.id_index_dic.get(_id)
_rt = self.index_rating_dic.get(_idx) if _idx is not None else 0
_rt = _rt if _rt is not None else 0
return _rt
vecf_search = np.vectorize(search)
#
pre_rating = vecf_search(id)
return pre_rating
class BaseOnConstant:
def __init__(self):
self.constant = None
return
def fit(self, constant):
self.constant = constant
return
def predict(self, id):
pre_rating = np.ones_like(id) * self.constant
return pre_rating
class BaseOnMean:
def __init__(self):
self.rating_df = None
self.id_index_dic = None
self.index_rating_dic = None
return
def fit(self, id, rating):
#
df = pd.concat([pd.DataFrame(id), pd.DataFrame(rating)], axis=1)
df.columns = ['id', 'rating']
#
grouped = df.groupby('id').mean()
#
self.rating_df = grouped.reset_index() # id, rating
#
tempdf = self.rating_df['id'].reset_index() # index, id
tempdf.index = tempdf['id'] # id, index, id
self.id_index_dic = tempdf['index'].to_dict() # key=id, value=index
#
self.index_rating_dic = self.rating_df['rating'].to_dict() # key=index, value=rating
return
def predict(self, id):
def search(_id):
_idx = self.id_index_dic.get(_id)
_rt = self.index_rating_dic.get(_idx) if _idx is not None else 0
_rt = _rt if _rt is not None else 0
return _rt
vecf_search = np.vectorize(search)
#
pre_rating = vecf_search(id)
return pre_rating
class NonNegaMF:
'''
Non-negative Matrix Factorization: NMF
'''
def __init__(self, n_components):
self.n_components = n_components
#
self.nmf_rt_mtrx = None
self.dfindex_index_dic = None
self.dfcolumn_index_dic = None
return
def fit(self, rating_mtrx_df):
#
tempdf =
|
pd.DataFrame(rating_mtrx_df.index)
|
pandas.DataFrame
|
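A hedged sketch of the pattern the record above starts: wrapping a DataFrame's index in pd.DataFrame to build a label-to-row-position lookup. The toy rating matrix and the "pos" column are illustrative, not taken from the original class.

import pandas as pd

# Toy user x item rating matrix (made up for illustration).
rating_mtrx_df = pd.DataFrame([[5, 3], [4, 1]], index=["u1", "u2"], columns=["i1", "i2"])
tempdf = pd.DataFrame(rating_mtrx_df.index)  # one column holding the original index labels
tempdf["pos"] = range(len(tempdf))           # row position for each label
dfindex_index_dic = dict(zip(tempdf.iloc[:, 0], tempdf["pos"]))
print(dfindex_index_dic)  # {'u1': 0, 'u2': 1}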
import pandas as pd
import numpy as np
import sys, os, time
import matplotlib.pyplot as plt
import seaborn as sns
import datajoint as dj
from IPython import embed as shell # for debugging
from scipy.special import erf # for psychometric functions
import matplotlib as mpl
# import wrappers etc
from ibl_pipeline import reference, subject, action, acquisition, data, behavior
from ibl_pipeline.utils import psychofit as psy
# ================================================================== #
# DEFINE PSYCHFUNCFIT TO WORK WITH FACETGRID IN SEABORN
# ================================================================== #
def fit_psychfunc(df):
choicedat = df.groupby('signed_contrast').agg({'trial':'count', 'choice2':'mean'}).reset_index()
pars, L = psy.mle_fit_psycho(choicedat.values.transpose(), P_model='erf_psycho_2gammas',
parstart=np.array([choicedat['signed_contrast'].mean(), 20., 0.05, 0.05]),
parmin=np.array([choicedat['signed_contrast'].min(), 0., 0., 0.]),
parmax=np.array([choicedat['signed_contrast'].max(), 100., 1, 1]))
df2 = {'bias':pars[0],'threshold':pars[1], 'lapselow':pars[2], 'lapsehigh':pars[3]}
df2 =
|
pd.DataFrame(df2, index=[0])
|
pandas.DataFrame
|
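A minimal sketch of the completion above: a dict of scalar fit parameters becomes a one-row DataFrame when index=[0] is passed; the parameter values here are invented.

import pandas as pd

pars = {'bias': 0.1, 'threshold': 18.2, 'lapselow': 0.04, 'lapsehigh': 0.03}
df2 = pd.DataFrame(pars, index=[0])  # one row, one column per parameter
print(df2.shape)  # (1, 4)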
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import transition
from ...utils import testing as ust
@pytest.fixture
def basic_df():
return pd.DataFrame(
{'x': range(5),
'y': range(5, 10)},
index=range(100, 105))
@pytest.fixture
def year():
return 2112
@pytest.fixture
def totals_col():
return 'total'
@pytest.fixture
def rates_col():
return 'growth_rate'
@pytest.fixture
def grow_targets(year, totals_col):
return pd.DataFrame({totals_col: [7]}, index=[year])
@pytest.fixture
def grow_targets_filters(year, totals_col):
return pd.DataFrame({'x_min': [0, 2, np.nan],
'y_max': [7, 9, np.nan],
'x': [np.nan, np.nan, 4],
totals_col: [1, 4, 10]},
index=[year, year, year])
@pytest.fixture(scope='function')
def random_df(request):
"""
Seed the numpy prng and return a data frame w/ predictable test inputs
so that the tests will have consistent results across builds.
"""
old_state = np.random.get_state()
def fin():
# tear down: reset the prng after the test to the pre-test state
np.random.set_state(old_state)
request.addfinalizer(fin)
np.random.seed(1)
return pd.DataFrame(
{'some_count': np.random.randint(1, 8, 20)},
index=range(0, 20))
@pytest.fixture
def growth_rates(rates_col, totals_col, grow_targets):
del grow_targets[totals_col]
grow_targets[rates_col] = [0.4]
return grow_targets
@pytest.fixture
def growth_rates_filters(rates_col, totals_col, grow_targets_filters):
del grow_targets_filters[totals_col]
grow_targets_filters[rates_col] = [0.5, -0.5, 0]
return grow_targets_filters
def assert_empty_index(index):
pdt.assert_index_equal(index, pd.Index([]))
def assert_for_add(new, added):
assert len(new) == 7
pdt.assert_index_equal(added, pd.Index([105, 106]))
def assert_for_remove(new, added):
assert len(new) == 3
assert_empty_index(added)
def test_add_rows(basic_df):
nrows = 2
new, added, copied = transition.add_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_starting_index(basic_df):
nrows = 2
starting_index = 1000
new, added, copied = transition.add_rows(basic_df, nrows, starting_index)
assert len(new) == len(basic_df) + nrows
pdt.assert_index_equal(added, pd.Index([1000, 1001]))
assert len(copied) == nrows
assert copied.isin(basic_df.index).all()
def test_add_rows_zero(basic_df):
nrows = 0
new, added, copied = transition.add_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
def test_add_rows_with_accounting(random_df):
control = 10
new, added, copied = transition.add_rows(
random_df, control, accounting_column='some_count')
assert control == new.loc[copied]['some_count'].sum()
assert copied.isin(random_df.index).all()
def test_remove_rows(basic_df):
nrows = 2
new, removed_indexes = transition.remove_rows(basic_df, nrows)
assert_for_remove(new, transition._empty_index())
assert len(removed_indexes) == nrows
assert removed_indexes.isin(basic_df.index).all()
def test_remove_rows_zero(basic_df):
nrows = 0
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(removed)
def test_remove_rows_all(basic_df):
nrows = len(basic_df)
new, removed = transition.remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df.loc[[]])
ust.assert_index_equal(removed, basic_df.index)
def test_remove_rows_with_accounting(random_df):
control = 10
new, removed = transition.remove_rows(
random_df, control, accounting_column='some_count')
assert control == random_df.loc[removed]['some_count'].sum()
assert removed.isin(random_df.index).all()
def test_remove_rows_raises(basic_df):
# should raise ValueError if asked to remove more rows than
# are in the table
nrows = 25
with pytest.raises(ValueError):
transition.remove_rows(basic_df, nrows)
def test_add_or_remove_rows_add(basic_df):
nrows = 2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_add(new, added)
assert len(copied) == abs(nrows)
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_add_or_remove_rows_remove(basic_df):
nrows = -2
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
assert_for_remove(new, added)
assert len(removed) == abs(nrows)
assert removed.isin(basic_df.index).all()
assert_empty_index(copied)
def test_add_or_remove_rows_zero(basic_df):
nrows = 0
new, added, copied, removed = \
transition.add_or_remove_rows(basic_df, nrows)
pdt.assert_frame_equal(new, basic_df)
assert_empty_index(added)
assert_empty_index(copied)
assert_empty_index(removed)
def test_grtransition_add(basic_df):
growth_rate = 0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_add(new, added)
assert len(copied) == 2
assert copied.isin(basic_df.index).all()
assert_empty_index(removed)
def test_grtransition_add_with_accounting(random_df):
growth_rate = .1
year = 2012
orig_total = random_df['some_count'].sum()
growth = int(round(orig_total * growth_rate))
target = orig_total + growth
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert growth == new.loc[copied]['some_count'].sum()
assert target == new['some_count'].sum()
assert copied.isin(random_df.index).all()
assert_empty_index(removed)
def test_grtransition_remove(basic_df):
growth_rate = -0.4
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
assert_for_remove(new, added)
assert_empty_index(copied)
assert len(removed) == 2
assert removed.isin(basic_df.index).all()
def test_grtransition_remove_with_accounting(random_df):
growth_rate = -.1
year = 2012
orig_total = random_df['some_count'].sum()
change = -1 * int(round(orig_total * growth_rate))
target = orig_total - change
grt = transition.GrowthRateTransition(growth_rate, 'some_count')
new, added, copied, removed = grt(random_df, year)
assert change == random_df.loc[removed]['some_count'].sum()
assert target == new['some_count'].sum()
assert removed.isin(random_df.index).all()
assert_empty_index(added)
assert_empty_index(copied)
def test_grtransition_remove_all(basic_df):
growth_rate = -1
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
pdt.assert_frame_equal(new, basic_df.loc[[]])
assert_empty_index(added)
assert_empty_index(copied)
ust.assert_index_equal(removed, basic_df.index)
def test_grtransition_zero(basic_df):
growth_rate = 0
year = 2112
grt = transition.GrowthRateTransition(growth_rate)
new, added, copied, removed = grt.transition(basic_df, year)
|
pdt.assert_frame_equal(new, basic_df)
|
pandas.util.testing.assert_frame_equal
|
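A small sketch of the assertion the record above completes. Note that pandas.util.testing is deprecated in recent pandas; the same helper is available as pandas.testing.assert_frame_equal, which is what this sketch imports.

import pandas as pd
import pandas.testing as pdt

a = pd.DataFrame({'x': range(5), 'y': range(5, 10)}, index=range(100, 105))
b = a.copy()
pdt.assert_frame_equal(a, b)      # identical frames pass silently
b.loc[100, 'x'] = 99
try:
    pdt.assert_frame_equal(a, b)  # a mismatch raises with a cell-level diff
except AssertionError as err:
    print('frames differ:', err)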
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by <NAME> and CBIG under MIT license:
https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
"""
import os
import sys
import random
import datetime
import numpy as np
import pandas as pd
import scipy.io as sio
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
sys.path.append("..")
try:
from config import config
except ImportError:
raise
def filter_subject_by_FC():
    '''filter the raw CSV file to only include subjects with FC ('25751-2.0')
Args:
None
Returns:
None
'''
raw_csv_name = config.CSV_RAW
csv_base_name = config.CSV_BASE
if not os.path.isfile(csv_base_name):
if 'raw_csv' not in locals():
raw_csv = pd.read_csv(raw_csv_name)
tmp = raw_csv['25751-2.0'].dropna()
print(tmp.shape[0], 'numbers of subjects with FC by UKBB')
csv_base = raw_csv.loc[tmp.index.values, :]
csv_base.to_csv(csv_base_name)
else:
print('file exists:', csv_base_name)
def convert_date2float(data):
'''convert dataframe's date to float
Args:
data (pandas.DataFrame): df that to convert the date to float
Returns:
pandas.DataFrame: df with date converted to float
'''
data[~data.isnull()] = pd.to_datetime(
data.dropna()).astype(int).astype(float)
return data.astype(float)
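# Illustrative note (not in the original code): a Series containing '2020-01-01',
# passed through pd.to_datetime(...).astype(int), yields nanoseconds since the Unix
# epoch (1577836800000000000); that integer is what convert_date2float stores as a float.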
def unique_entry(x):
'''check number of unique entry
Args:
x (list): entry list
Returns:
int: number of unique entry
'''
return len(np.unique([tmp.split('-')[0] for tmp in x]))
def coarse_filter():
    '''perform a coarse filter on phenotypes based on various criteria, such as:
    remove category phenotypes except sex
    remove brain MRI phenotypes
    remove phenotypes that have fewer than 2000 participants
    remove phenotypes with the same value for more than 80% of participants
(details in section 4.4 in the meta-matching paper)
Args:
None
Returns:
None
'''
if os.path.isfile(config.CSV_COARSE_FILTER):
print('file exists:', config.CSV_COARSE_FILTER)
return
# load data
dat_csv = pd.read_csv(config.CSV_BASE)
ori_csv = dat_csv.copy()
inf_csv = pd.read_csv(config.CSV_UKBB_CAT_INFO)
# check entry in the UKBioBank showcase
    # '20235-0.0' is deleted here because it is not in the showcase and has only NaN values
entry_v0 = dat_csv.columns.values[4:]
print('number of entry before selection:', len(entry_v0), 'unique:',
unique_entry(entry_v0))
inf_list = inf_csv['data_id'].values
entry_v1 = []
for entry in entry_v0:
phenotype = int(entry.split('-')[0])
if phenotype not in inf_list:
            print(entry, 'is not in the UKBioBank showcase website')
else:
entry_v1.append(entry)
print('number of entry check ukbb showcase:', len(entry_v1), 'unique:',
unique_entry(entry_v1))
# Removed non-continuous and non-integer data fields (date and time
# converted to float)
# Removed Brain MRI phenotypes (category ID 100)
entry_v2 = []
for entry in entry_v1:
phenotype = int(entry.split('-')[0])
row = inf_csv.loc[inf_csv['data_id'] == phenotype]
phe_type = row['Value Type'].values[0]
phe_brain_mri = row['level1'].values[0]
if phe_brain_mri == 'Brain MRI':
# print(entry, row['data_field'].values[0])
continue
if phe_type.startswith('Date') or phe_type.startswith('Time'):
dat_csv[entry] = convert_date2float(dat_csv[entry].copy())
entry_v2.append(entry)
if phe_type.startswith('Continuous') or phe_type.startswith('Integer'):
entry_v2.append(entry)
print('number of entry after keep Continuous or Integer value:',
len(entry_v2), 'unique:', unique_entry(entry_v2))
    # only keep the first imaging visit, except when an earlier visit has more subjects
entry_v2_5 = entry_v2
entry_v2_5 = np.array(entry_v2_5)
phe_list = np.unique([int(i.split('-')[0]) for i in entry_v2_5])
for i in phe_list:
# Removed first repeat imaging visit (instance 3)
temp_list = [j for j in entry_v2_5 if j.startswith(str(i) + '-')]
for j in temp_list:
if j.startswith(str(i) + '-3.'):
entry_v2_5 = np.delete(entry_v2_5,
np.where(entry_v2_5 == j)[0][0])
temp_list = [j for j in temp_list if not j.startswith(str(i) + '-3.')]
# Removed first two instances (instance 0 and 1) if first imaging visit
# (instance 2) exists and first imaging visit (instance 2) participants
# were more than double of participants from instance 0 or 1
temp_list_2 = np.sort(np.unique([j.split('.')[0] for j in temp_list]))
if len(temp_list_2) in [2, 3] and str(i) + '-2' == temp_list_2[-1]:
n_subj = [0, 0, 0]
for j in temp_list:
tmp = dat_csv[j].dropna().values.shape[0]
tmp_i = int(j.split('.')[0].split('-')[1])
if n_subj[tmp_i] < tmp:
n_subj[tmp_i] = tmp
print(i, n_subj)
if n_subj[2] * 2 >= n_subj[1] and n_subj[2] * 2 >= n_subj[0]:
for j in temp_list:
if not j.startswith(str(i) + '-2'):
entry_v2_5 = np.delete(entry_v2_5,
np.where(entry_v2_5 == j)[0][0])
# Removed first instance (instance 0) if only the first two instances
# (instance 0 and 1) exist, and instance 1 participants were more than
# double of participants from instance 0
if len(temp_list_2) == 2 and str(i) + '-1' == temp_list_2[-1]:
n_subj = [0, 0]
for j in temp_list:
tmp = dat_csv[j].dropna().values.shape[0]
tmp_i = int(j.split('.')[0].split('-')[1])
if n_subj[tmp_i] < tmp:
n_subj[tmp_i] = tmp
print(i, n_subj)
if n_subj[1] * 2 >= n_subj[0]:
for j in temp_list:
if not j.startswith(str(i) + '-1'):
entry_v2_5 = np.delete(entry_v2_5,
np.where(entry_v2_5 == j)[0][0])
print('number of entry after remove previous visit:', len(entry_v2_5),
'unique:', unique_entry(entry_v2_5))
# Removed bulk item
# Removed phenotypes for which less than 2000 participants had RSFC data
# Removed behaviors with the same value for more than 80% of participants
entry_v3 = []
for entry in entry_v2_5:
phe_temp = entry.replace('-', '_').replace('.', '_')
phe_value = dat_csv[entry].dropna().values
phe_type = dat_csv[entry].dtype
_, phe_count = np.unique(phe_value, return_counts=True)
if phe_temp in phe_value:
print(entry)
elif np.logical_not(np.all(np.isreal(phe_value))):
print(entry, 'not real')
elif phe_type == object:
print(entry)
elif phe_value.shape[0] < 2000:
print(entry, phe_value.shape[0])
elif np.max(phe_count) / np.sum(phe_count) > 0.8:
print(entry, phe_count)
else:
entry_v3.append(entry)
print('after delete bulk item:', len(entry_v3), 'unique:',
unique_entry(entry_v3))
entry_v3.append('31-0.0') # add back sex
# calculate detailed age
entry_v3.append('age-2.0')
entry_v3.remove('34-0.0')
entry_v3.remove('53-2.0')
entry_v3.remove('21022-0.0')
np.savetxt(
os.path.join(config.DIR_1_OUTPUT, 'phe_continuous_new.txt'),
entry_v3,
fmt='%s')
entry_v3.remove('age-2.0')
entry_v3.append('eid')
entry_v3.append('25741-2.0')
final_csv = dat_csv.loc[:, entry_v3]
temp = final_csv.loc[:, 'eid'].astype(int)
final_csv.loc[:, 'eid'] = temp
age = np.zeros((final_csv.shape[0]))
for i in range(final_csv.shape[0]):
year = int(ori_csv['34-0.0'].values[i])
month = int(ori_csv['52-0.0'].values[i])
scan_date = ori_csv['53-2.0'].values[i]
scan_date = datetime.datetime.strptime(scan_date, '%Y-%m-%d').date()
birth_date = datetime.datetime.strptime(
str(year) + str(month), '%Y%m').date()
age[i] = (scan_date - birth_date).days / 365
final_csv['age-2.0'] = pd.Series(age, index=final_csv.index)
# Save out filtered CSV
for i in final_csv.columns.values[1:]:
print(i, np.sum(~np.isnan(final_csv[i].values)))
final_csv = final_csv.sort_values(by=['eid'])
final_phes = list(final_csv.columns.values)
final_phes.remove('eid')
final_phes.remove('25741-2.0')
print('phenotypes count after first filter', len(final_phes), 'unique:',
unique_entry(final_phes))
final_csv.to_csv(config.CSV_COARSE_FILTER)
def plot_phe_num_subj(df_phe, save_name, phes=None):
'''plot hist of dataframe subjects count for various phenotypes
Args:
df_phe (pandas.DataFrame): phenotype DataFrame
save_name (str): save place of png files
phes (List): phenotypes to plot
Returns:
None
'''
subj_cnt = []
if phes is None:
phes = df_phe.columns.values
for phe in phes:
cnt = np.sum(~np.isnan(df_phe[phe].values))
subj_cnt.append(cnt)
# print(phe, np.sum(~np.isnan(df_phe[phe].values)))
fig, ax = plt.subplots()
ax.hist(subj_cnt, 50)
ax.set(xlabel='num of subjects', ylabel='num of phenotype')
plt.tight_layout()
fig.savefig(save_name)
plt.close('all')
phe_count = []
for phe in phes:
phe_count.append(df_phe[phe].dropna().values.shape[0])
phe_count = np.sort(phe_count)
print(save_name, df_phe.shape, phe_count)
def split_for_phe_select():
'''split 1000 subjects for next step phenotype selection
Args:
None
Returns:
None
'''
csv_1000 = config.CSV_1000_FOR_PHE_SELECT
csv_remain = config.CSV_REMAIN
if os.path.isfile(csv_1000) and os.path.isfile(csv_remain):
print('file exists:', csv_1000, 'and', csv_remain)
return
df_phe = pd.read_csv(config.CSV_COARSE_FILTER)
seed = config.RAMDOM_SEED
random.seed(seed)
np.random.seed(seed)
df_train, df_test = train_test_split(
df_phe, test_size=1000, random_state=seed)
df_train.sort_index(inplace=True)
df_test.sort_index(inplace=True)
if not np.array_equal(df_test['eid'], np.sort(df_test['eid'])):
raise Exception('df test eid is not sorted')
if not np.array_equal(df_train['eid'], np.sort(df_train['eid'])):
raise Exception('df train eid is not sorted')
del df_train['Unnamed: 0']
del df_test['Unnamed: 0']
plot_phe_num_subj(
df_train, os.path.join(config.DIR_1_OUTPUT,
'num_subj_dist_remain.png'))
phe_count = []
for phe in df_test.columns.values:
phe_count.append(df_test[phe].dropna().values.shape[0])
phe_count = np.sort(phe_count)
print(phe_count)
df_test.reset_index(drop=True, inplace=True)
df_test.to_csv(csv_1000)
df_train.reset_index(drop=True, inplace=True)
df_train.to_csv(csv_remain)
plot_phe_num_subj(
pd.read_csv(csv_1000),
os.path.join(config.DIR_1_OUTPUT, 'num_subj_dist_1000.png'))
def get_pfc(subject_list, roi=55, flag_HCP=False):
    '''convert txt FC file downloaded from UK Biobank to ndarray
Args:
subject_list (ndarray): list of subjects eid
roi (int): number of roi for FC
flag_HCP (optional, bool): whether get data for HCP dataset
Returns:
Tuple: ndarray for partial FC, one is in NxN matrix, one is flatten
'''
index = np.tril(np.ones(roi), k=-1) == 1
if roi == 55:
dir_pfc = config.DIR_PFC
else:
if flag_HCP:
dir_pfc = config.DIR_5_FC[str(roi)]
else:
dir_pfc = config.DIR_DIFF_ROI_UKBB[str(roi)]
pfc_list = []
pfc_list_flat = []
for i in subject_list:
if roi == 55:
temp = np.zeros((roi, roi))
temp_flat = np.loadtxt(
os.path.join(dir_pfc,
str(i) + '_25753_2_0.txt'))
temp[index] = temp_flat
temp = temp + temp.T
else:
if flag_HCP:
temp = sio.loadmat(
os.path.join(
dir_pfc,
'FC_' + str(roi) + '_ROIs_' + str(i) + '.mat'))
temp = temp['corr_mat']
else:
temp = np.load(
os.path.join(dir_pfc,
str(i) + '_alex' + str(roi) + '_FC.npy'))
# print(temp.shape)
temp_flat = temp[index]
# print(temp_flat.shape)
pfc_list.append(temp)
pfc_list_flat.append(temp_flat)
pfc = np.stack(pfc_list, axis=2)
pfc_flat = np.stack(pfc_list_flat, axis=1)
return pfc, pfc_flat
def create_pfc_corr_mat():
'''create mat file for partial FC for 1000 subjects
    These 1000 subjects are going to be used only for phenotype selection
Args:
None
Returns:
None
'''
# get partial FC and subject list
if os.path.isfile(config.MAT_PFC_1000_FLAT) and os.path.isfile(
config.PHE_LIST_COARSE_FILTER):
print('file exists:', config.MAT_PFC_1000_FLAT)
print('file exists:', config.PHE_LIST_COARSE_FILTER)
return
csv =
|
pd.read_csv(config.CSV_1000_FOR_PHE_SELECT)
|
pandas.read_csv
|
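A hedged sketch of the read_csv round trip used in the script above: writing with the default index and reading back produces the 'Unnamed: 0' column that the code later deletes. The toy columns are invented.

import io
import pandas as pd

df = pd.DataFrame({'eid': [1001, 1002], '31-0.0': [0, 1]})
buf = io.StringIO()
df.to_csv(buf)               # index written as an unnamed first column
buf.seek(0)
df2 = pd.read_csv(buf)
print(df2.columns.tolist())  # ['Unnamed: 0', 'eid', '31-0.0']
del df2['Unnamed: 0']        # same cleanup as in split_for_phe_select()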
"""
This module extracts features from the data, saves the features
from all measurements to a global results file and creates
one file for every sensor with all measurements.
:copyright: (c) 2022 by <NAME>, Hochschule-Bonn-Rhein-Sieg
:license: see LICENSE for more details.
"""
from pyexpat import features
import pandas as pd
from scipy.signal import chirp, find_peaks, peak_widths
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
######################################################################################################################
## this class creates objects for every sensor and stores all measurements for this sensor ##
class Sensor:
"""This is for creating objects for every sensor and stores the data from all measurements
in this object. Sensor names are picked up in properties. One DataFrame for every sensor is created
Args:
properties (dictionary): properties is a dictionary with all parameters for evaluating the data
"""
def __init__(self, properties):
"""
constructor method
"""
        df_dict = {}  # dict with a df for each sensor, one for all measurements
self.properties = properties
for sensor in self.properties['sensors']:
df_dict[sensor] =
|
pd.DataFrame()
|
pandas.DataFrame
|
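A minimal sketch of the per-sensor dict built in the constructor above; the sensor names and the feature columns are invented for illustration.

import pandas as pd

properties = {'sensors': ['sensor_a', 'sensor_b']}
df_dict = {sensor: pd.DataFrame() for sensor in properties['sensors']}
# Each measurement can later be concatenated row-wise, e.g.:
new_row = pd.DataFrame({'peak_height': [1.2], 'peak_width': [0.3]})
df_dict['sensor_a'] = pd.concat([df_dict['sensor_a'], new_row], ignore_index=True)
print(df_dict['sensor_a'])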
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 15 11:51:39 2020
This is best run inside Spyder, not as standalone script.
Author: @hk_nien on Twitter.
"""
import re
import sys
import io
import urllib
import urllib.request
from pathlib import Path
import time
import locale
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import nl_regions
import scipy.signal
import scipy.interpolate
import scipy.integrate
import tools
from g_mobility_data import get_g_mobility_data
from nlcovidstats_data import (
init_data,
DFS,
get_municipalities_by_pop,
load_cumulative_cases,
)
# These delay values are tuned to match the RIVM Rt estimates.
# The represent the delay (days) from infection to report date,
# referencing the report date.
# Extrapolation: constant value.
DELAY_INF2REP = [
('2020-07-01', 7.5),
('2020-09-01', 7),
('2020-09-15', 9),
('2020-10-09', 9),
('2020-11-08', 7),
('2020-12-01', 6.5),
('2021-02-15', 6.5),
('2021-04-05', 4),
('2021-07-06', 4),
('2021-07-15', 5),
('2021-07-23', 4),
('2021-07-30', 4),
('2021-11-04', 4),
('2021-11-11', 4.5),
('2021-11-20', 5),
('2021-11-25', 5),
('2021-12-04', 4.5), # test capacity increased
('2021-12-08', 4), # Speculation...
]
_DOW_CORR_CACHE = {} # keys: dayrange tuples.
def get_dow_correction_rolling(nweeks=7, taper=0.5):
"""Return DoW correction factors for all dates.
Parameters:
- nweeks: number of preceding weeks to use for each date.
- taper: which fraction of old data to taper to lower weight.
Return:
- Series with same timestamp index as cases data.
"""
df, _ = get_region_data('Nederland', lastday=-1, correct_dow=None)
# df = df.iloc[3:-3].copy() # strip edge points without well defined 7d mean.
# Correction factor - 1
df['Delta_factor'] = df['Delta']/df['Delta7r']
ntaper = int(nweeks*taper + 0.5)
kernel = np.zeros(nweeks*2 + 1)
kernel[-nweeks:] = 1
kernel[-nweeks:-nweeks+ntaper] = np.linspace(1/ntaper, 1-1/ntaper, ntaper)
kernel /= kernel.sum()
df['Dow_factor'] = np.nan
for idow in range(7):
row_select = df.index[df.index.dayofweek == idow]
facs = df.loc[row_select, 'Delta_factor']
n = len(facs)
assert len(facs) > nweeks
mean_factors = np.convolve(facs, kernel, mode='same')
mean_factors[mean_factors == 0] = np.nan
df.loc[row_select, 'Dow_factor'] = 1/mean_factors
df.loc[df.index[:8], 'Dow_factor'] = np.nan
return df['Dow_factor']
def get_dow_correction(dayrange=(-50, -1), verbose=False):
"""Return array with day-of-week correction factors.
- dayrange: days to consider for DoW correction.
- verbose: whether to show plots and print diagnostics.
Return:
- dow_corr_factor: array (7,) with DoW correction (0=Monday).
"""
dayrange = tuple(dayrange)
if dayrange in _DOW_CORR_CACHE and not verbose:
return _DOW_CORR_CACHE[dayrange].copy()
# timestamp index, columns Delta, Delta7r, and others.
df, _ = get_region_data('Nederland', lastday=dayrange[-1], correct_dow=None)
df = df.iloc[:-4] # Discard the last rows that have no correct rolling average.
df = df.iloc[dayrange[0]-dayrange[1]:]
# Correction factor - 1
df['Delta_factor'] = df['Delta']/df['Delta7r']
# Collect by day of week (0=Monday)
factor_by_dow = np.zeros(7)
for i in range(7):
factor_by_dow[i] = 1 / df.loc[df.index.dayofweek == i, 'Delta_factor'].mean()
factor_by_dow /= factor_by_dow.mean()
df['Delta_est_factor'] = factor_by_dow[df.index.dayofweek]
df['Delta_corrected'] = df['Delta'] * df['Delta_est_factor']
rms_dc = (df['Delta_corrected']/df['Delta7r']).std()
rms_d = df['Delta_factor'].std()
if verbose:
print('DoW effect: deviations from 7-day rolling average.\n'
f' Original: RMS={rms_d:.3g}; after correction: RMS={rms_dc:.3g}')
fig, ax = plt.subplots(tight_layout=True)
ax.plot(df['Delta_factor'], label='Delta')
ax.plot(df['Delta_corrected'] / df['Delta7r'], label='Delta_corrected')
ax.plot(df['Delta_est_factor'], label='Correction factor')
tools.set_xaxis_dateformat(ax, 'Date')
ax.legend()
ax.set_ylabel('Daily cases deviation')
title = 'Day-of-week correction on daily cases'
ax.set_title(title)
fig.canvas.set_window_title(title)
fig.show()
if rms_dc > 0.8*rms_d:
print(f'WARNING: DoW correction for dayrange={dayrange} does not seem to work.\n'
' Abandoning this correction.')
factor_by_dow = np.ones(7)
_DOW_CORR_CACHE[dayrange] = factor_by_dow.copy()
return factor_by_dow
def get_region_data(region, lastday=-1, printrows=0, correct_anomalies=True,
correct_dow='r7'):
"""Get case counts and population for one municipality.
It uses the global DFS['mun'], DFS['cases'] dataframe.
Parameters:
- region: region name (see below)
- lastday: last day to include.
- printrows: print this many of the most recent rows
- correct_anomalies: correct known anomalies (hiccups in reporting)
by reassigning cases to earlier dates.
- correct_dow: None, 'r7' (only for extrapolated rolling-7 average)
Special municipalities:
- 'Nederland': all
- 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
holiday regions.
    - 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy
- 'P:xx': province
Use data up to lastday.
Return:
- df: dataframe with added columns:
- Delta: daily increase in case count (per capita).
- Delta_dowc: daily increase, day-of-week correction applied
based on national pattern in most recent 7 weeks.
- Delta7r: daily increase as 7-day rolling average
(last 3 days are estimated).
      - DeltaSG: daily increase, smoothed with (15, 2) Savitzky-Golay filter.
- pop: population.
"""
df1, npop = nl_regions.select_cases_region(DFS['cases'], region)
# df1 will have index 'Date_of_report', columns:
# 'Total_reported', 'Hospital_admission', 'Deceased'
assert correct_dow in [None, 'r7']
if lastday < -1 or lastday > 0:
df1 = df1.iloc[:lastday+1]
if len(df1) == 0:
raise ValueError(f'No data for region={region!r}.')
# nc: number of cases
nc = df1['Total_reported'].diff()
if printrows > 0:
print(nc[-printrows:])
nc.iat[0] = 0
df1['Delta'] = nc/npop
if correct_anomalies:
_correct_delta_anomalies(df1)
nc = df1['Delta'] * npop
nc7 = nc.rolling(7, center=True).mean()
nc7[np.abs(nc7) < 1e-10] = 0.0 # otherwise +/-1e-15 issues.
nc7a = nc7.to_numpy()
# last 3 elements are NaN, use mean of last 4 raw (dow-corrected) to
# get an estimated trend and use exponential growth or decay
# for filling the data.
if correct_dow == 'r7':
# mean number at t=-1.5 days
dow_correction = get_dow_correction((lastday-49, lastday)) # (7,) array
df1['Delta_dowc'] = df1['Delta'] * dow_correction[df1.index.dayofweek]
nc1 = np.mean(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
else:
nc1 = nc.iloc[-4:].mean() # mean number at t=-1.5 days
log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)
# 1st 3 elements are NaN
nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)
df1['Delta7r'] = nc7/npop
df1['DeltaSG'] = scipy.signal.savgol_filter(
nc/npop, 15, 2, mode='interp')
return df1, npop
def _correct_delta_anomalies(df):
"""Apply anomaly correction to 'Delta' column.
Store original values to 'Delta_orig' column.
Pull data from DFS['anomalies']
"""
dfa = DFS['anomalies']
df['Delta_orig'] = df['Delta'].copy()
dt_tol = pd.Timedelta(12, 'h') # tolerance on date matching
match_date = lambda dt: abs(df.index - dt) < dt_tol
preserve_n = True
for (date, data) in dfa.iterrows():
if date == '2021-02-08':
print('@foo')
f = data['fraction']
dt = data['days_back']
dn = df.loc[match_date(date), 'Delta_orig'] * f
if len(dn) == 0:
print(f'Anomaly correction: no match for {date}; skipping.')
continue
assert len(dn) == 1
dn = dn[0]
df.loc[match_date(date + pd.Timedelta(dt, 'd')), 'Delta'] += dn
if dt != 0:
df.loc[match_date(date), 'Delta'] -= dn
else:
preserve_n = False
if preserve_n:
assert np.isclose(df["Delta"].sum(), df["Delta_orig"].sum(), rtol=1e-6, atol=0)
else:
delta = df["Delta"].sum() - df["Delta_orig"].sum()
print(f'Note: case count increased by {delta*17.4e6:.0f} cases due to anomalies.')
def construct_Dfunc(delays, plot=False):
"""Return interpolation functions fD(t) and fdD(t).
fD(t) is the delay between infection and reporting at reporting time t.
fdD(t) is its derivative.
Parameter:
- delays: tuples (datetime_report, delay_days). Extrapolation is at
constant value.
- plot: whether to generate a plot.
Return:
- fD: interpolation function for D(t) with t in nanoseconds since epoch.
- fdD: interpolation function for dD/dt.
(taking time in ns but returning dD per day.)
- delay_str: delay string e.g. '7' or '7-9'
"""
ts0 = [float(pd.to_datetime(x[0]).to_datetime64()) for x in delays]
Ds0 = [float(x[1]) for x in delays]
if len(delays) == 1:
# prevent interp1d complaining.
ts0 = [ts0[0], ts0[0]+1e9]
Ds0 = np.concatenate([Ds0, Ds0])
# delay function as linear interpolation;
# nanosecond timestamps as t value.
fD0 = scipy.interpolate.interp1d(
ts0, Ds0, kind='linear', bounds_error=False,
fill_value=(Ds0[0], Ds0[-1])
)
# construct derivative dD/dt, smoothen out
day = 1e9*86400 # one day in nanoseconds
ts = np.arange(ts0[0]-3*day, ts0[-1]+3.01*day, day)
dDs = (fD0(ts+3*day) - fD0(ts-3*day))/6
fdD = scipy.interpolate.interp1d(
ts, dDs, 'linear', bounds_error=False,
fill_value=(dDs[0], dDs[-1]))
# reconstruct D(t) to be consistent with the smoothened derivative.
Ds = scipy.integrate.cumtrapz(dDs, ts/day, initial=0) + Ds0[0]
fD = scipy.interpolate.interp1d(
ts, Ds, 'linear', bounds_error=False,
fill_value=(Ds[0], Ds[-1]))
Dmin, Dmax = np.min(Ds0), np.max(Ds0)
if Dmin == Dmax:
delay_str = f'{Dmin:.0f}'
else:
delay_str = f'{Dmin:.0f}-{Dmax:.0f}'
if plot:
fig, ax = plt.subplots(1, 1, figsize=(7, 3), tight_layout=True)
tsx = np.linspace(
ts[0],
int(pd.to_datetime('now').to_datetime64())
)
ax.plot(pd.to_datetime(tsx.astype(np.int64)), fD(tsx))
ax.set_ylabel('Vertraging (dagen)')
tools.set_xaxis_dateformat(ax, 'Rapportagedatum')
title = 'Vertraging = t_rapportage - t_infectie - t_generatie/2'
fig.canvas.set_window_title(title)
ax.set_title(title)
fig.show()
return fD, fdD, delay_str
def estimate_Rt_df(r, delay=9, Tc=4.0):
"""Return Rt data, assuming delay infection-reporting.
- r: Series with smoothed new reported cases.
(e.g. 7-day rolling average or other smoothed data).
- delay: assume delay days from infection to positive report.
alternatively: list of (timestamp, delay) tuples if the delay varies over time.
The timestamps refer to the date of report.
- Tc: assume generation interval.
Return:
- DataFrame with columns 'Rt' and 'delay'.
"""
if not hasattr(delay, '__getitem__'):
# simple delay - attach data to index with proper offset
log_r = np.log(r.to_numpy()) # shape (n,)
assert len(log_r.shape) == 1
log_slope = (log_r[2:] - log_r[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
index = r.index[1:-1] - pd.Timedelta(delay, unit='days')
Rdf = pd.DataFrame(
dict(Rt=pd.Series(index=index, data=Rt, name='Rt'))
)
Rdf['delay'] = delay
else:
# the hard case: delay varies over time.
# if ri is the rate of infections, tr the reporting date, and D
# the delay, then:
# ri(tr-D(tr)) = r(tr) / (1 - dD/dt)
fD, fdD, _ = construct_Dfunc(delay)
# note: timestamps in nanoseconds since epoch, rates in 'per day' units.
day_ns = 86400e9
tr = r.index.astype(int)
ti = tr - fD(tr) * day_ns
ri = r.to_numpy() / (1 - fdD(tr))
# now get log-derivative the same way as above
log_ri = np.log(np.where(ri==0, np.nan, ri))
log_slope = (log_ri[2:] - log_ri[:-2])/2 # (n-2,)
Rt = np.exp(Tc*log_slope) # (n-2,)
# build series with timestamp index
# (Note: int64 must be specified explicitly in Windows, 'int' will be
# int32.)
Rt_series = pd.Series(
data=Rt, name='Rt',
index=pd.to_datetime(ti[1:-1].astype(np.int64))
)
Rdf = pd.DataFrame(dict(Rt=Rt_series))
Rdf['delay'] = fD(tr[1:-1])
return Rdf
def get_t2_Rt(ncs, delta_t, i0=-3):
"""Return most recent doubling time and Rt, from case series"""
# exponential fit
t_gen = 4.0 # generation time (d)
t_double = delta_t / np.log2(ncs.iloc[i0]/ncs.iloc[i0-delta_t])
Rt = 2**(t_gen / t_double)
return t_double, Rt
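# Illustrative check (not in the original): with delta_t = 7 days and the case count
# doubling from 1000 to 2000 over that week, t_double = 7 / log2(2000/1000) = 7 days,
# and with t_gen = 4 days the implied Rt = 2**(4/7) ≈ 1.49.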
def add_labels(ax, labels, xpos, mindist_scale=1.0, logscale=True):
"""Add labels, try to have them avoid bumping.
- labels: list of tuples (y, txt)
- mindist_scale: set to >1 or <1 to tweak label spacing.
"""
from scipy.optimize import fmin_cobyla
ymin, ymax = ax.get_ylim()
if logscale:
mindist = np.log10(ymax/ymin)*0.025*mindist_scale
else:
mindist = (ymax - ymin)*0.025*mindist_scale
labels = sorted(labels)
    # log positions and sorted
if logscale:
Ys = np.log10([l[0] for l in labels])
else:
Ys = np.array([l[0] for l in labels])
n = len(Ys)
# Distance matrix: D @ y = distances between adjacent y values
D = np.zeros((n-1, n))
for i in range(n-1):
D[i, i] = -1
D[i, i+1] = 1
def cons(Y):
ds = D @ Y
errs = np.array([ds - mindist, ds])
#print(f'{np.around(errs, 2)}')
return errs.reshape(-1)
# optimization function
def func(Y):
return ((Y - Ys)**2).sum()
new_Ys = fmin_cobyla(func, Ys, cons, catol=mindist*0.05)
for Y, (_, txt) in zip(new_Ys, labels):
y = 10**Y if logscale else Y
ax.text(xpos, y, txt, verticalalignment='center')
def _zero2nan(s):
"""Return copy of array/series s, negative/zeros replaced by NaN."""
sc = s.copy()
sc[s <= 0] = np.nan
return sc
def _add_event_labels(ax, tmin, tmax, with_ribbons=True, textbox=False, bottom=True,
flagmatch='RGraph'):
"""Add event labels and ribbons to axis (with date on x-axis).
- ax: axis object
- tmin, tmax: time range to assume for x axis.
- textbox: whether to draw text in a semi-transparent box.
- bottom: whether to put labels at the bottom rather than top.
- flagmatch: which flags to match (regexp).
"""
ymin, ymax = ax.get_ylim()
y_lab = ymin if bottom else ymax
ribbon_yspan = (ymax - ymin)*0.35
ribbon_hgt = ribbon_yspan*0.1 # ribbon height
ribbon_ystep = ribbon_yspan*0.2
df_events = DFS['events']
ribbon_colors = ['#ff0000', '#cc7700'] * 10
if df_events is not None:
i_res = 0
for _, (res_t, res_t_end, res_d, flags) in df_events.reset_index().iterrows():
if not (tmin <= res_t <= tmax):
continue
if flags and not re.match(flagmatch, flags):
continue
res_d = res_d.replace('\\n', '\n')
# note; with \n in text, alignment gets problematic.
txt = ax.text(res_t, y_lab, f' {res_d}', rotation=90, horizontalalignment='center',
verticalalignment='bottom' if bottom else 'top',
fontsize=8)
if textbox:
txt.set_bbox(dict(facecolor='white', alpha=0.4, linewidth=0))
if pd.isna(res_t_end):
continue
if with_ribbons:
res_t_end = min(res_t_end, tmax)
a, b = (ribbon_ystep * i_res), (ribbon_yspan - ribbon_hgt)
rect_y_lo = a % b + y_lab
color = ribbon_colors[int(a // b)]
rect = matplotlib.patches.Rectangle((res_t, rect_y_lo), res_t_end-res_t, ribbon_hgt,
color=color, alpha=0.15, lw=0, zorder=20)
ax.add_patch(rect)
i_res += 1
def plot_daily_trends(ndays=100, lastday=-1, mun_regexp=None, region_list=None,
source='r7', subtitle=None):
"""Plot daily-case trends (pull data from global DFS dict).
- lastday: up to this day.
- source: 'r7' (7-day rolling average), 'raw' (no smoothing), 'sg'
      (Savitzky-Golay smoothed).
- mun_regexp: regular expression matching municipalities.
- region_list: list of municipalities (including e.g. 'HR:Zuid',
'POP:100-200', 'JSON:{...}'.
if mun_regexp and mun_list are both specified, then concatenate.
If neither are specified, assume 'Nederland'.
JSON is a json-encoded dict with:
- 'label': short label string
- 'color': for plotting, optional.
- 'fmt': format for plotting, e.g. 'o--', optional.
- 'muns': list of municipality names
- subtitle: second title line (optional)
"""
df_events = DFS['events']
df_mun = DFS['mun']
fig, ax = plt.subplots(figsize=(12, 6))
fig.subplots_adjust(top=0.945-0.03*(subtitle is not None),
bottom=0.1, left=0.09, right=0.83)
if region_list is None:
region_list = []
if mun_regexp:
region_list = [m for m in df_mun.index if re.match(mun_regexp, m)] + region_list
if region_list == []:
region_list = ['Nederland']
    labels = []  # tuples (y, txt)
citystats = [] # tuples (Rt, T2, cp100k, cwk, popk, city_name)
for region in region_list:
df1, n_inw = get_region_data(region, lastday=lastday)
df1 = df1.iloc[-ndays:]
fmt = 'o-' if ndays < 70 else '-'
psize = 5 if ndays < 30 else 3
dnc_column = dict(r7='Delta7r', raw='Delta', sg='DeltaSG')[source]
if region.startswith('JSON:'):
reg_dict = json.loads(region[5:])
reg_label = reg_dict['label']
if 'fmt' in reg_dict:
fmt = reg_dict['fmt']
color = reg_dict['color'] if 'color' in reg_dict else None
else:
reg_label = re.sub(r'POP:(.*)-(.*)', r'\1k-\2k inw.', region)
reg_label = re.sub(r'^[A-Z]+:', '', reg_label)
color = None
ax.semilogy(df1[dnc_column]*1e5, fmt, color=color, label=reg_label, markersize=psize)
delta_t = 7
i0 = dict(raw=-1, r7=-3, sg=-3)[source]
t_double, Rt = get_t2_Rt(df1[dnc_column], delta_t, i0=i0)
citystats.append((np.around(Rt, 2), np.around(t_double, 2),
np.around(df1['Delta'][-1]*1e5, 2),
int(df1['Delta7r'][-4] * n_inw * 7 + 0.5),
int(n_inw/1e3 + .5), reg_label))
if abs(t_double) > 60:
texp = f'Stabiel'
elif t_double > 0:
texp = f'×2: {t_double:.3g} d'
elif t_double < 0:
texp = f'×½: {-t_double:.2g} d'
ax.semilogy(
df1.index[[i0-delta_t, i0]], df1[dnc_column].iloc[[i0-delta_t, i0]]*1e5,
'k--', zorder=-10)
labels.append((df1[dnc_column][-1]*1e5, f' {reg_label} ({texp})'))
_add_event_labels(
ax, df1.index[0], df1.index[-1], with_ribbons=False,
flagmatch='CaseGraph'
)
dfc = pd.DataFrame.from_records(
sorted(citystats), columns=['Rt', 'T2', 'C/100k', 'C/wk', 'Pop/k', 'Region'])
dfc.set_index('Region', inplace=True)
print(dfc)
lab_x = df1.index[-1] + pd.Timedelta('1.2 d')
add_labels(ax, labels, lab_x)
if source == 'r7':
ax.axvline(df1.index[-4], color='gray')
# ax.text(df1.index[-4], 0.3, '3 dagen geleden - extrapolatie', rotation=90)
title = '7-daags voortschrijdend gemiddelde; laatste 3 dagen zijn een schatting'
elif source == 'sg':
ax.axvline(df1.index[-8], color='gray')
# ax.text(df1.index[-4], 0.3, '3 dagen geleden - extrapolatie', rotation=90)
title = 'Gefilterde data; laatste 7 dagen zijn minder nauwkeurig'
else:
title = 'Dagcijfers'
ax.set_ylabel('Nieuwe gevallen per 100k per dag')
#ax.set_ylim(0.05, None)
ax.set_xlim(None, df1.index[-1] + pd.Timedelta('1 d'))
from matplotlib.ticker import LogFormatter, FormatStrFormatter
ax.yaxis.set_major_formatter(FormatStrFormatter('%g'))
# Monkey-patch to prevent '%e' formatting.
LogFormatter._num_to_string = lambda _0, x, _1, _2: ('%g' % x)
ax.yaxis.set_minor_formatter(LogFormatter(minor_thresholds=(3, 1)))
#plt.xticks(pd.to_dateTime(['2020-0{i}-01' for i in range(1, 9)]))
ax.legend() # loc='lower left')
tools.set_xaxis_dateformat(ax, yminor=True)
if subtitle:
title += f'\n{subtitle}'
win_xtitle = f', {subtitle}'
else:
win_xtitle = ''
ax.set_title(title)
fig.canvas.set_window_title(f'Case trends (ndays={ndays}){win_xtitle}')
fig.show()
def plot_cumulative_trends(ndays=100, regions=None,
source='r7'):
"""Plot cumulative trends per capita (pull data from global DFS dict).
    - ndays: number of most recent days to plot.
    - source: 'r7' (7-day rolling average), 'raw' (no smoothing), 'sg'
      (Savitzky-Golay smoothed).
    - regions: list of regions (including e.g. 'HR:Zuid',
      'POP:100-200').
"""
fig, ax = plt.subplots(figsize=(12, 6))
# fig.subplots_adjust(top=0.945, bottom=0.085, left=0.09, right=0.83)
for region in regions:
df, npop = nl_regions.select_cases_region(DFS['cases'], region)
df = df.iloc[-ndays:]
ax.semilogy(df['Total_reported'] * (1e5/npop), label=region)
ax.set_ylabel('Cumulatieve Covid-19 gevallen per 100k')
tools.set_xaxis_dateformat(ax)
ax.legend()
fig.show()
def plot_anomalies_deltas(ndays=120):
"""Show effect of anomaly correction."""
df, _npop = get_region_data('Nederland', correct_anomalies=True)
fig, ax = plt.subplots(tight_layout=True, figsize=(8, 5))
col_labs = [('Delta_orig', 'Raw'), ('Delta', 'Anomalies corrected')]
for col, lab in col_labs:
ax.semilogy(df.iloc[-ndays:][col], label=lab)
ax.legend()
tools.set_xaxis_dateformat(ax, maxticks=7)
title = 'Anomaly correction'
ax.set_title(title)
fig.canvas.set_window_title(title)
fig.show()
def _add_mobility_data_to_R_plot(ax):
try:
df = get_g_mobility_data()
except Exception as e:
print(f'No Google Mobility data: {e.__class__.__name__}: {e}')
return
ymin, ymax = ax.get_ylim()
y0 = ymin + (ymax - ymin)*0.85
scale = (ymax - ymin)*0.1
cols = ['retail_recr', 'transit', 'work', 'resid']
    # from rcParams['axes.prop_cycle']
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] * 5
scale /= df[cols].values.max()
ts = df.index
ax.axhline(y0, linestyle='--', color='gray')
for c, clr in zip(cols, colors):
hs = df[c].values
# ax.fill_between(ts, y0-scale*hs, y0+scale*hs, color=clr, alpha=0.3, label=c)
ax.plot(ts, y0+scale*hs, color=clr, label=c)
def _coord_format_Rplot(axR, axD, Tgen):
"""Setup cursor coordinate formatting for R graph, from ax and twinx ax.
axR: R/date axis; axD: doubling time/date axis
Tgen: generation time"""
def format_coord(x, y):
#display_coord = axR.transData.transform((x,y))
#inv =
# convert back to data coords with respect to ax
# ax_coord = inv.transform(display_coord) # x2, y2
# In this case, (x1, x2) == (x2, y2), but the y2 labels
# are custom made. Otherwise:
# x2, y2 = axD.transData.inverted().transform(axR.transData.transform((x,y)))
t, R = x, y
from matplotlib.dates import num2date
tm_str = num2date(t).strftime('%Y-%m-%d %H:%M')
T2 = np.log(2)/np.log(R) * Tgen
return f'{tm_str}: R={R:.3f}, T2={T2:.3g} d'
axD.format_coord = format_coord
def plot_Rt(ndays=100, lastday=-1, delay=9, regions='Nederland', source='r7',
Tc=4.0, correct_anomalies=True, g_mobility=False, mode='show',
ylim=None, only_trendlines=False):
"""Plot R number based on growth/shrink in daily cases.
- lastday: use case data up to this day.
- delay: assume delay days from infection to positive report.
alternatively: list of (timestamp, delay) tuples if the delay varies over time.
        The timestamps refer to the date of report. See doc of estimate_Rt_df.
    - source: 'r7' or 'sg' for rolling 7-day average or Savitzky-Golay-
filtered data.
    - Tc: generation interval time.
- regions: comma-separated string (or list of str);
'Nederland', 'V:xx' (holiday region), 'P:xx' (province), 'M:xx'
(municipality).
set to 'DUMMY' or '' to plot only RIVM curve.
- correct_anomalies: whether to correct for known reporting anomalies.
- g_mobility: include Google mobility data (experimental, not very usable yet).
- mode: 'show' or 'return_fig'
- ylim: optional y axis range (ymin, ymax)
- only_trendlines: no scatter points, only trend lines.
"""
Rt_rivm = DFS['Rt_rivm']
fig, ax = plt.subplots(figsize=(10, 5))
fig.subplots_adjust(top=0.90, bottom=0.12, left=0.09, right=0.92)
plt.xticks(rotation=-20)
if ylim:
ax.set_ylim(*ylim)
    # dict: municipality -> population
# from rcParams['axes.prop_cycle']
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',
'#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] * 5
markers = 'o^v<>s+x123' * 5
labels = [] # tuples (y, txt)
if isinstance(regions, str):
regions = regions.split(',')
if len(regions) == 0:
regions = ['DUMMY']
for i_region, (region, color, marker) in enumerate(zip(regions, colors, markers)):
df1, _npop = get_region_data(
'Nederland' if region=='DUMMY' else region,
lastday=lastday, correct_anomalies=correct_anomalies
)
source_col = dict(r7='Delta7r', sg='DeltaSG')[source]
# skip the first 10 days because of zeros
Rdf = estimate_Rt_df(df1[source_col].iloc[10:], delay=delay, Tc=Tc)
Rt = Rdf['Rt'].iloc[-ndays:]
delays = Rdf['delay'].iloc[-ndays:]
delay_min, delay_max = delays.min(), delays.max()
if delay_min == delay_max:
delay_str = f'{delay_min:.2g}'
else:
delay_str = f'{delay_min:.2g}-{delay_max:.2g}'
fmt = 'o'
psize = 5 if ndays < 30 else 3
if region.startswith('POP:'):
label = region[4:] + ' k inw.'
elif region == 'Nederland':
label = 'R schatting Nederland'
else:
label = re.sub('^[A-Z]+:', '', region)
if not only_trendlines and region != 'DUMMY':
ax.plot(Rt[:-3], fmt, label=label, marker=marker, markersize=psize, color=color)
ax.plot(Rt[-3:], fmt, markersize=psize, color=color, marker=marker, alpha=0.35)
# add confidence range (ballpark estimate)
print(region)
# Last 3 days are extrapolation, but peek at one extra day for the
# smooth curve generation.
# SG filter (13, 2): n=13 (2 weeks) will iron out all weekday effects
# remaining despite starting from a 7-day average.
Rt_smooth = scipy.signal.savgol_filter(Rt.iloc[:-2], 13, 2)[:-1]
Rt_smooth = pd.Series(Rt_smooth, index=Rt.index[:-3])
print(f'Smooth R: {Rt_smooth.iloc[-1]:.3g} @ {Rt_smooth.index[-1]}')
if region == 'Nederland':
# Error: hardcoded estimate 0.05. Because of SG filter, last 6 days
# are increasingly less accurate.
Rt_err = np.full(len(Rt_smooth), 0.05)
Rt_err[-6:] *= np.linspace(1, 1.4, 6)
ax.fill_between(Rt_smooth.index,
Rt_smooth.values-Rt_err, Rt_smooth.values+Rt_err,
color=color, alpha=0.15, zorder=-10
)
# This is for posting on Twitter
Rt_smooth_latest = Rt_smooth.iloc[-1]
Rt_point_latest = Rt.iloc[-4]
date_latest = Rt.index[-4].strftime('%d %b')
slope = (Rt_smooth.iloc[-1] - Rt_smooth.iloc[-4])/3
if abs(Rt_smooth_latest - Rt_point_latest) < 0.015:
txt = f'R={(Rt_smooth_latest+Rt_point_latest)/2:.2f}'
else:
txt = (f'R={Rt_point_latest:.2f} (datapunt), '
f'R={Rt_smooth_latest:.2f} (voorlopige trendlijn)')
print(f'Update reproductiegetal Nederland t/m {date_latest}: {txt}.'
' #COVID19NL\n'
f'Trend: {"+" if slope>=0 else "−"}{abs(slope):.3f} per dag.')
label = None
if region == 'Nederland':
label = 'R trend Nederland'
elif only_trendlines:
label = re.sub('^.*:', '', region)
if region != 'DUMMY':
smooth_line = ax.plot(Rt_smooth[:-5], color=color, alpha=1, zorder=0,
linestyle=('-' if i_region < 10 else '-.'),
label=label
)
ax.plot(Rt_smooth[-6:], color=color, alpha=1, zorder=0,
linestyle='--', dashes=(2,2))
labels.append((Rt[-1], f' {label}'))
if len(labels) == 0:
print('Note: no regions to plot.')
if Rt_rivm is not None:
tm_lo, tm_hi = Rt.index[[0, -1]] # lowest timestamp
tm_rivm_est = Rt_rivm[Rt_rivm['R'].isna()].index[0] # 1st index with NaN
# final values
df_Rt_rivm_final = Rt_rivm.loc[tm_lo:tm_rivm_est, ['R', 'Rt_update']]
ax.plot(df_Rt_rivm_final.iloc[:-1]['R'], 'k-', label='RIVM')
ax.plot(df_Rt_rivm_final.iloc[:-1]['Rt_update'], 'k^', markersize=4,
label='RIVM updates', zorder=10)
# estimates
Rt_rivm_est = Rt_rivm.loc[tm_rivm_est-pd.Timedelta(1, 'd'):Rt.index[-1]]
# print(Rt_rivm_est)
ax.fill_between(Rt_rivm_est.index, Rt_rivm_est['Rmin'], Rt_rivm_est['Rmax'],
color='k', alpha=0.15, label='RIVM prognose')
iex = dict(r7=3, sg=7)[source] # days of extrapolation
# add_labels(ax, labels, lab_x)
# marker at 12:00 on final day (index may be a few hours off)
t_mark = Rt.index[-iex-1]
t_mark +=
|
pd.Timedelta(12-t_mark.hour, 'h')
|
pandas.Timedelta
|
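A short sketch of the marker placement the record above completes: adding a Timedelta of (12 - hour) hours snaps a timestamp to 12:00 on the same day. The example date is arbitrary.

import pandas as pd

t_mark = pd.Timestamp('2021-12-05 03:00')
t_mark += pd.Timedelta(12 - t_mark.hour, 'h')
print(t_mark)  # 2021-12-05 12:00:00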
import os
import pandas as pd
import requests
import json
import re
import warnings
import geocoder
import getpass
google_key = os.getenv("POETRY_GOOGLE_KEY")
yelp_key = os.getenv("POETRY_YELP_KEY")
pd.options.display.max_colwidth = 300
if google_key is None:
google_key = getpass.getpass("Please input your Google API key.\n")
if yelp_key is None:
yelp_key = getpass.getpass("Please input your Yelp Fusion API key.\n")
def ParsingAddress(raw_location_list):
"""
A supporting function that parses the raw location info from Yelp Fusion API to make it more readable.
Parameters
----------
raw_location_list : pandas.core.series.Series
Required. A pd.Series of dictionaries containing address information in the JSON output from Fusion API.
Returns
-------
pandas.core.series.Series
        A list that stores the more readable results. A typical element of the output list is a string of the format: "<street address>, <City>, <State> <ZIP code>", e.g. "509 Amsterdam Ave, New York, NY 10024".
"""
location_list = []
for raw_location in raw_location_list:
temp = [v for k,v in raw_location.items()]
temp_location = ', '.join(temp[len(temp)-1])
location_list = location_list + [temp_location]
return(location_list)
def SearchRestaurant(yelp_key = yelp_key,
searching_keywords = "restaurant",
location = "Union Square, New York, NY 10003",
longitude = None,
latitude = None,
distance_max = 15000,
list_len = 40,
price = "1,2,3,4"):
"""
Perform restaurant searching on Yelp.
Parameters
----------
yelp_key : str
Optional. The API key for Yelp fusion API.
searching_keywords : str
Optional. The keywords for Yelp searching. If not specified, the general term "restaurant" is searched.
location : str
Optional. A string describe the address of the location around which the search is conducted.
longitude : float
Required if location is not specified. The longitude of the current location.
latitude : float
Required if location is not specified. The latitude of the current location.
distance_max : int
Optional. A suggested search radius in meters.
list_len : int
Optional. The number of restaurants to show in the resulting dataframe.
price : str
Optional. Pricing levels to filter the search result with: 1 = $, 2 = $$, 3 = $$$, 4 = $$$$.
The price filter can be a list of comma delimited pricing levels. For example, "1, 2, 3" will
filter the results to show the ones that are $, $$, or $$$.
Returns
-------
pandas.core.frame.DataFrame
        A dataframe that includes essential information about the restaurants in the search result.
Examples
--------
>>> from yelpgoogletool import yelpgoogletool
>>> yelpgoogletool.SearchRestaurant(location = "Columbia University, NYC",list_len=2)
name id distance location price phone rating review_count
0 The Tang - Upper West Side TzhAlljC_843JO7UDDUIaQ 0.6 920 Amsterdam Ave, New York, NY 10025 $$ +16465967970 4.5 215
1 <NAME> H9GD7km7riFooM0FkdwOPg 0.5 2756 Broadway, New York, NY 10025 $$ +12128735025 4.0 2527
"""
# Check whether the parameters are of valid type
longlat_input_checker = (longitude == None) + (latitude == None)
assert type(searching_keywords) == str, "The parameter 'searching_keywords' should be a string!"
assert type(location) == str, "The parameter 'location' should be a string!"
assert (type(longitude) == type(None) or type(longitude) == float), "The parameter 'longitude' should be a float!"
assert (type(latitude) == type(None) or type(latitude) == float), "The parameter 'latitude' should be a float!"
assert type(distance_max) == int, "The parameter 'distance_max' should be an integer!"
assert type(list_len) == int, "The parameter 'list_len' should be an integer!"
assert (type(price) == type(None) or type(price) == str), "The parameter 'price' should be a str representing price levels, e.g. '1,2,3'!"
    # Check that longitude and latitude are either both specified or both unspecified
assert longlat_input_checker != 1, "Either both or neither of 'longitude' and 'latitude' should be specified!"
# Check whether some parameters are off limit
assert distance_max <= 20000, "You do not want to travel more than 20 km for your dinner!"
assert list_len <= 500, "The length of searching result list should be no more than 500!"
# Set the parameters for API queries
url = "https://api.yelp.com/v3/businesses/search"
headers = {"Authorization":yelp_key}
querystring = {"term":searching_keywords}
if longlat_input_checker == 0:
assert (longitude >= -180) & (latitude >= -180) & (longitude <= 180) & (latitude <= 180), "Invalid 'longitude' or 'latitude'"
if location != "Union Square, New York, NY 10003":
warnings.warn("The parameter 'location' is not used when longitude and latitude are specified.")
querystring["longitude"] = longitude
querystring["latitude"] = latitude
else:
querystring["location"] = location
if type(price) == str:
querystring["price"] = price
    # Set offset to be the number of records that have already been searched
offset = 0
df_restaurant_list = pd.DataFrame()
while offset < list_len:
# This is the number of records to search in this batch
limit = min(list_len - offset, 50)
querystring["limit"] = limit
querystring["offset"] = offset
# request data from Fusion API
response = requests.request("GET", url, headers = headers, params = querystring)
rspn_json = response.json()
#if rspn_json
# merge the data into df_restaurant_list
for business in rspn_json['businesses']:
            df_restaurant_list = pd.concat([df_restaurant_list, pd.DataFrame([business])], ignore_index=True)
# Update the offset variable
offset += limit
df_restaurant_list = df_restaurant_list[['name',
'id',
'distance',
'location',
'price',
'phone',
'rating',
'review_count']].assign(
location = lambda x: ParsingAddress(x.location),
review_count = df_restaurant_list['review_count'].astype(int),
distance = round(df_restaurant_list['distance']/1609,1))
return(df_restaurant_list)
def ExactRestaurantID(restaurant_name, location, yelp_key=yelp_key):
"""
Search the unique id of a restaurant by its name and location.
Parameters
----------
restaurant_name : str
Required. The name of the restaurant. Do not need to be exact.
location : str
Optional. A string describe the address of the location around which the search is conducted.
yelp_key : str
Optional. The API key for Yelp fusion API.
Returns
-------
str
A string that serves as the identifier of the restaurant of interest.
Example
-------
>>> from yelpgoogletool import yelpgoogletool
>>> yelpgoogletool.ExactRestaurantID("<NAME>","NYC")
name location
0 <NAME> 178 Broadway, Brooklyn, NY 11211
1 <NAME> Steak House 255 Northern Blvd, Great Neck, NY 11021
2 <NAME>'s Double Eagle Steakhouse 1221 Ave Of The Americas, New York, NY 10020
3 DeStefano's Steakhouse 89 Conselyea St, Brooklyn, NY 11211
4 Wolfgang's Steakhouse 4 Park Ave, New York, NY 10016
Is the desired restaurant in the list above? (Y/N)
Y
Please input the index at the beginning of the row corresponding to the desired restaurant.
0
Restaurant found!
'4yPqqJDJOQX69gC66YUDkA'
"""
# Set the parameters for API queries
url = "https://api.yelp.com/v3/businesses/search"
headers = {"Authorization":yelp_key}
found = "N"
restaurants_searched = 0
while found != "Y":
querystring = {"term":restaurant_name, "limit":5, "location":location,"offset":restaurants_searched}
response = requests.request("GET", url, headers = headers, params = querystring)
restaurants_searched += 5
rspn_json = response.json()
#if rspn_json
# merge the data into df_restaurant_list
df_restaurant_list =
|
pd.DataFrame()
|
pandas.DataFrame
|
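A hedged sketch of the accumulation pattern in SearchRestaurant above: start from an empty DataFrame and grow it batch by batch; pd.concat is used here because DataFrame.append is gone in current pandas. The two stand-in "pages" replace real Fusion API responses.

import pandas as pd

df_restaurant_list = pd.DataFrame()
pages = ([{'name': 'A', 'rating': 4.5}], [{'name': 'B', 'rating': 4.0}])  # stand-ins for API pages
for page in pages:
    df_restaurant_list = pd.concat([df_restaurant_list, pd.DataFrame(page)], ignore_index=True)
print(df_restaurant_list)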
__author__ = "<NAME>"
import numpy
import pandas
import logging
import traceback
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
coloc_r = None
K_COLUMNS = None
from ... import Utilities
from ...data_management import DataFrameStreamer
#Awful hack to avoid having to learn the python import process
def initialize(script_path=None):
global coloc_r
global K_COLUMNS
if coloc_r:
raise RuntimeError("Coloc re-initialization not allowed.")
if not script_path:
from rpy2.robjects.packages import importr
coloc_r = importr('coloc').coloc_abf
K_COLUMNS = ["gene_id", "p0", "p1", "p2", "p3", "p4"]
else:
r = robjects.r
r['source'](script_path)
coloc_r = r['coloc.abf']
K_COLUMNS = ["gene_id", "p0", "p1", "p2", "p3", "p4", "cp0", "cp1", "cp2", "cp3", "cp4"]
########################################################################################################################
def _sanitize(d):
if "pvalue" in d:
d = d.loc[d.pvalue>0]
if "frequency" in d:
d = d.loc[d.frequency>0]
if "maf" in d:
d = d.loc[d.maf>0]
return d
########################################################################################################################
def _read(gwas, cols, gwas_sample_size):
if gwas_sample_size == "FROM_GWAS":
cols += ["sample_size"]
d = pandas.read_table(gwas, usecols=cols)
logging.log(9, "sanitizing gwas")
#d = _sanitize(d)
gwas_sample_size = None if gwas_sample_size == "FROM_GWAS" else int(gwas_sample_size)
return d, gwas_sample_size
def _read_gwas_pvalue(gwas, gwas_sample_size):
d, gwas_sample_size = _read(gwas, ["panel_variant_id", "pvalue", "frequency"], gwas_sample_size)
return {x.panel_variant_id: (x.pvalue, x.frequency, gwas_sample_size if gwas_sample_size else x.sample_size) for x in d.itertuples() if (x.pvalue>0 and x.frequency>0)}
def _read_gwas_bse(gwas, gwas_sample_size):
d, gwas_sample_size = _read(gwas, ["panel_variant_id", "effect_size", "standard_error", "frequency"], gwas_sample_size)
return {x.panel_variant_id: (x.effect_size, x.standard_error, x.frequency, gwas_sample_size if gwas_sample_size is not None else x.sample_size)for x in d.itertuples() if x.frequency>0}
def _read_gwas_zscore_1(gwas, gwas_sample_size):
d, gwas_sample_size = _read(gwas, ["panel_variant_id", "zscore", "frequency"], gwas_sample_size)
return { x.panel_variant_id: (x.zscore, 1.0, x.frequency, gwas_sample_size if gwas_sample_size is not None else x.sample_size) for x in d.itertuples() if x.frequency>0}
def read_gwas(gwas, gwas_sample_size, gwas_mode="pvalue"):
methods = {"pvalue": _read_gwas_pvalue, "bse": _read_gwas_bse, "zscore_1":_read_gwas_zscore_1}
method = methods.get(gwas_mode)
if not method: raise RuntimeError("unsupported gwas mode")
return method(gwas, gwas_sample_size)
########################################################################################################################
def eqtl_streamer(eqtl_path, keys):
columns = ["maf", "pval_nominal", "slope", "slope_se"]
#_skip = lambda x: x not in keys
#return DataFrameStreamer.data_frame_streamer(eqtl_path, sanitize=True, to_numeric=columns, sentinel_column="gene_id", additional_skip_row_check=_skip)
#
return DataFrameStreamer.data_frame_streamer(eqtl_path, sanitize=True, to_numeric=columns, sentinel_column="gene_id")
def get_eqtl(d, eqtl_sample_size, eqtl_mode="bse"):
logging.log(9, "sanitizing eqtl")
####################################################################################################################
# This nice code can't be used because of an obscure coloc bug concerning the MAF>0 check.
# Coloc thinks itself smart and able to handle MAF==0 but it fails miserably.
# if eqtl_mode == "pvalue": l = lambda x: (x.pval_nominal, x.maf, eqtl_sample_size)
# elif eqtl_mode == "bse": l = lambda x: (x.slope, x.slope_se, x.maf, eqtl_sample_size)
# elif eqtl_mode == "zscore_1": l = lambda x: (x.slope/x.slope_se, 1.0, x.maf, eqtl_sample_size)
# else: raise RuntimeError("unsupported eqtl mode")
#return {x.variant_id: l(x) for x in d.itertuples()}
####################################################################################################################
# you suck bigtime, coloc. May a ciphrang feast on you on the outside.
if eqtl_mode == "pvalue":
r = {x.variant_id:(x.pval_nominal, x.maf, eqtl_sample_size) for x in d.itertuples() if (x.pval_nominal>0 and x.maf>0)}
elif eqtl_mode == "bse":
r = {x.variant_id:(x.slope, x.slope_se, x.maf, eqtl_sample_size) for x in d.itertuples() if x.maf>0}
elif eqtl_mode == "zscore_1":
r = {x.variant_id: (x.slope/x.slope_se, 1.0, x.maf, eqtl_sample_size) for x in d.itertuples() if x.maf>0}
else:
raise RuntimeError("unsupported eqtl mode")
return r
########################################################################################################################
def coloc_on_gwas_eqtl(gene, gwas, eqtl, gwas_mode, eqtl_mode, p1=1e-4, p2=1e-4, p12=1e-5):
g = {k: gwas[k] for k in eqtl if k in gwas}
keys = sorted(g.keys())
_gwas = _to_coloc_data(g, gwas_mode, keys)
_eqtl = _to_coloc_data(eqtl, eqtl_mode, keys)
return _coloc(gene, _gwas, _eqtl, p1, p2, p12)
def _convert(d, mode, keys):
if mode == "pvalue":
#converted = pandas.DataFrame([d[k] for k in keys], columns=["pvalue", "frequency", "sample_size"])
converted = pandas.DataFrame([(k,)+d[k] for k in keys], columns=["id", "pvalue", "frequency", "sample_size"])
elif mode == "bse" or mode == "zscore_1":
# zscore_1 has a different meaning but all processing difference is upstream
#converted = pandas.DataFrame([d[k] for k in keys], columns=["beta", "se", "frequency", "sample_size"])
converted =
|
pandas.DataFrame([(k,)+d[k] for k in keys], columns=["id", "beta", "se", "frequency", "sample_size"])
|
pandas.DataFrame
|
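For context on the completion above: _convert turns a dict of id -> (beta, se, frequency, sample_size) tuples into a tidy frame by prepending each key to its tuple. A minimal sketch with made-up values:

import pandas as pd

d = {"rs1": (0.12, 0.03, 0.21, 5000),
     "rs2": (-0.05, 0.02, 0.44, 5000)}
keys = sorted(d.keys())

converted = pd.DataFrame([(k,) + d[k] for k in keys],
                         columns=["id", "beta", "se", "frequency", "sample_size"])
print(converted)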
import argparse
from deepforest import get_data
from deepforest import preprocess
import torch
from torchvision.ops import nms
import cv2 as cv
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils.plotting import plot_bboxes
from models.predict_model import get_model
def predict_large_image(
numpy_image, model, patch_size=400, patch_overlap=0.05, iou_threshold=0.1, should_display=False
):
# crop original image
windows = preprocess.compute_windows(
numpy_image=numpy_image, patch_size=patch_size, patch_overlap=patch_overlap
)
predictions_list = []
for window in tqdm(windows, desc=f"Iterating sliding patches", disable=(not should_display)):
crop = numpy_image[window.indices()]
pred_df = model.predict_image(crop.astype(np.float32))
# adjust crop offset relative to the original image
pred_df["xmin"] = window.x + pred_df["xmin"]
pred_df["ymin"] = window.y + pred_df["ymin"]
pred_df["xmax"] = (window.x + window.w) - (patch_size - pred_df["xmax"])
pred_df["ymax"] = (window.y + window.h) - (patch_size - pred_df["ymax"])
predictions_list.append(pred_df)
final_preds_df =
|
pd.concat(predictions_list)
|
pandas.concat
|
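The completion above stitches the per-window prediction frames back into a single table with pandas.concat. A minimal sketch of that step; the box coordinates are invented:

import pandas as pd

window_preds = [
    pd.DataFrame({"xmin": [10], "ymin": [12], "xmax": [40], "ymax": [44], "score": [0.9]}),
    pd.DataFrame({"xmin": [400], "ymin": [380], "xmax": [430], "ymax": [410], "score": [0.8]}),
]

final_preds_df = pd.concat(window_preds, ignore_index=True)  # one row per detected box
print(final_preds_df)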
import base64
import hashlib
import hmac
import time
import json
import requests
import gdax
import pandas as pd
from lib.utils import *
class GdaxArmy():
"""
An API object that is built on top of the unofficial Gdax python api:
https://github.com/danpaquin/gdax-python
"""
def __init__(self):
self.client = gdax.PublicClient()
self.auth_client = None
def authenticate(self, api_key, secret_key, passphrase,
interest_currency, is_sandbox_url=True):
"""
Authenticate the account.
"""
if is_sandbox_url:
api_url = 'https://api-public.sandbox.gdax.com'
else:
api_url = 'https://api.gdax.com'
self.auth_client = gdax.AuthenticatedClient(api_key,
secret_key,
passphrase,
api_url=api_url
)
# Get the accounts info
accts = self.auth_client.get_accounts()
self.auth_client.accts_dict = {}
for acct in accts:
if acct['currency'] in interest_currency:
self.auth_client.accts_dict[acct['currency']] = acct
def get_accts_dict(self):
"""
Return the dict of account info retrieved from Gdax.
"""
return self.auth_client.accts_dict
def buy(self, price=1, size=0.01, product_id='LTC-USD',
post_only=True, **kwargs):
"""
Perform a buy order. For example buy 0.01 LTC @100 USD
"""
order = self.auth_client.buy(price=price, size=size,
product_id=product_id,
post_only=post_only, **kwargs)
return order
def sell(self, price=10000, size=0.01, product_id='LTC-USD',
post_only=True, **kwargs):
"""
Perform a sell order. For example sell 0.01 LTC @ 100USD
"""
order = self.auth_client.sell(price=price, size=size,
product_id=product_id,
post_only=post_only, **kwargs)
return order
def get_fills(self, **kwargs):
"""
Get fills info for a particular order id or product id.
"""
return self.auth_client.get_fills(**kwargs)
def get_order(self, id):
"""
Get the order info for a particular order.
"""
return self.auth_client.get_order(id)
def get_orders(self):
"""
Get the list of orders.
"""
return self.auth_client.get_orders()
def cancel_order(self, id):
"""
Cancel an order.
"""
return self.auth_client.cancel_order(id)
def cancel_all_orders(self, id, product='LTC-USD'):
"""
Cancel all the orders of a product
"""
return self.auth_client.cancel_all(product)
def get_trade_trends(self, currency='LTC-USD', granularity=3600,
num_buckets=200
):
"""
Get the trade trends over a period of time. A period consists of num_buckets buckets, each spanning granularity seconds. For example, granularity=3600 and num_buckets=24 covers the last 24 hours with one bucket per hour (see the sketch after this snippet). Each bucket contains the following information:
low: lowest price during the bucket interval
high: highest price during the bucket interval
open: first trade in the bucket interval
close: last trade in the bucket interval
volume: volume of trading activity during the bucket interval
:params currency: currency that we are interested in
:params granularity: unit is second. Amt of time in a bucket
:params num_buckets: number of buckets
"""
trades = self.client.get_product_historic_rates(currency,
granularity=granularity)
trades = trades[:num_buckets]
trades = trades[::-1] # closest time goes last
df =
|
pd.DataFrame(data=trades)
|
pandas.DataFrame
|
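The completion above wraps the raw candle rows in a DataFrame. Per the get_trade_trends docstring, each bucket carries low/high/open/close/volume values; assuming the usual GDAX row order [time, low, high, open, close, volume], a minimal sketch with invented numbers looks like this:

import pandas as pd

trades = [
    [1514764800, 220.0, 231.5, 225.0, 230.1, 1512.3],
    [1514768400, 228.4, 235.0, 230.1, 233.7, 1044.9],
]

df = pd.DataFrame(data=trades,
                  columns=["time", "low", "high", "open", "close", "volume"])
df["time"] = pd.to_datetime(df["time"], unit="s")  # epoch seconds -> timestamps
print(df)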
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A module for meta-learner model selection.
This module contains:
- :class:`MetaLearnModelSelect` for meta-learner models selection, which recommends the forecasting model based on time series or time series features;
- :class:`RandomDownSampler` for creating balanced dataset via downsampling.
"""
import ast
import logging
from collections import Counter, defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from kats.consts import TimeSeriesData
from kats.tsfeatures.tsfeatures import TsFeatures
from sklearn import metrics
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
class MetaLearnModelSelect:
"""Meta-learner framework on forecasting model selection.
This framework uses classification algorithms to recommend suitable forecasting models.
For training, it uses time series features as inputs and the best forecasting models as labels.
For prediction, it takes time series or time series features as inputs to predict the most suitable forecasting model.
The class provides count_category, preprocess, plot_feature_comparison, get_corr_mtx, plot_corr_heatmap, train, pred, pred_by_feature, pred_fuzzy, load_model and save_model.
Attributes:
metadata: Optional; A list of dictionaries representing the meta-data of time series (e.g., the meta-data generated by GetMetaData object).
Each dictionary d must contain at least 3 components: 'hpt_res', 'features' and 'best_model'. d['hpt_res'] represents the best hyper-parameters for each candidate model and the corresponding errors;
d['features'] are time series features, and d['best_model'] is a string representing the best candidate model of the corresponding time series data.
metadata should not be None unless load_model is True. Default is None.
load_model: Optional; A boolean to specify whether or not to load a trained model. Default is False.
Sample Usage:
>>> mlms = MetaLearnModelSelect(data)
>>> mlms.train(n_trees=200, test_size=0.1, eval_method='mean') # Train a meta-learner model selection model.
>>> mlms.pred(TSdata) # Predict/recommend forecasting model for a new time series data.
>>> mlms2.pred(TSdata, n_top=3) # Predict/recommend the top 3 most suitable forecasting models.
>>> mlms.save_model("mlms.pkl") # Save the trained model.
>>> mlms2 = MetaLearnModelSelect(metadata=None, load_model=True) # Create a new object and then load a pre-trained model.
>>> mlms2.load_model("mlms.pkl")
"""
def __init__(
self, metadata: Optional[List[Dict[str, Any]]] = None, load_model: bool = False
) -> None:
if not load_model:
if metadata is None:
msg = "Missing metadata!"
logging.error(msg)
raise ValueError(msg)
# Check for None before calling len(), otherwise a None metadata raises
# TypeError instead of the intended error message.
if len(metadata) <= 30:
msg = "Dataset is too small to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
if "hpt_res" not in metadata[0]:
msg = "Missing best hyper-params, not able to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
if "features" not in metadata[0]:
msg = "Missing time series features, not able to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
if "best_model" not in metadata[0]:
msg = "Missing best models, not able to train a meta learner!"
logging.error(msg)
raise ValueError(msg)
self.metadata = metadata
self._reorganize_data()
self._validate_data()
self.scale = False
self.clf = None
elif load_model:
pass
else:
msg = "Fail to initiate MetaLearnModelSelect."
raise ValueError(msg)
def _reorganize_data(self) -> None:
hpt_list = []
metadataX_list = []
metadataY_list = []
for i in range(len(self.metadata)):
if isinstance(self.metadata[i]["hpt_res"], str):
hpt_list.append(ast.literal_eval(self.metadata[i]["hpt_res"]))
else:
hpt_list.append(self.metadata[i]["hpt_res"])
if isinstance(self.metadata[i]["features"], str):
metadataX_list.append(
list(ast.literal_eval(self.metadata[i]["features"]).values())
)
else:
metadataX_list.append(list(self.metadata[i]["features"].values()))
metadataY_list.append(self.metadata[i]["best_model"])
self.col_namesX = list(self.metadata[0]["features"].keys())
self.hpt = pd.Series(hpt_list, name="hpt")
self.metadataX = pd.DataFrame(metadataX_list, columns=self.col_namesX)
self.metadataX.fillna(0, inplace=True)
self.metadataY = pd.Series(metadataY_list, name="y")
self.x_mean = np.average(self.metadataX.values, axis=0)
self.x_std = np.std(self.metadataX.values, axis=0)
self.x_std[self.x_std == 0] = 1.0
def _validate_data(self):
num_class = self.metadataY.nunique()
if num_class == 1:
msg = "Only one class in the label column (best_model), not able to train a classifier!"
logging.error(msg)
raise ValueError(msg)
local_count = list(self.count_category().values())
if min(local_count) * num_class < 30:
msg = "Not recommend to do downsampling! Dataset will be too small after downsampling!"
logging.info(msg)
elif max(local_count) > min(local_count) * 5:
msg = "Number of obs in majority class is much greater than in minority class. Downsampling is recommended!"
logging.info(msg)
else:
msg = "No significant data imbalance problem, no need to do downsampling."
logging.info(msg)
def count_category(self) -> Dict[str, int]:
"""Count the number of observations of each candidate model in meta-data.
Returns:
A dictionary storing the number of observations of each candidate model in meta-data.
"""
return Counter(self.metadataY)
def preprocess(self, downsample: bool = True, scale: bool = False) -> None:
"""Pre-process meta data before training a classifier.
There are 2 options in this function: 1) whether or not to downsample meta-data to ensure each candidate model has the same number of observations;
and 2) whether or not to rescale the time series features to zero-mean and unit-variance.
Args:
downsample: Optional; A boolean to specify whether or not to downsample meta-data to ensure each candidate model has the same number of observations.
Default is True.
scale: Optional; A boolean to specify whether or not to rescale the time series features to zero-mean and unit-variance.
Returns:
None
"""
if downsample:
self.hpt, self.metadataX, self.metadataY = RandomDownSampler(
self.hpt, self.metadataX, self.metadataY
).fit_resample()
logging.info("Successfully applied random downsampling!")
self.x_mean = np.average(self.metadataX.values, axis=0)
self.x_std = np.std(self.metadataX.values, axis=0)
self.x_std[self.x_std == 0] = 1.0
if scale:
self.scale = True
self.metadataX = (self.metadataX - self.x_mean) / self.x_std
logging.info(
"Successfully scaled data by centering to the mean and component-wise scaling to unit variance!"
)
def plot_feature_comparison(self, i: int, j: int) -> None:
"""Generate the time series features comparison plot.
Args:
i: An integer representing the index of the first feature vector from the feature matrix to be compared.
j: An integer representing the index of the second feature vector from the feature matrix to be compared.
Returns:
None
"""
combined = pd.concat([self.metadataX.iloc[i], self.metadataX.iloc[j]], axis=1)
combined.columns = [
str(self.metadataY.iloc[i]) + " model",
str(self.metadataY.iloc[j]) + " model",
]
# pyre-fixme[29]: `CachedAccessor` is not a function.
combined.plot(kind="bar", figsize=(12, 6))
def get_corr_mtx(self) -> pd.DataFrame:
"""Calculate correlation matrix of feature matrix.
Returns:
A pd.DataFrame representing the correlation matrix of time series features.
"""
return self.metadataX.corr()
def plot_corr_heatmap(self, camp: str = "RdBu_r") -> None:
"""Generate heat-map for correlation matrix of feature matrix.
Args:
camp: Optional; A string representing the color map used to generate the heat-map. Default is "RdBu_r".
Returns:
None
"""
fig, _ = plt.subplots(figsize=(8, 6))
_ = sns.heatmap(
self.get_corr_mtx(),
cmap=camp,
yticklabels=self.metadataX.columns,
xticklabels=self.metadataX.columns,
)
def train(
self,
method: str = "RandomForest",
eval_method: str = "mean",
test_size: float = 0.1,
n_trees: int = 500,
n_neighbors: int = 5,
) -> Dict[str, Any]:
"""Train a meta-learner model selection model (i.e., a classifier).
Args:
method: Optional; A string representing the name of the classification algorithm. Can be 'RandomForest', 'GBDT', 'SVM', 'KNN' or 'NaiveBayes'. Default is 'RandomForest'.
eval_method: Optional; A string representing the aggregation method used for computing errors. Can be 'mean' or 'median'. Default is 'mean'.
test_size: Optional; A float representing the proportion of test set, which should be within (0, 1). Default is 0.1.
n_trees: Optional; An integer representing the number of trees in random forest model. Default is 500.
n_neighbors: Optional; An integer representing the number of neighbors in KNN model. Default is 5.
Returns:
A dictionary summarizing the performance of the trained classifier on both training and validation set.
"""
if method not in ["RandomForest", "GBDT", "SVM", "KNN", "NaiveBayes"]:
msg = "Only support RandomForest, GBDT, SVM, KNN, and NaiveBayes method."
logging.error(msg)
raise ValueError(msg)
if eval_method not in ["mean", "median"]:
msg = "Only support mean and median as evaluation method."
logging.error(msg)
raise ValueError(msg)
if test_size <= 0 or test_size >= 1:
msg = "Illegal test set."
logging.error(msg)
raise ValueError(msg)
x_train, x_test, y_train, y_test, hpt_train, hpt_test = train_test_split(
self.metadataX, self.metadataY, self.hpt, test_size=test_size
)
if method == "RandomForest":
clf = RandomForestClassifier(n_estimators=n_trees)
elif method == "GBDT":
clf = GradientBoostingClassifier()
elif method == "SVM":
clf = make_pipeline(StandardScaler(), SVC(gamma="auto"))
elif method == "KNN":
clf = KNeighborsClassifier(n_neighbors=n_neighbors)
else:
clf = GaussianNB()
clf.fit(x_train, y_train)
y_fit = clf.predict(x_train)
y_pred = clf.predict(x_test)
# calculate model errors
fit_error, pred_error = {}, {}
# evaluate method
em = np.mean if eval_method == "mean" else np.median
# meta learning errors
fit_error["meta-learn"] = em(
[hpt_train.iloc[i][c][-1] for i, c in enumerate(y_fit)]
)
pred_error["meta-learn"] = em(
[hpt_test.iloc[i][c][-1] for i, c in enumerate(y_pred)]
)
# pre-selected model errors, for all candidate models
for label in self.metadataY.unique():
fit_error[label] = em(
[hpt_train.iloc[i][label][-1] for i in range(len(hpt_train))]
)
pred_error[label] = em(
[hpt_test.iloc[i][label][-1] for i in range(len(hpt_test))]
)
self.clf = clf
return {
"fit_error": fit_error,
"pred_error": pred_error,
"clf_accuracy": metrics.accuracy_score(y_test, y_pred),
}
def save_model(self, file_name: str) -> None:
"""Save the trained model.
Args:
file_name: A string representing the path to save the trained model.
Returns:
None.
"""
if self.clf is None:
msg = "Haven't trained a model."
logging.error(msg)
raise ValueError(msg)
else:
joblib.dump(self.__dict__, file_name)
logging.info("Successfully saved the trained model!")
def load_model(self, file_name: str) -> None:
"""Load a pre-trained model.
Args:
file_name: A string representing the path to load the pre-trained model.
Returns:
None.
"""
try:
self.__dict__ = joblib.load(file_name)
logging.info("Successfully loaded a pre-trained model!")
except Exception:
msg = "No existing pre-trained model. Please change file path or train a model first!"
logging.error(msg)
raise ValueError(msg)
def pred(
self, source_ts: TimeSeriesData, ts_scale: bool = True, n_top: int = 1
) -> Union[str, List[str]]:
"""Predict the best forecasting model for a new time series data.
Args:
source_ts: :class:`kats.consts.TimeSeriesData` object representing the new time series data.
ts_scale: Optional; A boolean to specify whether or not to rescale time series data (i.e., normalizing it with its maximum value) before calculating features. Default is True.
n_top: Optional; An integer for the number of top model names to return. Default is 1.
Returns:
A string or a list of strings of the names of forecasting models.
"""
ts = TimeSeriesData(pd.DataFrame(source_ts.to_dataframe().copy()))
if self.clf is None:
msg = "Haven't trained a model. Please train a model or load a model before predicting."
logging.error(msg)
raise ValueError(msg)
if ts_scale:
# scale time series to make ts features more stable
ts.value /= ts.value.max()
msg = "Successful scaled! Each value of TS has been divided by the max value of TS."
logging.info(msg)
new_features = TsFeatures().transform(ts)
# pyre-fixme[16]: `List` has no attribute `values`.
new_features_vector = np.asarray(list(new_features.values()))
if np.any(np.isnan(new_features_vector)):
msg = (
"Features of the test time series contains NaN value, consider processing it. Features are: "
f"{new_features}. Fill in NaNs with 0."
)
logging.warning(msg)
return self.pred_by_feature([new_features_vector], n_top=n_top)[0]
def pred_by_feature(
self,
source_x: Union[np.ndarray, List[np.ndarray], pd.DataFrame],
n_top: int = 1,
) -> np.ndarray:
"""Predict the best forecasting models given a list/dataframe of time series features
Args:
source_x: the time series features of the time series that one wants to predict, can be a np.ndarray, a list of np.ndarray or a pd.DataFrame.
n_top: Optional; An integer for the number of top model names to return. Default is 1.
Returns:
An array of strings representing the forecasting models. If n_top=1, a 1-d np.ndarray will be returned. Otherwise, a 2-d np.ndarray will be returned.
"""
if self.clf is None:
msg = "Haven't trained a model. Please train a model or load a model before predicting."
logging.error(msg)
raise ValueError(msg)
if isinstance(source_x, List):
x = np.row_stack(source_x)
elif isinstance(source_x, np.ndarray):
x = source_x.copy()
else:
msg = f"Invalid source_x type: {type(source_x)}."
logging.error(msg)
raise ValueError(msg)
if self.scale:
x = (x - self.x_mean) / self.x_std
x[np.isnan(x)] = 0.0
if n_top == 1:
return self.clf.predict(x)
prob = self.clf.predict_proba(x)
order = np.argsort(-prob, axis=1)
classes = np.array(self.clf.classes_)
return classes[order][:, :n_top]
def _bootstrap(self, data: np.ndarray, rep: int = 200) -> float:
"""Helper function for bootstrap test and returns the pvalue."""
diff = data[:, 0] - data[:, 1]
n = len(diff)
idx = np.random.choice(np.arange(n), n * rep)
sample = diff[idx].reshape(-1, n)
bs = np.average(sample, axis=1)
pvalue = np.average(bs < 0)
return pvalue
def pred_fuzzy(
self, source_ts: TimeSeriesData, ts_scale: bool = True, sig_level: float = 0.2
) -> Dict[str, Any]:
"""Predict a forecasting model for a new time series data using fuzzy method.
The fuzzy method returns the best candidate model; the second best model is also returned if there is no statistically significant difference between them.
The statistical test is based on bootstrap samples drawn from the fitted random forest model. This function is only available for the random forest classifier.
Args:
source_ts: :class:`kats.consts.TimeSeriesData` object representing the new time series data.
ts_scale: Optional; A boolean to specify whether or not to rescale time series data (i.e., normalizing it with its maximum value) before calculating features. Default is True.
sig_level: Optional; A float representing the significance level for bootstrap test. If pvalue>=sig_level, then we deem there is no difference between the best and the second best model.
Default is 0.2.
Returns:
A dictionary of prediction results, including forecasting models, their probability of being the best forecasting model, and the p-values of bootstrap tests.
"""
ts = TimeSeriesData(pd.DataFrame(source_ts.to_dataframe().copy()))
if ts_scale:
# scale time series to make ts features more stable
ts.value /= ts.value.max()
# pyre-fixme[16]: `List` has no attribute `values`.
test = np.asarray(list(TsFeatures().transform(ts).values()))
test[np.isnan(test)] = 0.0
if self.scale:
test = (test - self.x_mean) / self.x_std
test = test.reshape([1, -1])
m = len(self.clf.estimators_)
data = np.array(
[self.clf.estimators_[i].predict_proba(test)[0] for i in range(m)]
)
prob = self.clf.predict_proba(test)[0]
idx = np.argsort(-prob)[:2]
pvalue = self._bootstrap(data[:, idx[:2]])
if pvalue >= sig_level:
label = self.clf.classes_[idx[:2]]
prob = prob[idx[:2]]
else:
label = self.clf.classes_[idx[:1]]
prob = prob[idx[:1]]
ans = {"label": label, "probability": prob, "pvalue": pvalue}
return ans
def __str__(self):
return "MetaLearnModelSelect"
class RandomDownSampler:
"""An assistant class for class MetaLearnModelSelect to do random downsampling.
RandomDownSampler provides methods for creating a balanced dataset via downsampling. It contains fit_resample.
Attributes:
hpt: A `pandas.Series` object storing the best hyper-parameters and the corresponding errors for each model.
dataX: A `pandas.DataFrame` object representing the time series features matrix.
dataY: A `pandas.Series` object representing the best models for the corresponding time series.
"""
def __init__(self, hpt: pd.Series, dataX: pd.DataFrame, dataY: pd.Series) -> None:
self.hpt = hpt
self.dataX = dataX
self.dataY = dataY
self.col_namesX = self.dataX.columns
def fit_resample(self) -> Tuple[pd.Series, pd.DataFrame, pd.Series]:
"""Create balanced dataset via random downsampling.
Returns:
A tuple containing the `pandas.Series` object of the best hyper-parameters and the corresponding errors, the `pandas.DataFrame` object of the downsampled time series features,
and the `pandas.Series` object of the downsampled best models for the corresponding time series.
"""
resampled_x, resampled_y, resampled_hpt = [], [], []
# naive down-sampler technique for data imbalance problem
min_n = min(Counter(self.dataY).values())
idx_dict = defaultdict(list)
for i, c in enumerate(self.dataY):
idx_dict[c].append(i)
for key in idx_dict:
idx_dict[key] = np.random.choice(idx_dict[key], size=min_n, replace=False)
resampled_x += self.dataX.iloc[np.asarray(idx_dict[key]), :].values.tolist()
resampled_y += list(self.dataY.iloc[np.asarray(idx_dict[key])])
resampled_hpt += list(self.hpt.iloc[np.asarray(idx_dict[key])])
resampled_x =
|
pd.DataFrame(resampled_x)
|
pandas.DataFrame
|
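To make the downsampling step in RandomDownSampler concrete, here is a small self-contained sketch of the same idea: sample min_n rows per class without replacement and rebuild the frame. The labels and feature values are made up:

from collections import Counter, defaultdict

import numpy as np
import pandas as pd

dataX = pd.DataFrame({"f1": range(6), "f2": range(6, 12)})
dataY = pd.Series(["arima", "arima", "arima", "arima", "prophet", "prophet"], name="y")

min_n = min(Counter(dataY).values())          # size of the smallest class
idx_dict = defaultdict(list)
for i, c in enumerate(dataY):
    idx_dict[c].append(i)

resampled_x, resampled_y = [], []
for key in idx_dict:
    picked = np.random.choice(idx_dict[key], size=min_n, replace=False)
    resampled_x += dataX.iloc[np.asarray(picked), :].values.tolist()
    resampled_y += list(dataY.iloc[np.asarray(picked)])

resampled_x = pd.DataFrame(resampled_x, columns=dataX.columns)
resampled_y = pd.Series(resampled_y, name="y")
print(resampled_x, resampled_y, sep="\n")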
# -*- coding: utf-8 -*-
import pandas as pd
import pickle
epoch_time_model_path = "optimus_prime_epoch_time.pkl"
epoch_time_model = pickle.load(open(epoch_time_model_path, 'rb'))
accuracy_model_path = "optimus_prime_epoch_accuracy.pkl"
accuracy_model = pickle.load(open(accuracy_model_path, 'rb'))
data = {'num_of_paramters': 11689500, 'is_distributed': False, 'num_workers_data_loader': 16,
'num_of_gpus': 4, 'batch_size': 128, 'P40': 0, 'P100': 1, 'V100': 0}
test = {'num_of_paramters': '11689500', 'is_distributed': False, 'num_workers_data_loader': '4',
'num_of_gpus': '1', 'batch_size': '128', 'P40': 0, 'P100': 1, 'V100': 0}
def predict_epoch_time(data=data):
data =
|
pd.DataFrame([data])
|
pandas.DataFrame
|
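The completion above turns the single feature dict into a one-row frame, the shape a scikit-learn-style regressor expects for predict. A minimal sketch; the model call is left commented out because the pickled model is not available here:

import pandas as pd

data = {'num_of_paramters': 11689500, 'is_distributed': False,
        'num_workers_data_loader': 16, 'num_of_gpus': 4, 'batch_size': 128,
        'P40': 0, 'P100': 1, 'V100': 0}

X = pd.DataFrame([data])          # one row, one column per feature
print(X.shape)                    # (1, 8)
# predicted_seconds = epoch_time_model.predict(X)   # with the real model loaded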
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces_and_case():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|True
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(
df.a, df.b, rel_tol=0.2, ignore_spaces=True, ignore_case=True
)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_date_columns_equal():
data = """a|b|expected
2017-01-01|2017-01-01|True
2017-01-02|2017-01-02|True
2017-10-01|2017-10-10|False
2017-01-01||False
|2017-01-01|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
# First compare just the strings
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# Then compare converted to datetime objects
df["a"] = pd.to_datetime(df["a"])
df["b"] = pd.to_datetime(df["b"])
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
# and reverse
actual_out_rev = datacompy.columns_equal(df.b, df.a, rel_tol=0.2)
|
assert_series_equal(expect_out, actual_out_rev, check_names=False)
|
pandas.util.testing.assert_series_equal
|
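The API named in this record, pandas.util.testing.assert_series_equal, has been deprecated since pandas 1.0 in favour of pandas.testing.assert_series_equal, which behaves the same way. A minimal sketch of the modern import:

import pandas as pd
from pandas.testing import assert_series_equal

expected = pd.Series([True, True, False])
actual = pd.Series([True, True, False], name="result")

# check_names=False ignores the differing Series names, as in the tests above.
assert_series_equal(expected, actual, check_names=False)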
import argparse
import uuid
import sys
import os
import hashlib
import re
import cv2
import numpy as np
import pandas as pd
from ellipse import EllipseGroup
from bspline import BSplineGroup
from drawing_group import newDrawingGroup
#np.set_printoptions(threshold=50)
# Create a black image, a window and bind the function to window
# mouse callback function
DBG_LVL = 0
class PathCaptureSettings():
def __init__(self, settings):
"""
this should sponge up all that spare state out of global namespace
"""
self._itmax = settings.itmax
self._npaths = settings.npaths
self._current_iteration = 0 if settings.current_iter is None \
else settings.current_iter
self._current_path = 0 if settings.current_path is None \
else settings.current_path
self._paths_remaining = True
self._mode = "bspline"
self._img = np.zeros((512, 512, 3), np.uint8)
self._img_drawlayer = np.zeros((512, 512, 3), np.uint8)
if settings.seed_state is not None:
np.random.set_state(settings.seed_state)
elif settings.seed is not None:
np.random.seed(settings.seed)
# store the MT19937 initial state
(self._rnd_mt_str,
self._rnd_mt_keys,
self._rnd_mt_pos,
self._rnd_mt_has_gauss,
self._rnd_mt_gauss_cached) = np.random.get_state()
def get_img(self, layer='base'):
if layer=='base':
return self._img
if layer=='draw':
return self._img_drawlayer
if layer=='combined':
return np.maximum(self._img,self._img_drawlayer)
def to_dict(self):
return {
"iterations": self._itmax,
"n_paths": self._npaths,
"current_iter": self._current_iteration,
"current_path": self._current_path,
"rnd_state": self.get_seed_state_tuple(),
}
def get_seed_state_tuple(self):
return (self._rnd_mt_str, self._rnd_mt_keys, self._rnd_mt_pos,
self._rnd_mt_has_gauss, self._rnd_mt_gauss_cached)
@property
def paths_remaining(self):
return self._paths_remaining
def terminate(self):
self._paths_remaining = False
def _reset_random(self, n=0):
np.random.set_state((self._rnd_mt_str,
self._rnd_mt_keys,
self._rnd_mt_pos + n,
self._rnd_mt_has_gauss,
self._rnd_mt_gauss_cached))
def get_next_path_group(self):
"""
get the next path group.
this will fast forward to the current path and iteration, generating
all the same random states along the way
"""
for i in range(self._current_iteration, self._itmax):
if self._current_iteration < i:
self._current_iteration = i
self._reset_random()
for p in range(self._current_path, self._npaths):
if self._current_iteration == i and self._current_path < p:
self._current_path = p
if DBG_LVL > 0:
print(f"np.MT: key# {hashlib.md5(np.random.get_state()[1]).hexdigest()} | pos {np.random.get_state()[2]}")
pathgroup = self._get_next_path_group()
if self._current_iteration == i and self._current_path == p:
yield (pathgroup, i, p)
if self._current_iteration == i:
self._current_path = 0
self._current_iteration = None
self._current_path = None
def _get_next_path_group(self):
if self._mode == "bspline":
return newDrawingGroup(BSplineGroup, cached=True)(samples_per_n=50)
elif self._mode == "ellipse":
return newDrawingGroup(EllipseGroup, cached=True)()
def get_callbacks(path_capture_settings ):
group_gen = path_capture_settings.get_next_path_group()
path_group = None
def new_path_group():
"""
Gets a new group object and returns the co-routine that draws
each path highlighted iteratively
"""
nonlocal path_group
try:
path_group = next(group_gen)
except StopIteration as e:
path_capture_settings.terminate()
return path_group[0].draw_iterative_highlight_ends(path_capture_settings.get_img())
path_group_draw = iter([])
def cycle_path_group():
"""
cycles through the paths iteratively highlighting individual ones
"""
nonlocal path_group_draw
path_capture_settings.get_img('base')[:] = 0
path_capture_settings.get_img('draw')[:] = 0
try:
iteration_info = next(path_group_draw)
cv2.putText(path_capture_settings.get_img(),
f"{path_group[1]+1}/{path_capture_settings._itmax} "
f"{path_group[2]+1}/{path_capture_settings._npaths} "
f"{iteration_info+1}/3",
(50, 450), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
except StopIteration as e:
path_group_draw = new_path_group()
iteration_info = cycle_path_group()
return iteration_info
def keyboard_callback(key):
if key == 32: # spacebar
i = cycle_path_group()
path_group[0].set_active_subpath(i)
def draw_callback(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN and path_group is not None:
path_capture_settings.get_img('draw')[:] = 0
path_group[0].reset_active_path()
path_group[0].append_coord(x, y)
cv2.circle(path_capture_settings.get_img('draw'),
(x, y), 4, (255, 0, 0))
if event == cv2.EVENT_LBUTTONUP and path_group is not None:
pass
if event == cv2.EVENT_MOUSEMOVE and path_group is not None and flags & cv2.EVENT_FLAG_LBUTTON:
# draw circle and add point to path
path_group[0].append_coord(x, y)
cv2.circle(path_capture_settings.get_img('draw'),(x,y),4,(255,0,0))
return {'draw':draw_callback,
'keyboard':keyboard_callback}
class FileIOHelper():
def __init__(self, arg_settings):
self._data_file_root = os.path.abspath(arg_settings.outfile)
self._temp_file_root = os.path.abspath(f".temp.{uuid.uuid4().hex}")
self._data_file_ok = True
self._out_type = arg_settings.outtype
if arg_settings.outtype == "pickle":
self._writer = pd.DataFrame.to_pickle
self._writer_args = dict(compression="bz2")
self._reader = pd.read_pickle
self._reader_args = dict(compression="bz2")
self._file_ext = ".bz2.pkl"
elif arg_settings.outtype == "parquet":
self._writer = pd.DataFrame.to_parquet
self._writer_args = dict()
self._reader = pd.read_parquet
self._reader_args = dict()
self._file_ext = ".parquet"
elif arg_settings.outtype == "csv":
self._writer = pd.DataFrame.to_csv
self._writer_args = dict()
self._reader = pd.read_csv
self._reader_args = dict()
self._file_ext = ".csv"
def _get_file_root(self):
return self._data_file_root if self._data_file_ok else self._temp_file_root
@property
def settings_filename(self):
return f"{self._get_file_root()}.settings.pkl"
@property
def data_filename(self):
return f"{self._get_file_root()}{self._file_ext}"
def read(self, target="settings"):
if target == "settings":
return pd.read_pickle(self.settings_filename)
elif target == "data":
return self._reader(self.data_filename, **self._reader_args)
raise ValueError("unknown target type")
def write(self, data_frame):
try:
self._writer(data_frame, self.data_filename, **self._writer_args)
except FileNotFoundError:
self._data_file_ok = False
print( "The target output file could not be written to; output will try to be written to:\n"
f"{os.path.abspath(self.data_filename)}")
self._writer(data_frame, self.data_filename, **self._writer_args)
def parse_args():
global DBG_LVL
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outfile", dest="outfile", required=True, # type=String,
help="output file name [less extension] to save the"
" results data frame to")
parser.add_argument("-t", "--outtype", dest="outtype", default="pickle",
choices=["pickle", "parquet", "csv"],
help="output file name to save the results data frame to")
parser.add_argument("-s", "--seed", dest="seed", type=int, default=None,
help="set a seed value for the random shape generator for repeatability",
)
parser.add_argument("-S", "--settingsfile", dest="settingsfile", default=None,
help="load settings from file this would have been "
"created in a previous run")
parser.add_argument("-n", "--npaths", dest="npaths", type=int, default=25,
help="the number of paths to record")
parser.add_argument("-i", "--itmax", dest="itmax", type=int, default=5,
help="the number times to record each path")
parser.add_argument( "--_dbg_level", dest="DBG_LVL", type=int, default=0,
help=argparse.SUPPRESS)
parser.add_argument( "--start_iter", dest="current_iter", type=int, default=None,
help=argparse.SUPPRESS)
parser.add_argument( "--start_path", dest="current_path", type=int, default=None,
help=argparse.SUPPRESS)
args = parser.parse_args()
io_helper = FileIOHelper(args)
# run is a continuation from a previous one and should be appended to the same file
args.continuation = False
# check for existing settings file
if args.settingsfile is None and os.path.exists(io_helper.settings_filename):
msg = "Matching settings file found\n" \
"Would you like to load these settings? [Y/n]"
loadsettings = input(msg)
if re.match(r'[nN][oO]|[nN]', loadsettings) is None:
args.settingsfile = io_helper.settings_filename
if args.settingsfile is not None:
print("loading pickled settings file")
state = pd.read_pickle(args.settingsfile)
args.seed_state = state.rnd_state[0]
if state.current_iter[0] is not None or state.current_path[0] is not None \
and os.path.exists(io_helper.data_filename):
msg = "It looks like you were half way through this, shall we pick up were you left of? [Y/n]"
load_last_position = input(msg)
if re.match(r'[nN][oO]|[nN]', load_last_position ) is None:
args.continuation = True
args.current_path = state.current_path[0]
args.current_iter = state.current_iter[0]
args.itmax = state.iterations[0]
args.npaths = state.n_paths[0]
else:
args.seed_state = None
DBG_LVL = args.DBG_LVL
return args, io_helper
def main():
args, io_helper = parse_args()
path_capture_settings = PathCaptureSettings(args)
cv2.putText(path_capture_settings.get_img(),
"press [space] to begin", (160, 250),
cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))
call_backs = get_callbacks(path_capture_settings)
cv2.namedWindow('image')
cv2.setMouseCallback('image', call_backs['draw'])
while(path_capture_settings.paths_remaining):
cv2.imshow('image', path_capture_settings.get_img('combined'))
key = cv2.waitKey(20) & 0xFF
if key == 27:
break
call_backs['keyboard'](key)
state_d = path_capture_settings.to_dict()
state_df = pd.DataFrame.from_dict([state_d])
out_df = pd.DataFrame.from_dict(newDrawingGroup(BSplineGroup,
cached=True).exportlist())
if args.continuation:
old_df = io_helper.read("data")
out_df =
|
pd.concat([old_df, out_df])
|
pandas.concat
|
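The completion above appends the newly captured paths to the previously saved frame when the run is a continuation. A minimal round-trip sketch of that pattern with the bz2-compressed pickle writer FileIOHelper defaults to; the file name and rows are invented:

import pandas as pd

old_df = pd.DataFrame({"path_id": [0, 1], "n_points": [12, 15]})
old_df.to_pickle("capture.bz2.pkl", compression="bz2")

out_df = pd.DataFrame({"path_id": [2], "n_points": [9]})
old_df = pd.read_pickle("capture.bz2.pkl", compression="bz2")
out_df = pd.concat([old_df, out_df], ignore_index=True)
print(out_df)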
import requests
import pandas as pd
import datetime
import talib
def download_daily_data(ticker):
return download_data(datatype='K_DAY', ticker=ticker)
def downlaod_multiple_daily(tickers):
dfs = list()
for tk in tickers:
dfs.append(download_daily_data(tk))
return pd.concat(dfs, axis=0, keys=tickers)
def download_data(datatype, ticker):
url = "http://127.0.0.1:8000/historicals"
params = {'datatype': datatype, 'ticker': ticker, 'from_exchange': False}
result = requests.get(url, params=params).json()
df = pd.read_json(result['return']['content'])
return df
def download_multiple_data(datatype, tickers):
dfs = list()
for tk in tickers:
try:
new_df = download_data(datatype=datatype, ticker=tk)
dfs.append(new_df)
except Exception:
print(f'Failed to download {tk}')
return pd.concat(dfs, axis=0, keys=tickers) if len(dfs) > 0 else pd.DataFrame()
def fill_daily_data(ticker):
return fill_data(datatype='K_DAY', ticker=ticker)
def fill_multiple_daily(tickers):
return fill_multiple_data(datatypes='K_DAY', tickers=tickers)
def fill_data(datatype, ticker):
url = "http://192.168.3.3:8000/db/fill"
params = {'ticker': ticker, 'datatype': datatype}
result = requests.post(url, data=params).json()
return result['return']['content']
def fill_multiple_data(datatypes, tickers):
result = dict()
for tk in tickers:
try:
result[tk] = fill_data(datatype=datatypes, ticker=tk)
except Exception:
result[tk] = 'Failed'
return result
def sma_trade_recon(date, datatype, tickers, trade_record_path='Trade_Recon_Trade/'):
import os
import collections
df = download_multiple_data(datatype, tickers)
tmp = dict()
files = os.listdir(trade_record_path)
trades = {'BUY': collections.defaultdict(lambda: list()), 'SELL': collections.defaultdict(lambda: list())}
dfs = {'BUY': collections.defaultdict(lambda: dict()), 'SELL': collections.defaultdict(lambda: dict())}
for file in files:
splits = file.split('_')
ticker = splits[-1][:-5]
year = splits[0]
minute = int(splits[2]) + 1
hour = int(splits[1])
if minute == 60:
hour = hour + 1
minute = 0
dt = datetime.datetime.strptime(year, "%Y%m%d") + datetime.timedelta(hours=hour, minutes=minute)
trades[splits[-2]][ticker].append(dt)
dfs[splits[-2]][ticker][
|
pd.to_datetime(dt)
|
pandas.to_datetime
|
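The completion above normalises the reconstructed datetime into a pandas Timestamp so it can be used to look up rows of the downloaded frame. A minimal sketch with an invented price series:

import datetime
import pandas as pd

dt = datetime.datetime.strptime("20240102", "%Y%m%d") + datetime.timedelta(hours=10, minutes=30)
ts = pd.to_datetime(dt)                    # Timestamp('2024-01-02 10:30:00')

idx = pd.date_range("2024-01-02 09:30", periods=5, freq="30min")
prices = pd.Series([100.0, 100.5, 101.2, 100.9, 101.5], index=idx)
print(prices[ts])                          # look up the bar at that timestamp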
"""Auxiliary functions for the comparative statics analysis."""
import matplotlib.pyplot as plt
import pandas as pd
def ambiguity_effect_experiences(dict_model_dfs):
"""Summarizes average experience under for different models.
Parameters:
-----------
dict_model_dfs: dict
Dictionary that contains simulated models in the format
key = ambiguity level, value = pd.DataFrame.
Returns:
--------
summarized: pd.DataFrame
Dataframe that summarizes the experience levels for each occupation
and each ambiguity level.
"""
_df = dict_model_dfs
summarized = {}
for ak in _df.keys():
exp_bluecol = (
_df[ak].groupby("Identifier")["Experience_Blue_Collar"].max().mean()
)
exp_whitecol = (
_df[ak].groupby("Identifier")["Experience_White_Collar"].max().mean()
)
exp_military = _df[ak].groupby("Identifier")["Experience_Military"].max().mean()
exp_school = _df[ak].groupby("Identifier")["Experience_School"].max().mean()
summarized[ak] = [
exp_bluecol,
exp_whitecol,
exp_military,
exp_school,
]
summarized =
|
pd.DataFrame.from_dict(summarized, orient="index")
|
pandas.DataFrame.from_dict
|
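The completion above builds the summary table with one row per ambiguity level. A minimal sketch of DataFrame.from_dict(..., orient="index") with invented experience numbers:

import pandas as pd

summarized = {0.00: [6.1, 3.2, 0.4, 11.8],
              0.02: [5.7, 3.5, 0.5, 11.6]}

summarized = pd.DataFrame.from_dict(summarized, orient="index")
summarized.columns = ["Blue_Collar", "White_Collar", "Military", "School"]
print(summarized)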
import math
import sys
import heapq
import time
import re
import pandas as pd
import numpy as np
from collections import namedtuple
from empress.compare import Default_Cmp
from empress.compare import Balace_Cmp
from empress.tree import Tree
from empress.tree import DEFAULT_COLOR
from empress.tree import SELECT_COLOR
import empress.tools as tools
DEFAULT_WIDTH = 4096
DEFAULT_HEIGHT = 4096
class Model(object):
def __init__(self, tree, metadata, highlight_ids=None,
coords_file=None, port=8080):
""" Model constructor.
This initializes the model, including
the tree object and the metadata.
Parameters
----------
tree : skbio.TreeNode
Tree data structure.
metadata : str
Metadata object for the features being plotted on the tree.
highlight_ids : list of str
List of node ids to highlight
coords_file : str
Path to a file of precomputed tree coordinates (optional)
port : int
port number
Notes
-----
The first column name should be renamed to Node_id
"""
self.TIP_LIMIT = 100
self.zoom_level = 1
self.scale = 1
# convert to empress tree
print('converting tree TreeNode to Tree')
self.tree = Tree.from_tree(tree)
tools.name_internal_nodes(self.tree)
if coords_file is None:
print('calculating tree coords')
self.tree.tip_count_per_subclade()
self.edge_metadata = self.tree.coords(DEFAULT_WIDTH, DEFAULT_HEIGHT)
else:
print('extracting tree coords from file')
self.tree.from_file(coords_file)
self.edge_metadata = self.tree.to_df()
# read in main metadata
self.headers = metadata.columns.values.tolist()
self.edge_metadata = pd.merge(self.edge_metadata, metadata,
how='outer', on="Node_id")
# todo need to warn user that some entries in metadata do not have a mapping to tree
self.edge_metadata = self.edge_metadata[self.edge_metadata.x.notnull()]
self.edge_metadata['index'] = self.edge_metadata['Node_id']
self.edge_metadata = self.edge_metadata.set_index('index')
print(metadata)
self.triangles = pd.DataFrame()
self.selected_tree = pd.DataFrame()
self.selected_root = self.tree
self.triData = {}
self.colored_clades = {}
# cached subtrees
self.cached_subtrees = list()
self.cached_clades = list()
# start = time.time()
# print('starting auto collapse')
# self.default_auto_collapse(100)
# end = time.time()
# print('finished auto collapse in %d' % (end - start))
print('highlight_ids')
self.highlight_nodes(highlight_ids)
self.__clade_level()
def layout(self, layout_type):
""" Calculates the coordinates for the tree.
Pipeline function
This calculates the actual coordinates for
the tree. These are not the coordinates that
will be rendered. The calculated coordinates
will be updated as a class property.
The layout will only be utilized during
initialization.
Parameters
----------
layout_type : str
This specifies the layout algorithm to be used.
Note
----
This will wipe the coords and viewcoords in order to
recalculate the coordinates with the new layout.
"""
self.coords = pd.DataFrame()
# These are coordinates scaled to the canvas
self._canvascoords = np.array([])
# These are coordinates scaled for viewing
self.viewcoords = np.array([])
# TODO: These will need to be recomputed once the algorithms for
# new layouts has been created.
pass
def select_edge_category(self):
"""
Select categories required by webgl to plot edges
Parameters
----------
Returns
-------
edgeData : pd.Dataframe
dataframe containing information necessary to draw tree in
webgl
"""
# TODO: may want to add in width in the future
attributes = ['x', 'y', 'px', 'py', 'branch_color']
return self.select_category(attributes, 'branch_is_visible')
def select_node_category(self):
"""
Select categories required by webgl to plot nodes
Parameters
----------
Returns
-------
edgeData : pd.Dataframe
dataframe containing information necessary to draw tree in
webgl
"""
attributes = ['x', 'y', 'node_color', 'size']
return self.select_category(attributes, 'node_is_visible')
def select_category(self, attributes, is_visible_col):
""" Returns edge_metadata whose 'is_visible_col is True'
Parameters
----------
edgeData : pd.Dataframe
dataframe containing information necessary to draw tree in
webgl
"""
is_visible = self.edge_metadata[is_visible_col]
edgeData = self.edge_metadata[is_visible]
return edgeData[attributes]
def update_edge_category(self, attribute, category,
new_value=DEFAULT_COLOR, lower="",
equal="", upper=""):
""" Returns edge_metadata with updated width value which tells View
what to highlight
Parameters
----------
attribute : str
The name of the attribute(column of the table).
category:
The column of table that will be updated such as branch_color
new_value : str
A hex string representing color to change branch
lower : float
The smallest number a feature must match in order for its color to change
equal : str/float
The number/string a feature must match in order for its color to change
upper : float
The largest number a feature can match in order for its color to change
Returns
-------
edgeData : pd.Dataframe
All entries from self.edge_metadata that are visible and match criteria
passed in.
"""
# update the cached trees
new_value = DEFAULT_COLOR if new_value == "DEFAULT" else new_value
for edge_data, _ in self.cached_subtrees:
if lower != "":
edge_data.loc[edge_data[attribute] > float(lower), category] = new_value
if equal != "":
try:
value = float(equal)
except ValueError:
value = equal
edge_data.loc[edge_data[attribute] == value, category] = new_value
if upper != "":
edge_data.loc[edge_data[attribute] < float(upper), category] = new_value
# update the current tree
if lower != "":
self.edge_metadata.loc[self.edge_metadata[attribute] > float(lower), category] = new_value
if equal != "":
try:
value = float(equal)
except ValueError:
value = equal
self.edge_metadata.loc[self.edge_metadata[attribute] == value, category] = new_value
if upper != "":
self.edge_metadata.loc[self.edge_metadata[attribute] < float(upper), category] = new_value
return self.edge_metadata
def highlight_nodes(self, highlight_ids=None):
""" Reads in Node_ids for 'file' and colors their branches red
Parameters
----------
file : csv file containing Node_ids
"""
# with open(highlight_ids, 'r') as file:
# lines = file.readlines()
# ids = [re.split(';', item) for item in lines]
# em = self.edge_metadata
# for i in range(len(ids)):
# em.loc[em['Node_id'] == ids[i][0], 'branch_color'] = ids[i][1]
if highlight_ids is not None:
# idx = self.edge_metadata['Node_id'].isin(highlight_ids)
# self.edge_metadata.loc[idx, 'branch_color'] = highlight_color
self.edge_metadata.update(highlight_ids)
def get_highlighted_values(self, attribute, lower="",
equal="", upper=""):
""" Returns edge_metadata with that match the arguments
Parameters
----------
attribute : str
The name of the attribute(column of the table).
lower : int
The smallest number a feature must match in order for its color to change
equal : str/int
The number/string a feature must match in order for its color to change
upper : int
The largest number a feature can match in order for its color to change
Returns
-------
edgeData : pd.Dataframe
updated version of edge metadata
"""
columns = list(self.headers)
columns.append('x')
columns.append('y')
if lower != "":
return self.edge_metadata.loc[self.edge_metadata[attribute] > float(lower), columns]
if equal != "":
value = equal
return self.edge_metadata.loc[self.edge_metadata[attribute] == value, columns]
if upper != "":
return self.edge_metadata.loc[self.edge_metadata[attribute] < float(upper), columns]
def get_default_table_values(self):
""" Returns all edge_metadata values need to initialize slickgrid
Parameters
----------
Returns
-------
pd.DataFrame
dataframe containing information necessary to draw tree in
webgl
"""
columns = list(self.headers)
columns.append('x')
columns.append('y')
return self.edge_metadata[columns]
def get_headers(self):
""" Returns a list of the headers for the metadata
Parameters
----------
Returns
-------
return : list
a list of the internal metadata headers
"""
return self.headers
def color_clade(self, clade_field, clade, color):
""" Will highlight a certain clade by drawing a sector around the clade.
The sector will start at the root of the clade and create an arc from the
left most to the right most tip. The sector will also have a default arc length
equal to the distance from the root of the clade to the deepest tip.
Parameters
----------
clade : string
The clade to highlight
color : string (hex string)
The color to highlight the clade with
Returns
-------
return : list
A list of all highlighted clades
"""
if clade_field != 'None':
c = clade
clade_root = self.edge_metadata.loc[self.edge_metadata[clade_field] == clade]
clade_roots_id = clade_root['Node_id'].values
if len(clade_roots_id) == 0:
for c in range(0, len(self.cached_clades)):
if clade in self.cached_clades[c]:
self.cached_clades[c][clade]['color'] = color
return {"empty": []}
i = 0
for clade_root_id in clade_roots_id:
clades = self.tree.find_all(clade_root_id)
for clade in clades:
color_clade = self.tree.get_clade_info(clade)
color_clade['color'] = color
color_clade_s = tools.create_arc_sector(color_clade)
depth = len([node.name for node in clade.ancestors()])
self.colored_clades[c+str(i)] = {'data': color_clade_s,
'depth': depth,
'color': color,
'node': clade}
i += 1
else:
i = 0
clade_name = clade
for (k,v) in self.colored_clades.items():
if clade_name in k:
clade = v['node']
color_clade = self.tree.get_clade_info(clade)
color_clade['color'] = color
color_clade_s = tools.create_arc_sector(color_clade)
depth = len([node.name for node in clade.ancestors()])
self.colored_clades[k] = {'data': color_clade_s,
'depth': depth,
'color': color,
'node': clade}
i += 1
return self.get_colored_clade()
def clear_clade(self, clade):
""" Removes the colored clade
Note this doesn't remove any branches from the tree. It only removes the artifacts
created by javascript
"""
clades = self.colored_clades.keys()
clades = [c for c in clades]
for c in clades:
if clade in c:
self.colored_clades.pop(c)
for colored_clades in self.cached_clades:
clades = colored_clades.keys()
clades = [c for c in clades]
for c in clades:
if clade in c:
colored_clades.pop(c)
return self.get_colored_clade()
def get_colored_clade(self):
CLADE_INDEX = 0
DEPTH_INDEX = 1
clades = [(k, v['depth']) for k, v in self.colored_clades.items()]
clades.sort(key=lambda clade: clade[DEPTH_INDEX])
sorted_clades = [self.colored_clades[clade[CLADE_INDEX]]['data'] for clade in clades]
sorted_clades = [flat for two_d in sorted_clades for flat in two_d]
return {"clades": sorted_clades}
# Todo need to added the other items in colored-clades
def refresh_clades(self):
colored_clades = {}
for k, v in self.colored_clades.items():
clade_id = self.colored_clades[k]['id']
clade = self.tree.find(clade_id)
color_clade = self.tree.get_clade_info(clade)
color_clade['color'] = v['color']
color_clade_s = tools.create_arc_sector(color_clade)
depth = len([node.name for node in clade.ancestors()])
colored_clades[k] = {'data': color_clade_s,
'depth': depth,
'color': color_clade['color']}
return colored_clades
def create_subtree(self, attribute, lower="", equal="", upper=""):
""" Creates a subtree from from the tips whose metadata matches the users query. Also, if
the attribute referes to an inner node, then this method will first locate the tips whose
ansestor is the inner node. This will create a subtree by passing in the tips to skbio.shear()
Parameters
----------
attribute : string
The name of the attribute(column of the table).
lower : integer
The smallest number a feature must match in order for its color to change
equal : string/integer
The number/string a feature must match in order for its color to change
upper : integer
The largest number a feature can match in order for its color to change
Returns
-------
edgeData : pd.Dataframe
updated version of edge metadata
"""
# retrieve the tips of the subtree
nodes = self.get_highlighted_values(attribute, lower, equal, upper)
nodes = nodes['Node_id'].values
tips = list()
for node in nodes:
# node is a tip
if self.tree.find(node).is_tip():
tips.append(node)
continue
# retrieve the tips of node
for tip in self.tree.find(node).tips():
tips.append(tip.name)
# store the previous tree/metadata
self.cached_subtrees.append((self.edge_metadata, self.tree))
# grab the relevant columns from the old metadata
columns = list(self.edge_metadata.columns.values)
columns.remove('x')
columns.remove('y')
columns.remove('px')
columns.remove('py')
self.tree = self.tree.shear(tips)
nodes = list()
for node in self.tree.postorder():
nodes.append(node.name)
metadata = self.edge_metadata.loc[self.edge_metadata["Node_id"].isin(nodes), columns]
# create new metadata
self.edge_metadata = self.tree.coords(900, 1500)
self.edge_metadata = self.edge_metadata[['Node_id', 'x', 'y', 'px', 'py']]
self.edge_metadata = pd.merge(self.edge_metadata, metadata,
how='outer', on="Node_id")
self.cached_clades.append(self.colored_clades)
self.colored_clades = self.refresh_clades()
return self.edge_metadata
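# Illustrative sketch of the shear step used above (assumes scikit-bio's TreeNode
# API; the toy newick string is made up, and the lines are kept as comments so
# they do not execute as part of this module):
#
#   from io import StringIO
#   from skbio import TreeNode
#   tree = TreeNode.read(StringIO("((a:1,b:1)n1:1,(c:1,d:1)n2:1)root;"))
#   sub = tree.shear(['a', 'b'])                 # keep only the listed tips
#   print([tip.name for tip in sub.tips()])      # -> ['a', 'b']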
def get_old_tree(self):
""" retrives the nost recently cached tree if one exists.
"""
if len(self.cached_subtrees) > 0:
self.edge_metadata, self.tree = self.cached_subtrees.pop()
old_clades = self.colored_clades
self.colored_clades = self.cached_clades.pop()
for k, v in old_clades.items():
if k not in self.colored_clades:
self.colored_clades[k] = v
self.colored_clades[k]['color'] = old_clades[k]['color']
self.colored_clades = self.refresh_clades()
return self.edge_metadata
return pd.DataFrame()
def select_sub_tree(self, x1, y1, x2, y2):
""" Marks all tips whose coordinates in the box created by (x1, y1) and (x2, y2). The marked
tips can then be used in collapse_selected_tree
Parameters
----------
x1 : Number
The x coordinate of the top left corner of the select box
y1 : Number
The y coordinate of the top left corner of the select box
x2 : Number
The x coordinate of the bottom right corner of the select box
y2 : Number
The y coordinate of the bottom right corner of the select box
"""
df = self.edge_metadata
(x1, y1, x2, y2) = (float(x1), float(y1), float(x2), float(y2))
(smallX, smallY) = (min(x1, x2), min(y1, y2))
(largeX, largeY) = (max(x1, x2), max(y1, y2))
entries = df.loc[
(df['x'] >= smallX) & (df['x'] <= largeX) &
(df['y'] >= smallY) & (df['y'] <= largeY)]
entries = entries["Node_id"].values
if len(entries) == 0:
return pd.DataFrame()
if len(entries) == 1:
nodes = entries
root = entries
else:
root = self.tree.lowest_common_ancestor(entries)
nodes = [node.name for node in root.postorder(include_self=False)]
selected_tree = self.edge_metadata.loc[self.edge_metadata["Node_id"].isin(nodes)]
self.selected_tree = selected_tree.copy()
self.selected_tree['branch_color'] = SELECT_COLOR
self.selected_root = root
return self.selected_tree
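# Minimal sketch of the rectangular selection above, using only pandas boolean
# masking (toy coordinates are assumptions for illustration; left as comments):
#
#   import pandas as pd
#   pts = pd.DataFrame({'Node_id': ['a', 'b', 'c'],
#                       'x': [1.0, 5.0, 9.0],
#                       'y': [1.0, 5.0, 9.0]})
#   x1, y1, x2, y2 = 0.0, 0.0, 6.0, 6.0
#   box = pts.loc[(pts['x'] >= min(x1, x2)) & (pts['x'] <= max(x1, x2)) &
#                 (pts['y'] >= min(y1, y2)) & (pts['y'] <= max(y1, y2))]
#   print(box['Node_id'].tolist())               # -> ['a', 'b']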
def collapse_selected_tree(self):
clade = self.selected_root
self.__collapse_clade(clade)
self.update_collapse_clades()
return self.edge_metadata.loc[self.edge_metadata['branch_is_visible']]
def update_collapse_clades(self):
"""
Call this method after a series of clade collapses to hide collapsed clades that are
nested within other collapsed clades
"""
collapse_ids = self.triData.keys()
for node_id in collapse_ids:
ancestors = [a.name for a in self.tree.find(node_id).ancestors()]
for other_id in collapse_ids:
if other_id in ancestors:
self.triData[node_id]['visible'] = False
def get_triangles(self):
triangles = {k: v for (k, v) in self.triData.items() if v['visible']}
self.triangles = pd.DataFrame(triangles)
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
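# The factory above builds named aggregation methods from closures. A stripped-down
# sketch of the same pattern (plain Python; the names are illustrative only and the
# snippet is kept as comments):
#
#   import numpy as np
#   def _make_reducer(name, npfunc):
#       def f(values):
#           return npfunc(np.asarray(values))
#       f.__name__ = name
#       f.__doc__ = "Compute %s of the given values" % name
#       return f
#   total = _make_reducer('total', np.sum)
#   print(total([1, 2, 3]), total.__name__)      # -> 6 total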
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / freqency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntatic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
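# Usage sketch for Grouper (assumes a pandas build matching this module; toy data,
# kept as comments). A freq-based Grouper resamples a datetime column while grouping:
#
#   import numpy as np
#   import pandas as pd
#   idx = pd.date_range('2014-01-01', periods=6, freq='30s')
#   df = pd.DataFrame({'date': idx, 'x': np.arange(6)})
#   print(df.groupby(pd.Grouper(key='date', freq='60s'))['x'].sum())
#   # two rows of x fall into each 60-second bin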
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to setup the selection
# as are not passed directly but in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so we need to raise ValueError if
# we don't have this method, to indicate to aggregate that it should
# mark this column as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function ((f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
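# Sketch of the apply cases described in the docstring above (toy frame, kept as
# comments; only the first two cases are shown):
#
#   import pandas as pd
#   df = pd.DataFrame({'A': ['x', 'x', 'y'], 'B': [1.0, 3.0, 5.0]})
#   g = df.groupby('A')
#   # case 1: aggregation, f(chunk) -> scalar; result is indexed by group label
#   print(g.apply(lambda chunk: chunk['B'].sum()))
#   # case 2: transform-like, f(chunk) -> same-indexed object; chunks glued back
#   print(g['B'].apply(lambda s: s - s.mean()))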
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
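# Quick sketch of the per-group statistics defined above (toy data, kept as
# comments): mean/var use the cython path when possible, sem is derived as
# std / sqrt(count), and size reports the number of rows per group.
#
#   import pandas as pd
#   df = pd.DataFrame({'A': ['x', 'x', 'y', 'y'], 'B': [1.0, 3.0, 5.0, 9.0]})
#   g = df.groupby('A')['B']
#   print(g.mean())    # x -> 2.0, y -> 7.0
#   print(g.var())     # x -> 2.0, y -> 8.0   (ddof=1)
#   print(g.size())    # x -> 2,   y -> 2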
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will take the nth non-null row; dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
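# Mechanism sketch for _cumcount_array (pure numpy, kept as comments): each
# group's positions receive a running counter taken from `arr`, scattered back
# into a flat array the size of the selected object.
#
#   import numpy as np
#   indices = {'a': np.array([0, 2, 5]), 'b': np.array([1, 3, 4])}
#   out = np.zeros(6, dtype='int64')
#   for idx in indices.values():
#       out[idx] = np.arange(len(idx))
#   print(out)    # -> [0 0 1 1 2 2], i.e. the per-group cumulative count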
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result to our obj original type,
we may have roundtripped through object in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
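# Worked example for generate_bins_generic (hand-computed, kept as comments):
# with sorted values and bin edges, the returned offsets slice `values` into bins.
#
#   import numpy as np
#   values = np.array([1, 2, 3, 6, 7])
#   binner = np.array([0, 4, 8])
#   bins = generate_bins_generic(values, binner, closed='left')
#   print(bins)                    # -> [3 5]
#   print(values[:bins[0]])        # first bin:  [1 2 3]
#   print(values[bins[0]:bins[1]]) # second bin: [6 7]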
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passing in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
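# Sketch of the grouper conversions handled above (toy data, kept as comments):
# a dict grouper becomes its .get method, an index-aligned Series is used as-is,
# and both end up mapping index labels to group labels.
#
#   import pandas as pd
#   s = pd.Series([1.0, 2.0, 3.0, 4.0], index=['a', 'b', 'c', 'd'])
#   mapping = {'a': 'low', 'b': 'low', 'c': 'high', 'd': 'high'}
#   print(s.groupby(mapping).sum())             # low -> 3.0, high -> 7.0
#   print(s.groupby(pd.Series(mapping)).sum())  # same result via a Series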
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut if we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
v = next(v for v in values if v is not None)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if not _np_version_under1p7 or isinstance(v.index,MultiIndex) or key_index is None:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values,index=key_index,columns=index)
else:
# GH5788 instead of stacking; concat gets the dtypes correct
from pandas.tools.merge import concat
result = concat(values,keys=key_index,names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack([np.asarray(x) for x in values])
result = DataFrame(stacked_values.T,index=v.index,columns=key_index)
except (ValueError, AttributeError):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else clause
return Series(values, index=key_index)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
if (self._selected_obj.ndim == 2
and self._selected_obj.dtypes.isin(_DATELIKE_DTYPES).any()):
cd = 'coerce'
else:
cd = True
return result.convert_objects(convert_dates=cd)
else:
# only coerce dates if we find at least 1 datetime
cd = 'coerce' if any([ isinstance(v,Timestamp) for v in values ]) else False
return Series(values, index=key_index).convert_objects(convert_dates=cd)
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except Exception: # pragma: no cover
res = fast_path(group)
path = fast_path
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
concatenated.sort_index(inplace=True)
return concatenated
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed DataFrame on each group and
return a DataFrame having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each subframe
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
"""
# try to do a fast transform via merge if possible
try:
obj = self._obj_with_exclusions
if isinstance(func, compat.string_types):
result = getattr(self, func)(*args, **kwargs)
else:
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
result = getattr(self, cyfunc)()
else:
return self._transform_general(func, *args, **kwargs)
except:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
# nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
# a grouped that doesn't preserve the index, remap index based on the grouper
# and broadcast it
if ((not isinstance(obj.index,MultiIndex) and
type(result.index) != type(obj.index)) or
len(result.index) != len(obj.index)):
results = obj.values.copy()
for (name, group), (i, row) in zip(self, result.iterrows()):
indexer = self._get_index(name)
results[indexer] = np.tile(row.values,len(indexer)).reshape(len(indexer),-1)
return DataFrame(results,columns=result.columns,index=obj.index).convert_objects()
# we can merge the result in
# GH 7383
names = result.columns
result = obj.merge(result, how='outer', left_index=True, right_index=True).iloc[:,-result.shape[1]:]
result.columns = names
return result
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notnull(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Example
--------
>>> grouped = df.groupby(lambda x: mapping[x])
>>> grouped.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if (isinstance(res, (bool, np.bool_)) or
np.isscalar(res) and isnull(res)):
if res and notnull(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
_block_agg_axis = 1
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) %s already selected' % self._selection)
if isinstance(key, (list, tuple, Series, Index, np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: %s"
% str(bad_keys)[1:-1])
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif not self.as_index:
if key not in self.obj.columns:
raise KeyError("Column not found: %s" % key)
return DataFrameGroupBy(self.obj, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
else:
if key not in self.obj:
raise KeyError("Column not found: %s" % key)
# kind of a kludge
return SeriesGroupBy(self.obj[key], selection=key,
grouper=self.grouper,
exclusions=self.exclusions)
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if result:
if self.axis == 0:
result = DataFrame(result, index=obj.columns,
columns=result_index).T
else:
result = DataFrame(result, index=obj.index,
columns=result_index)
else:
result = DataFrame(result)
return result
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
group_levels = self.grouper.get_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
group_levels = self.grouper.get_group_levels()
zipped = zip(self.grouper.names, group_levels)
for i, (name, labels) in enumerate(zipped):
result.insert(i, name, labels)
result = result.consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result).convert_objects()
def _reindex_output(self, result):
"""
if we have categorical groupers, then we want to make sure that
we have a fully reindexed output to the levels. These may not have participated in
the groupings (e.g. may have all been nan groups)
This can re-expand the output space
"""
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
elif not any([ping._was_factor for ping in groupings]):
return result
levels_list = [ ping._group_index for ping in groupings ]
index = MultiIndex.from_product(levels_list, names=self.grouper.names)
return result.reindex(**{ self.obj._get_axis_name(self.axis) : index, 'copy' : False }).sortlevel()
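# Illustrative note (a sketch, not part of the original source): with more than one
# categorical grouper, _reindex_output expands the result to the full cartesian
# product of the category levels, so combinations never observed in the data still
# appear (typically as NaN rows). E.g. grouping on two categoricals with levels
# ['a', 'b'] and ['x', 'y'] where ('b', 'y') never occurs still yields the 4-row
# MultiIndex given by MultiIndex.from_product([['a', 'b'], ['x', 'y']]).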
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.tools.merge import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
from pandas.tools.plotting import boxplot_frame_groupby
DataFrameGroupBy.boxplot = boxplot_frame_groupby
class PanelGroupBy(NDFrameGroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If a
dict is passed, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise NotImplementedError
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
class NDArrayGroupBy(GroupBy):
pass
#----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = com._ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return com.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return _algos.groupsort_indexer(self.labels, self.ngroups)[0]
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
raise StopIteration
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data.take(self.sort_idx, axis=self.axis, convert=False)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise NotImplementedError
class ArraySplitter(DataSplitter):
pass
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # ix[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
#----------------------------------------------------------------------
# Misc utilities
def get_group_index(label_list, shape):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations.
"""
if len(label_list) == 1:
return label_list[0]
n = len(label_list[0])
group_index = np.zeros(n, dtype=np.int64)
mask = np.zeros(n, dtype=bool)
for i in range(len(shape)):
stride = np.prod([x for x in shape[i + 1:]], dtype=np.int64)
group_index += com._ensure_int64(label_list[i]) * stride
mask |= label_list[i] < 0
np.putmask(group_index, mask, -1)
return group_index
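# Worked example (a sketch, not from the original module): for label arrays
# [0, 1, 1] and [0, 0, 1] with shape (2, 2), the strides are 2 and 1, so the
# group ids are [0*2 + 0, 1*2 + 0, 1*2 + 1] == [0, 2, 3] -- offsets into the
# 2 x 2 cartesian product of label combinations; any -1 label marks the whole
# entry as -1 via the mask.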
_INT64_MAX = np.iinfo(np.int64).max
def _int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *=
|
long(x)
|
pandas.compat.long
|
"""Create tables of followup visits"""
import os
import sys
import argparse
import logging
from collections import namedtuple
from logging import debug, info, warning, error, critical
import contextlib
import sqlite3
import datetime
import pandas as pd
# constants
# exception classes
# interface functions
def overwrite_schedule(reference_schedule, replacement_sequences, gap=120):
"""Create a new schedule, replacing visits in a reference schedule.
Args:
- reference_schedule :: a pandas.DataFrame with the original visits
- replacement_sequences :: pandas GroupBy of replacement visit sequences
- gap :: gap duration (in seconds) between reference
and overwrite visits
"""
prior_clock = 0
subsequences = []
for _, replacement_sequence in replacement_sequences:
seq_start_clock = replacement_sequence.observationStartTime.min()
info("Overwriting with replacement sequence starting at "
+ datetime.datetime.fromtimestamp(seq_start_clock).isoformat())
# Find the start time of the next replacement sequence
replacement_start_clock = (replacement_sequence
.observationStartTime
.min())
# Find visits in the reference schedule between the end of the previous
# replacement sequence and the start of the next one, and add them
# to the list of sequences if there are any.
ref_subset = reference_schedule.query(
f'(observationStartTime > {prior_clock+gap}) and ((observationStartTime+visitTime+{gap})<{replacement_start_clock})')
if len(ref_subset) > 0:
subsequences.append(ref_subset)
# Actually add the replacement sequence
subsequences.append(replacement_sequence)
# Record the end for use in determining the next window.
prior_clock = (subsequences[-1]
.eval('observationStartTime+visitTime')
.max())
ref_subset = reference_schedule.query(
f'(observationStartTime > {prior_clock+gap})')
if len(ref_subset) > 0:
subsequences.append(ref_subset)
info("Combining %d subsequences" % len(subsequences))
if len(subsequences) >= 48:
group_size = int(len(subsequences)/48)
else:
group_size = 10
if len(subsequences) > group_size:
all_visits = subsequences[0]
subsequences = subsequences[1:]
while len(subsequences) > group_size:
new_subseqs = subsequences[:group_size]
subsequences = subsequences[group_size:]
all_visits = pd.concat([all_visits] + new_subseqs)
info("all_visits now %d long", len(all_visits))
if len(subsequences) > 0:
all_visits = pd.concat([all_visits] + subsequences)
else:
all_visits = pd.concat(subsequences, sort=True)
return all_visits
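# Minimal usage sketch (all names below are hypothetical, not part of this module):
# both inputs are expected to carry 'observationStartTime' and 'visitTime' columns,
# and the replacement visits are grouped into sequences before being passed in.
#   reference = pd.read_sql_query('SELECT * FROM SummaryAllProps', conn)
#   replacements = followup_visits.groupby('sequence_id')
#   combined = overwrite_schedule(reference, replacements, gap=120)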
def query_summary(fname):
"""Query and opsim database for visits
Args:
- fname :: the name of the sqlite3 database file
Returns:
a named tuple with the summary and proposal tables
"""
table_contents = []
with contextlib.closing(sqlite3.connect(fname)) as conn:
for table_name in OpsimDatabaseDataFrames._fields:
info("Querying " + table_name)
table_contents.append(
|
pd.read_sql_query('SELECT * FROM ' + table_name, conn)
|
pandas.read_sql_query
|
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
# TODO(ArrayManager) concat with reindexing
pytestmark = td.skip_array_manager_not_yet_implemented
def test_error():
df = pd.DataFrame(
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
)
with pytest.raises(ValueError, match="column must be a scalar"):
df.explode(list("AA"))
df.columns = list("AA")
with pytest.raises(ValueError, match="columns must be unique"):
df.explode("A")
def test_basic():
df = pd.DataFrame(
{"A": pd.Series([[0, 1, 2], np.nan, [], (3, 4)], index=list("abcd")), "B": 1}
)
result = df.explode("A")
expected = pd.DataFrame(
{
"A": pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4], index=list("aaabcdd"), dtype=object
),
"B": 1,
}
)
tm.assert_frame_equal(result, expected)
def test_multi_index_rows():
df = pd.DataFrame(
{"A": np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), "B": 1},
index=pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]),
)
result = df.explode("A")
expected = pd.DataFrame(
{
"A": pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4],
index=pd.MultiIndex.from_tuples(
[
("a", 1),
("a", 1),
("a", 1),
("a", 2),
("b", 1),
("b", 2),
("b", 2),
]
),
dtype=object,
),
"B": 1,
}
)
tm.assert_frame_equal(result, expected)
def test_multi_index_columns():
df = pd.DataFrame(
{("A", 1): np.array([[0, 1, 2], np.nan, [], (3, 4)], dtype=object), ("A", 2): 1}
)
result = df.explode(("A", 1))
expected = pd.DataFrame(
{
("A", 1): pd.Series(
[0, 1, 2, np.nan, np.nan, 3, 4],
index=pd.Index([0, 0, 0, 1, 2, 3, 3]),
dtype=object,
),
("A", 2): 1,
}
)
tm.assert_frame_equal(result, expected)
def test_usecase():
# explode a single column
# gh-10511
df = pd.DataFrame(
[[11, range(5), 10], [22, range(3), 20]], columns=list("ABC")
).set_index("C")
result = df.explode("B")
expected = pd.DataFrame(
{
"A": [11, 11, 11, 11, 11, 22, 22, 22],
"B": np.array([0, 1, 2, 3, 4, 0, 1, 2], dtype=object),
"C": [10, 10, 10, 10, 10, 20, 20, 20],
},
columns=list("ABC"),
).set_index("C")
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
import calendar
from ..utils import search_quote
from datetime import datetime, timedelta
from ..utils import process_dataframe_and_series
import rich
from jsonpath import jsonpath
from retry import retry
import pandas as pd
import requests
import multitasking
import signal
from tqdm import tqdm
from typing import (Dict,
List,
Union)
from ..shared import session
from ..common import get_quote_history as get_quote_history_for_stock
from ..common import get_history_bill as get_history_bill_for_stock
from ..common import get_today_bill as get_today_bill_for_stock
from ..common import get_realtime_quotes_by_fs
from ..utils import (to_numeric,
get_quote_id)
from .config import EASTMONEY_STOCK_DAILY_BILL_BOARD_FIELDS, EASTMONEY_STOCK_BASE_INFO_FIELDS
from ..common.config import (
FS_DICT,
MARKET_NUMBER_DICT,
EASTMONEY_REQUEST_HEADERS,
EASTMONEY_QUOTE_FIELDS
)
signal.signal(signal.SIGINT, multitasking.killall)
@to_numeric
def get_base_info_single(stock_code: str) -> pd.Series:
"""
Get basic information for a single stock
Parameters
----------
stock_code : str
Stock code
Returns
-------
Series
Basic information for the single stock
"""
fields = ",".join(EASTMONEY_STOCK_BASE_INFO_FIELDS.keys())
secid = get_quote_id(stock_code)
if not secid:
return pd.Series(index=EASTMONEY_STOCK_BASE_INFO_FIELDS.values())
params = (
('ut', 'fa5fd1943c7b386f172d6893dbfba10b'),
('invt', '2'),
('fltt', '2'),
('fields', fields),
('secid', secid),
)
url = 'http://push2.eastmoney.com/api/qt/stock/get'
json_response = session.get(url,
headers=EASTMONEY_REQUEST_HEADERS,
params=params).json()
s = pd.Series(json_response['data']).rename(
index=EASTMONEY_STOCK_BASE_INFO_FIELDS)
return s[EASTMONEY_STOCK_BASE_INFO_FIELDS.values()]
def get_base_info_muliti(stock_codes: List[str]) -> pd.DataFrame:
"""
Get basic information for multiple stocks
Parameters
----------
stock_codes : List[str]
List of stock codes
Returns
-------
DataFrame
Basic information for multiple stocks
"""
@multitasking.task
@retry(tries=3, delay=1)
def start(stock_code: str):
s = get_base_info_single(stock_code)
dfs.append(s)
pbar.update()
pbar.set_description(f'Processing => {stock_code}')
dfs: List[pd.Series] = []
pbar = tqdm(total=len(stock_codes))
for stock_code in stock_codes:
start(stock_code)
multitasking.wait_for_tasks()
df =
|
pd.DataFrame(dfs)
|
pandas.DataFrame
|
from glob import glob
from os import path, makedirs
from time import time
import pandas as pd
import argparse
from shutil import move
from PIL import Image
def dHashBin(imgfile, size=(8, 9)):
im = Image.open(imgfile)
pixels = im.resize(size, Image.ANTIALIAS).load()
bin_list = list()
h, w = size
for i in range(h):
for j in range(w-1):
b = '0' if pixels[i, j] < pixels[i, j+1] else '1'
bin_list.append(b)
return bin_list
def binlist2int(l):
return int(''.join(l), 2)
def dhash64(imgfile):
return binlist2int(dHashBin(imgfile))
def load_image_datasets(images_dir):
images = glob(images_dir + '/*/*')
extract_cid_aid = lambda x: path.basename(x).split('.')[0].split('_')
image_datasets = [(x, *extract_cid_aid(x)) for x in images]
return image_datasets
def save_datasets(datasets, save_path):
if len(datasets) == 0:
print('save empty datasets!')
return
save_dir = path.dirname(save_path)
if save_dir and (not path.isdir(save_dir)):
makedirs(save_dir)
with open(save_path, 'a+', encoding='utf-8') as f:
for url, cid, aid, dhash in datasets:
line = '{},{},{},{}'.format(url, cid, aid, dhash)
f.write('{}\n'.format(line))
f.close()
def hash_stat(datasets_file, hash_name='dhash', names=['url', 'cid', 'aid', 'dhash']):
df = pd.read_csv(datasets_file, names=names, header=None)
hash_df = df.sort_values(hash_name).groupby(hash_name)[['aid']].count().sort_values('aid', ascending=False)
hash_df =
|
pd.DataFrame({'hash': hash_df.index, 'count': hash_df['aid']})
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import itertools
import matplotlib.pyplot as plt
import re
import os
from scipy.special import loggamma, digamma, polygamma
def get_var_haplotype_df(vcf_df, mapped_reads_df):
"""
# Haplotype reconstruction inference
We can reconstruct the distribution of the most parsimonious alleles that make up the virus population of a sample
using a maximum-likelihood approach ([Ghafari et al., 2020](https://jvi.asm.org/content/early/2020/04/09/JVI.00014-20)).
We can think of the short reads covering a subset of loci of the genome $l$ as observed partial haplotype counts
($X_{l}^{P}$). The true (unobserved) proportions underlying these data is denoted by $q_{l}^{P}$. The proportions
($q_{l}^{P}$) of these partial haplotypes ($h_l^P$) can be mapped to the actual haplotypes ($h_i$ where $i$ indexes each
distinct haplotype) proportions ($q_{i}$) by $q_l^P = T_l\times q_i$. For instance, if the partial haplotypes consists of
{A-, G-} and the hypothetical true haplotypes are {AT, AC, GT, GC}:
$
\begin{bmatrix}
q_{A-}^{P}\\
q_{G-}^{P}
\end{bmatrix}=
\begin{bmatrix}
1 & 1 & 0 & 0\\
0 & 0 & 1 & 1
\end{bmatrix}
\begin{bmatrix}
q_{AT}\\
q_{AC}\\
q_{GT}\\
q_{GC}
\end{bmatrix}
$
Assuming that $X_{l}^{P} \sim DirMultinomial(N_l^P, \pi_l^P, \varphi)$ where $N_l^P$ is the total number of reads
covering loci subset $l$, we reconstruct the most parsimonious distribution of haplotypes by making guesses of $q_i$
(and assume some level of overdispersion $\varphi$) and maximise the likelihood of the partial haplotype Dirichlet-multinomial
model.
"""
try:
vcf_df = vcf_df.reset_index()
except:
pass
vcf_df = vcf_df.drop_duplicates(['nucpos', 'nuc_var'])
vcf_df = vcf_df.set_index("nucpos")
try:
mapped_reads_df = mapped_reads_df.reset_index()
except:
pass
# filter reads with polymorphic position
readidx_to_nucpos = [{"idx":read_idx, "nucpos":nucpos} for nucpos in vcf_df.index for read_idx in mapped_reads_df[(mapped_reads_df['start_nucaln_pos']<=nucpos)&(mapped_reads_df['end_nucaln_pos_plus1']>nucpos)].index]
readidx_to_nucpos =
|
pd.DataFrame.from_dict(readidx_to_nucpos)
|
pandas.DataFrame.from_dict
|
# -*- encoding: utf-8 -*-
"""
1. filter universe: separate helper functions
2. calc weights
3. generate trades
------------------------
- modify models: register function (with context parameter)
- modify AlphaStrategy: inherit
------------------------
suspensions and limit reachers:
1. deal with them in re_balance function, not in filter_universe
2. do not care about them when construct portfolio
3. subtract market value and re-normalize weights (positions) after (daily) market open, before sending orders
"""
from __future__ import print_function
from __future__ import absolute_import
import time
import numpy as np
import numpy.linalg as nlg
import pandas as pd
import scipy.stats as stats
import jaqs.trade.analyze as ana
from jaqs.trade import PortfolioManager
from jaqs.data import RemoteDataService
from jaqs.data import DataView
from jaqs.trade import model
from jaqs.trade import AlphaBacktestInstance
from jaqs.trade import AlphaTradeApi
from jaqs.trade import AlphaStrategy
import jaqs.util as jutil
from config_path import DATA_CONFIG_PATH, TRADE_CONFIG_PATH
data_config = jutil.read_json(DATA_CONFIG_PATH)
trade_config = jutil.read_json(TRADE_CONFIG_PATH)
dataview_dir_path = '../../output/prepared/ICCombine/dataview'
backtest_result_dir_path = '../../output/ICCombine'
ic_weight_hd5_path = '../../output/ICCombine/ic_weight.hd5'
custom_data_path = '../../output/ICCombine/custom_date.json'
def save_dataview():
ds = RemoteDataService()
ds.init_from_config(data_config)
dv = DataView()
props = {'start_date': 20150101, 'end_date': 20170930, 'universe': '000905.SH',
'fields': ('turnover,float_mv,close_adj,pe,pb'),
'freq': 1}
dv.init_from_config(props, ds)
dv.prepare_data()
factor_formula = 'Cutoff(Standardize(turnover / 10000 / float_mv), 2)'
dv.add_formula('TO', factor_formula, is_quarterly=False)
factor_formula = 'Cutoff(Standardize(1/pb), 2)'
dv.add_formula('BP', factor_formula, is_quarterly=False)
factor_formula = 'Cutoff(Standardize(Return(close_adj, 20)), 2)'
dv.add_formula('REVS20', factor_formula, is_quarterly=False)
factor_formula = 'Cutoff(Standardize(Log(float_mv)), 2)'
dv.add_formula('float_mv_factor', factor_formula, is_quarterly=False)
factor_formula = 'Delay(Return(close_adj, 1), -1)'
dv.add_formula('NextRet', factor_formula, is_quarterly=False)
dv.save_dataview(folder_path=dataview_dir_path)
def ic_calculation(snapshot, factorList):
"""
Calculate factor IC on single date
:param snapshot:
:return: factor IC on single date
"""
ICresult = []
for factor in factorList:
# drop na
factorPanel = snapshot[[factor, 'NextRet']]
factorPanel = factorPanel.dropna()
ic, _ = stats.spearmanr(factorPanel[factor], factorPanel['NextRet'])
ICresult.append(ic)
return ICresult
def get_ic(dv):
"""
Calculate factor IC on all dates and save it in a DataFrame
:param dv:
:return: DataFrame recording factor IC on all dates
"""
factorList = jutil.read_json(custom_data_path)
ICPanel = {}
for singleDate in dv.dates:
singleSnapshot = dv.get_snapshot(singleDate)
ICPanel[singleDate] = ic_calculation(singleSnapshot, factorList)
ICPanel = pd.DataFrame(ICPanel).T
return ICPanel
def ic_weight_calculation(icpanel):
"""
Calculate factor IC weight on single date
:param icpanel:
:return:
"""
mat = np.mat(icpanel.cov())
mat = nlg.inv(mat)
weight = mat * np.mat(icpanel.mean()).reshape(len(mat), 1)
weight = np.array(weight.reshape(len(weight), ))[0]
return weight
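# The solution above is the standard maximum-IC-IR weighting w = inv(Sigma) @ mu,
# where Sigma is the sample covariance of the factor ICs over the lookback window
# and mu is their mean IC. Tiny numeric sketch (hypothetical values):
#   mu = [0.05, 0.03], Sigma = [[0.01, 0], [0, 0.04]]  ->  w = [5.0, 0.75]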
def get_ic_weight(dv):
"""
Calculate factor IC weight on all dates and save it in a DataFrame
:param dv:
:return:
"""
ICPanel = get_ic(dv)
ICPanel = ICPanel.dropna()
N = 10
IC_weight_Panel = {}
for i in range(N, len(ICPanel)):
ICPanel_sub = ICPanel.iloc[i - N:i, :]
ic_weight = ic_weight_calculation(ICPanel_sub)
IC_weight_Panel[ICPanel.index[i]] = ic_weight
IC_weight_Panel =
|
pd.DataFrame(IC_weight_Panel)
|
pandas.DataFrame
|
"""Tests for the sdv.constraints.tabular module."""
import uuid
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, UniqueCombinations)
def dummy_transform():
pass
def dummy_reverse_transform():
pass
def dummy_is_valid():
pass
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid'
# Run
instance = CustomConstraint(
transform=dummy_transform,
reverse_transform=dummy_reverse_transform,
is_valid=is_valid_fqn
)
# Assert
assert instance.transform == dummy_transform
assert instance.reverse_transform == dummy_reverse_transform
assert instance.is_valid == dummy_is_valid
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
class TestGreaterThan():
def test___init___strict_false(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is False
assert instance._high_is_scalar is None
assert instance._low_is_scalar is None
assert instance._drop is None
def test___init___all_parameters_passed(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
- strict = True
- drop = 'high'
- high_is_scalar = True
- low_is_scalar = False
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == True
- instance._drop = 'high'
- instance._high_is_scalar = True
- instance._low_is_scalar = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True, drop='high',
high_is_scalar=True, low_is_scalar=False)
# Asserts
assert instance._low == 'a'
assert instance._high == 'b'
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop == 'high'
def test_fit__low_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a scalar if ``_low_is_scalar`` is None.
Input:
- Table without ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is True
def test_fit__low_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if low is
a column name if ``_low_is_scalar`` is None.
Input:
- Table with ``low`` in columns.
Side Effect:
- ``_low_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._low_is_scalar is False
def test_fit__high_is_scalar_is_none_determined_as_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a scalar if ``_high_is_scalar`` is None.
Input:
- Table without ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``True``.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is True
def test_fit__high_is_scalar_is_none_determined_as_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should figure out if high is
a column name if ``_high_is_scalar`` is None.
Input:
- Table with ``high`` in columns.
Side Effect:
- ``_high_is_scalar`` should be set to ``False``.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._high_is_scalar is False
def test_fit__high_is_scalar__low_is_scalar_raises_error(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should raise an error if
`_low_is_scalar` and `_high_is_scalar` are true.
Input:
- Table with one column.
Side Effect:
- ``TypeError`` is raised.
"""
# Setup
instance = GreaterThan(low=1, high=2)
# Run / Asserts
table_data = pd.DataFrame({'a': [1, 2, 3]})
with pytest.raises(TypeError):
instance.fit(table_data)
def test_fit__column_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._high`` if ``instance._drop`` is ``high``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to ``instance._low`` if ``instance._drop`` is ``low``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__column_to_reconstruct_default(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'b'
def test_fit__column_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_column_to_reconstruct``
to `low` if ``instance._high_is_scalar`` is ``True``.
Input:
- Table with two columns.
Side Effect:
- ``_column_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._column_to_reconstruct == 'a'
def test_fit__diff_column_one_column(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_column`` is ``'a#'``
"""
# Setup
instance = GreaterThan(low='a', high=3, high_is_scalar=True)
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#'
def test_fit__diff_column_multiple_columns(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should set ``_diff_column``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two columns.
Side Effect:
- ``_diff_column`` is ``'a#b'``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance.fit(table_data)
# Asserts
assert instance._diff_column == 'a#b'
def test_fit_int(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'i'
def test_fit_float(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_datetime(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` and ``_high_is_scalar`` are ``False``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'M'
def test_fit_type__high_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_high_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_fit_type__low_is_scalar(self):
"""Test the ``GreaterThan.fit`` method.
The ``GreaterThan.fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_low_is_scalar`` is ``True``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance.fit(table_data)
# Asserts
assert instance._dtype.kind == 'f'
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False], name='b')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False], name='a')
pd.testing.assert_series_equal(expected_out, out)
def test_transform_int_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_high(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_int_drop_low(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type int.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_float_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type float.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_column = 'a#b'
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_transform_high_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, high_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_transform_low_is_scalar(self):
"""Test the ``GreaterThan.transform`` method with high as scalar.
The ``GreaterThan.transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_high_is_scalar`` is ``True``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, low_is_scalar=True)
instance._diff_column = 'a#b'
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('float')
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = np.dtype('<M8[ns]')
instance._diff_column = 'a#b'
instance._is_datetime = True
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_low_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, low_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'b'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_high_is_scalar`` is ``True``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, high_is_scalar=True)
instance._dtype = pd.Series([1]).dtype # exact dtype (32 or 64) depends on OS
instance._diff_column = 'a#b'
instance._column_to_reconstruct = 'a'
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
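# The GreaterThan tests above describe the transform as log(high - low + 1) and the
# reverse as exp(diff) - 1 added back onto the low side, with the diff column named
# '<low>#<high>'. The helpers below are only an illustrative sketch of that math for
# numeric columns, not the sdv.constraints implementation; their names are hypothetical.
def _greater_than_transform_sketch(table_data, low='a', high='b', diff_column='a#b'):
    """Append a diff column holding log(high - low + 1)."""
    out = table_data.copy()
    out[diff_column] = np.log(out[high] - out[low] + 1)
    return out


def _greater_than_reverse_sketch(transformed, low='a', high='b', diff_column='a#b'):
    """Rebuild the high column as low + (exp(diff) - 1) and drop the diff column."""
    out = transformed.copy()
    out[high] = out[low] + (np.exp(out[diff_column]) - 1)
    return out.drop(columns=[diff_column])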
class TestPositive():
def test__init__(self):
"""
Test the ``Positive.__init__`` method.
The method is expected to set the ``_low`` instance variable
to 0, the ``_low_is_scalar`` variable to ``True`` and the
``_high_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- high = 'a'
- drop = None
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._high_is_scalar = False
- instance._low_is_scalar = True
- instance._drop = None
"""
# Run
instance = Positive(high='a', strict=True, drop=None)
# Asserts
assert instance._low == 0
assert instance._high == 'a'
assert instance._strict is True
assert instance._high_is_scalar is False
assert instance._low_is_scalar is True
assert instance._drop is None
class TestNegative():
def test__init__(self):
"""
Test the ``Negative.__init__`` method.
The method is expected to set the ``_high`` instance variable
to 0, the ``_high_is_scalar`` variable to ``True`` and the
``_low_is_scalar`` variable to ``False``. The rest of the
parameters should be passed.
Input:
- strict = True
- low = 'a'
- drop = None
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._high_is_scalar = True
- instance._low_is_scalar = False
- instance._drop = None
"""
# Run
instance = Negative(low='a', strict=True, drop=None)
# Asserts
assert instance._low == 'a'
assert instance._high == 0
assert instance._strict is True
assert instance._high_is_scalar is True
assert instance._low_is_scalar is False
assert instance._drop is None
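# The Positive and Negative tests above describe both classes as thin GreaterThan
# wrappers that pin one bound to zero. A hypothetical sketch of that idea (not the
# sdv implementation, and using only keyword arguments exercised by the tests):
def _positive_sketch(high, strict=False, drop=None):
    return GreaterThan(low=0, high=high, strict=strict, drop=drop, low_is_scalar=True)


def _negative_sketch(low, strict=False, drop=None):
    return GreaterThan(low=low, high=0, strict=strict, drop=drop, high_is_scalar=True)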
def new_column(data):
"""Formula to be used for the ``TestColumnFormula`` class."""
return data['a'] + data['b']
class TestColumnFormula():
def test___init__(self):
"""Test the ``ColumnFormula.__init__`` method.
It is expected to create a new Constraint instance
and import the formula to use for the computation.
Input:
- column = 'c'
- formula = new_column
"""
# Setup
column = 'c'
# Run
instance = ColumnFormula(column=column, formula=new_column)
# Assert
assert instance._column == column
assert instance._formula == new_column
def test_is_valid_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a valid data.
If the data fulfills the formula, result is a series of ``True`` values.
Input:
- Table data fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``True`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_valid(self):
"""Test the ``ColumnFormula.is_valid`` method for a non-valid data.
If the data does not fulfill the formula, result is a series of ``False`` values.
Input:
- Table data not fulfilling the formula (pandas.DataFrame)
Output:
- Series of ``False`` values (pandas.Series)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 2, 3]
})
instance = ColumnFormula(column=column, formula=new_column)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, False, False])
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``ColumnFormula.transform`` method.
It is expected to drop the indicated column from the table.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data without the indicated column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform(self):
"""Test the ``ColumnFormula.reverse_transform`` method.
It is expected to compute the indicated column by applying the given formula.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
column = 'c'
instance = ColumnFormula(column=column, formula=new_column)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [1, 1, 1]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [5, 7, 9]
})
pd.testing.assert_frame_equal(expected_out, out)
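# Sketch of the behaviour the ColumnFormula tests above describe: transform drops the
# derived column and reverse_transform recomputes it with the supplied formula.
# Illustrative only; not the sdv.constraints implementation.
def _column_formula_reverse_sketch(table_data, column='c', formula=new_column):
    out = table_data.copy()
    out[column] = formula(out)
    return out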
class TestRounding():
def test___init__(self):
"""Test the ``Rounding.__init__`` method.
It is expected to create a new Constraint instance
and set the rounding args.
Input:
- columns = ['b', 'c']
- digits = 2
"""
# Setup
columns = ['b', 'c']
digits = 2
# Run
instance = Rounding(columns=columns, digits=digits)
# Assert
assert instance._columns == columns
assert instance._digits == digits
def test___init__invalid_digits(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``digits`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 20
"""
# Setup
columns = ['b', 'c']
digits = 20
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits)
def test___init__invalid_tolerance(self):
"""Test the ``Rounding.__init__`` method with an invalid argument.
Pass in an invalid ``tolerance`` argument, and expect a ValueError.
Input:
- columns = ['b', 'c']
- digits = 2
- tolerance = 0.1
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 0.1
# Run
with pytest.raises(ValueError):
Rounding(columns=columns, digits=digits, tolerance=tolerance)
def test_is_valid_positive_digits(self):
"""Test the ``Rounding.is_valid`` method for a positive digits argument.
Input:
- Table data where some constrained values exceed the desired decimal places (pandas.DataFrame)
Output:
- Series of booleans indicating which rows satisfy the rounding (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 2
tolerance = 1e-3
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12, 5.51, None, 6.941, 1.129],
'c': [5.315, 7.12, 1.12, 9.131, 12.329],
'd': ['a', 'b', 'd', 'e', None],
'e': [123.31598, -1.12001, 1.12453, 8.12129, 1.32923]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([False, True, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_negative_digits(self):
"""Test the ``Rounding.is_valid`` method for a negative digits argument.
Input:
- Table data where some constrained values are not rounded to the nearest hundred (pandas.DataFrame)
Output:
- Series of booleans indicating which rows satisfy the rounding (pandas.Series)
"""
# Setup
columns = ['b']
digits = -2
tolerance = 1
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [401, 500, 6921, 799, None],
'c': [5.3134, 7.1212, 9.1209, 101.1234, None],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_zero_digits(self):
"""Test the ``Rounding.is_valid`` method for a zero digits argument.
Input:
- Table data where some constrained values are not whole numbers (pandas.DataFrame)
Output:
- Series of booleans indicating which rows satisfy the rounding (pandas.Series)
"""
# Setup
columns = ['b', 'c']
digits = 0
tolerance = 1e-4
instance = Rounding(columns=columns, digits=digits, tolerance=tolerance)
# Run
table_data = pd.DataFrame({
'a': [1, 2, None, 3, 4],
'b': [4, 5.5, 1.2, 6.0001, 5.99999],
'c': [5, 7.12, 1.31, 9.00001, 4.9999],
'd': ['a', 'b', None, 'd', 'e'],
'e': [2.1254, 17.12123, 124.12, 123.0112, -9.129434]
})
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False, True, True])
pd.testing.assert_series_equal(expected_out, out)
def test_reverse_transform_positive_digits(self):
"""Test the ``Rounding.reverse_transform`` method with positive digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.12345, None, 5.100, 6.0001, 1.7999],
'c': [1.1, 1.234, 9.13459, 4.3248, 6.1312],
'd': ['a', 'b', 'd', 'e', None]
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, None, 4],
'b': [4.123, None, 5.100, 6.000, 1.800],
'c': [1.100, 1.234, 9.135, 4.325, 6.131],
'd': ['a', 'b', 'd', 'e', None]
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_negative_digits(self):
"""Test the ``Rounding.reverse_transform`` method with negative digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b']
digits = -3
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41234.5, None, 5000, 6001, 5928],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [41000.0, None, 5000.0, 6000.0, 6000.0],
'c': [1.1, 1.23423, 9.13459, 12.12125, 18.12152],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_zero_digits(self):
"""Test the ``Rounding.reverse_transform`` method with zero digits.
Expect that the columns are rounded to the specified integer digit.
Input:
- Table data with the column with incorrect values (pandas.DataFrame)
Output:
- Table data with the computed column (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
digits = 0
instance = Rounding(columns=columns, digits=digits)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.12345, None, 5.0, 6.01, 7.9],
'c': [1.1, 1.0, 9.13459, None, 8.89],
'd': ['a', 'b', 'd', 'e', 'f']
})
out = instance.reverse_transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3, 4, 5],
'b': [4.0, None, 5.0, 6.0, 8.0],
'c': [1.0, 1.0, 9.0, None, 9.0],
'd': ['a', 'b', 'd', 'e', 'f']
})
pd.testing.assert_frame_equal(expected_out, out)
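# The Rounding.is_valid tests above imply a tolerance-based check: a row is valid when
# every constrained column is within ``tolerance`` of its value rounded to ``digits``
# (NaN compares as invalid, matching the expected outputs above). Illustrative sketch
# only, with a hypothetical helper name; not the sdv implementation.
def _rounding_is_valid_sketch(table_data, columns, digits, tolerance):
    rounded = table_data[columns].round(digits)
    return ((table_data[columns] - rounded).abs() <= tolerance).all(axis=1)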
def transform(data, low, high):
"""Transform to be used for the TestBetween class."""
data = (data - low) / (high - low) * 0.95 + 0.025
return np.log(data / (1.0 - data))
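# The Between.reverse_transform tests below describe the inverse of ``transform`` above:
# a sigmoid followed by rescaling from the 0.025-0.975 band back onto [low, high].
# Illustrative helper only; the tests themselves exercise sdv's implementation directly.
def reverse_transform_sketch(data, low, high):
    """Inverse of ``transform``: sigmoid, then rescale back to the original range."""
    data = 1.0 / (1.0 + np.exp(-data))
    return (data - 0.025) / 0.95 * (high - low) + low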
class TestBetween():
def test_transform_scalar_scalar(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as scalars.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [4, 5, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_scalar_column(self):
"""Test the ``Between.transform`` method with ``low`` as scalar and ``high`` as a column.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 6],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_scalar(self):
"""Test the ``Between.transform`` method with ``low`` as a column and ``high`` as scalar.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
pd.testing.assert_frame_equal(expected_out, out)
def test_transform_column_column(self):
"""Test the ``Between.transform`` method by passing ``low`` and ``high`` as columns.
It is expected to create a new column similar to the constraint ``column``, and then
scale and apply a logit function to that column.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data with an extra column containing the transformed ``column`` (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0, -1, 0.5],
'c': [0.5, 1, 6]
})
instance.fit(table_data)
out = instance.transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as scalars.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True,
low_is_scalar=True)
table_data = pd.DataFrame({
'b': [4, 5, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [4, 5, 6],
'a#0.0#1.0': transform(table_data[column], low, high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_scalar_column(self):
"""Test ``Between.reverse_transform`` with ``low`` as scalar and ``high`` as a column.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
table_data = pd.DataFrame({
'b': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0.5, 1, 6],
'a#0.0#b': transform(table_data[column], low, table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_scalar(self):
"""Test ``Between.reverse_transform`` with ``low`` as a column and ``high`` as scalar.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'a#b#1.0': transform(table_data[column], table_data[low], high)
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_column_column(self):
"""Test ``Between.reverse_transform`` with ``low`` and ``high`` as columns.
It is expected to recover the original table which was transformed, but with different
column order. It does so by applying a sigmoid to the transformed column and then
scaling it back to the original space. It also replaces the transformed column with
an equal column but with the original name.
Input:
- Transformed table data (pandas.DataFrame)
Output:
- Original table data, without necessarily keeping the column order (pandas.DataFrame)
"""
# Setup
column = 'a'
low = 'b'
high = 'c'
instance = Between(column=column, low=low, high=high)
table_data = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a': [0.1, 0.5, 0.9]
})
# Run
instance.fit(table_data)
transformed = pd.DataFrame({
'b': [0, -1, 0.5],
'c': [0.5, 1, 6],
'a#b#c': transform(table_data[column], table_data[low], table_data[high])
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = table_data
pd.testing.assert_frame_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``Between.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the valid row and False
for the other two. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=True, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_strict_false(self):
"""Test the ``Between.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a valid row, a strictly invalid row and an
invalid row. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, and False
for the last one (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 1.0
instance = Between(column=column, low=low, high=high, strict=False, high_is_scalar=True,
low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 1, 3],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out, check_names=False)
def test_is_valid_scalar_column(self):
"""Test the ``Between.is_valid`` method with ``low`` as scalar and ``high`` as a column.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first two rows, False
for the last one. (pandas.Series)
"""
# Setup
column = 'a'
low = 0.0
high = 'b'
instance = Between(column=column, low=low, high=high, low_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 0.9],
'b': [0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, False])
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_column_scalar(self):
"""Test the ``Between.is_valid`` method with ``low`` as a column and ``high`` as scalar.
It is expected to return whether the constraint ``column`` is between the
``low`` and ``high`` values.
Input:
- Table data where the second value is smaller than ``low`` and
last value is greater than ``high``. (pandas.DataFrame)
Output:
- True should be returned for the first row, False
for the last two. (pandas.Series)
"""
# Setup
column = 'a'
low = 'b'
high = 1.0
instance = Between(column=column, low=low, high=high, high_is_scalar=True)
# Run
table_data = pd.DataFrame({
'a': [0.1, 0.5, 1.9],
'b': [-0.5, 1, 0.6],
})
instance.fit(table_data)
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, False, False])
pd.testing.assert_series_equal(expected_out, out)
# bca4abm
# See full license in LICENSE.txt.
import logging
import os.path
import numpy as np
import pandas as pd
from activitysim.core import inject
from activitysim.core import config
logger = logging.getLogger(__name__)
@inject.table()
def summary_results():
logger.debug("initializing empty summary_results table")
return pd.DataFrame(index=[0])
@inject.table()
def coc_results():
logger.debug("initializing empty coc_results table")
return pd.DataFrame()
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame, concat, isna
from lib.case_line import convert_cases_to_time_series
from lib.cast import safe_int_cast
from lib.pipeline import DataSource
from lib.time import datetime_isoformat
from lib.utils import table_rename
class CearaDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
cases = table_rename(
dataframes[0],
{
"sexoPaciente": "sex",
"idadePaciente": "age",
"codigoMunicipioPaciente": "subregion2_code",
"dataResultadoExame": "date_new_tested",
"dataObito": "date_new_deceased",
"dataEntradaUtisSvep": "date_new_intensive_care",
"evolucaoCasoSivep": "_prognosis",
"dataInicioSintomas": "_date_onset",
"dataEvolucaoCasoSivep": "_date_update",
"resultadoFinalExame": "_test_result",
},
drop=True,
)
# Follow the procedure described in the data documentation to compute the confirmed cases:
# https://drive.google.com/file/d/1DUwST2zcXUnCJmJauiM5zmpSVWqLiAYI/view
cases["date_new_confirmed"] = None
confirmed_mask = cases["_test_result"] == "Positivo"
cases.loc[confirmed_mask, "date_new_confirmed"] = cases.loc[
confirmed_mask, "date_new_tested"
]
# Only count intensive care patients if they had a positive test result
cases.loc[~confirmed_mask, "date_new_intensive_care"] = None
# Drop columns which we have no use for
cases = cases[[col for col in cases.columns if not col.startswith("_")]]
# Make sure our region code is of type str
cases["subregion2_code"] = cases["subregion2_code"].apply(
lambda x: None if isna(x) else str(safe_int_cast(x))
)
import pandas as pd
import pytest
from actymath import Calc
from actymath.tables import A1967_70_Exams
# A1967-70 table - Actuarial Green tables for testing
table = A1967_70_Exams()
q45 = table.qx(45, select=True)
# Create calc
calc = Calc()
calc.add_life(45, q45)
calc.add_i(rate=0.04) # Will use 4% tables
def test_t():
calc.populate("t")
assert calc["t"].iloc[0] == 0
assert calc["t"].iloc[25] == 25
def test_n():
calc.add_term(n=10)
calc.add_term(n=20)
assert calc["n1"].iloc[0] == 10
assert calc["n1"].iloc[10] == 0
assert pd.isnull(calc["n1"].iloc[11])
import time
import numpy as np
import pandas as pd
from pandas import json_normalize
from tqdm import tqdm
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter
import httpx
import asyncio
import nest_asyncio
nest_asyncio.apply()
ano_pop = '2021'
arq_pop = "https://ftp.ibge.gov.br/Estimativas_de_Populacao/Estimativas_{}/estimativa_dou_{}.xls".format(
ano_pop, ano_pop)
df_population = pd.read_excel(arq_pop, sheet_name="Municípios")
df_population.columns = df_population.loc[0]
df_population = df_population.drop(df_population.index[0])
df_population = df_population[df_population["COD. UF"].notnull()]
df_population.rename(columns={"NOME DO MUNICÍPIO": "City",
"POPULAÇÃO ESTIMADA": "Population"}, inplace=True,)
df_population["ID_IBGE"] = (df_population["COD. UF"].map(
str).str[:2] + df_population["COD. MUNIC"].map(str).str[:5]).astype(str).astype(int)
df_population['Population'] = np.where(
    df_population['Population'].str.find("(") > 0,
    df_population['Population'].str.split(r"(", expand=True)[0].str.replace(".", "", regex=True),
    df_population['Population'])
df_population = df_population[["ID_IBGE", "Population"]].reset_index(drop=True)
df_population = df_population.astype({"ID_IBGE": int, "Population": int})
async def get_async(url):
async with httpx.AsyncClient() as client:
return await client.get(url)
urls = ["https://servicodados.ibge.gov.br/api/v1/localidades/estados"]
async def obter_estados():
resps = await asyncio.gather(*map(get_async, urls))
data_json = [resp.json() for resp in resps]
for html in data_json:
states = json_normalize(data=html, sep="")
list_urls = ["https://servicodados.ibge.gov.br/api/v1/localidades/estados/{}/municipios".format(
uf) for uf in states.id.unique()]
return list_urls
async def launch2(urls):
resps = await asyncio.gather(*map(get_async, urls))
data_json = [resp.json() for resp in resps]
global df_cities
df_cities = pd.DataFrame()
for html in data_json:
states = json_normalize(data=html, sep="")
#!/usr/bin/env python
# coding: utf-8
"""
Scipy model of the 750l radon detector
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import glob
import datetime
import os
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.integrate import odeint
#
# numerical solution - from
# http://wiki.scipy.org/Cookbook/Zombie_Apocalypse_ODEINT
# or
# http://docs.sympy.org/dev/modules/mpmath/calculus/odes.html#mpmath.odefun
#
# ... define system of equations as vector
# ... dY/dt = FY
#
radon_chain_half_life = np.array([3.82*24*3600, #Rn-222 (3.82 d)
3.05*60, #Po-218 (3.05 min)
26.8*60, #Pb-214 (26.8 min)
19.9*60 #Bi-214 (19.9 min)
])
radon_chain_num_alpha = np.array([1, 1, 0, 1])
radon_chain_name = [ 'Rn-222', 'Po-218', 'Pb-214', 'Bi-214']
radon_chain_lambda = np.log(2.0)/radon_chain_half_life
lamrn = radon_chain_lambda[0]
lama = radon_chain_lambda[1]
lamb = radon_chain_lambda[2]
lamc = radon_chain_lambda[3]
Q = 800.0 / 60.0 / 1000.0 # from Whittlestone's paper, L/min converted to m3/s
rs = 0.7 # from Whittlestone's paper (screen retention)
lamp = np.log(2.0)/120.0 # from Whittlestone's code (1994 tech report)
eff = 0.33 # Whittlestone's paper
Q_external = 40.0 / 60.0 / 1000.0
V_delay = 200.0 / 1000.0
V_tank = 750.0 / 1000.0
recoil_prob = 0.02
# play with values
#lamp = 0.0
# boundary value
def Nrn_ext_spike(t):
if t<60.0 and t >=0.0:
return 1.0 / lamrn
else:
return 0.0
def Nrn_ext_const(t):
# 1 Bq/m3
return 1.0 / lamrn
Nrn_ext = Nrn_ext_spike
def tank_concentrations(Y, t, Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn):
#unpack state vector
Na, Nb, Nc = Y
dNadt = Nrn*lamrn - Na*(lama+lamp)
dNbdt = Na*lama - Nb*(lamb+lamp)
dNcdt = Nb*lamb - Nc*(lamc+lamp)
return np.array([dNadt, dNbdt, dNcdt])
def calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn):
Y0 = np.zeros(3)
tt = V_tank/Q
t = np.linspace(0,tt,5)
parameters = Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn
soln = odeint(tank_concentrations, Y0, t, args=parameters)
return soln[-1,:]
#return t, soln #for testing
def rate_of_change(Y, t, Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob,
Nrn_ext=Nrn_ext_spike):
# unpack state vector
Nrnd, Nrn, Fa, Fb, Fc = Y
# effect of delay and tank volumes
dNrnddt = Q_external / V_delay * (Nrn_ext(t) - Nrnd) - Nrnd*lamrn
dNrndt = Q_external / V_tank * (Nrnd - Nrn) - Nrn*lamrn
# Na, Nb, Nc from steady-state in tank
Na, Nb, Nc = calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn)
# compute rate of change of each state variable
dFadt = Q*rs*Na - Fa*lama
dFbdt = Q*rs*Nb - Fb*lamb + Fa*lama * (1.0-recoil_prob)
dFcdt = Q*rs*Nc - Fc*lamc + Fb*lamb * (1.0-recoil_prob)
return np.array([dNrnddt, dNrndt, dFadt, dFbdt, dFcdt])
def rate_of_change_opt(Y, t, Q, rs, lamp, eff, Q_external, V_delay, V_tank,
recoil_prob, Na_factor, Nb_factor, Nc_factor,
Nrn_ext=Nrn_ext_spike):
# unpack state vector
Nrnd, Nrn, Fa, Fb, Fc = Y
# effect of delay and tank volumes
dNrnddt = Q_external / V_delay * (Nrn_ext(t) - Nrnd) - Nrnd*lamrn
dNrndt = Q_external / V_tank * (Nrnd - Nrn) - Nrn*lamrn
# Na, Nb, Nc from steady-state in tank
# Na, Nb, Nc = calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn)
Na = Na_factor * Nrn
Nb = Nb_factor * Nrn
Nc = Nc_factor * Nrn
# compute rate of change of each state variable
dFadt = Q*rs*Na - Fa*lama
dFbdt = Q*rs*Nb - Fb*lamb + Fa*lama * (1.0-recoil_prob)
dFcdt = Q*rs*Nc - Fc*lamc + Fb*lamb
return np.array([dNrnddt, dNrndt, dFadt, dFbdt, dFcdt])
#initial conditions
Y0 = np.zeros(5)
t = np.arange(0, 3600*5, 60) # time grid
parameters = Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob
soln = odeint(rate_of_change, Y0, t, args=parameters)
df = pd.DataFrame(index=t/60.0, data=soln)
df.columns = 'Nrnd,Nrn,Fa,Fb,Fc'.split(',')
df['Nrn_ext'] = [Nrn_ext(itm) for itm in t]
df['count rate'] = eff*(df.Fa*lama + df.Fc*lamc)
f, ax = plt.subplots()
df[['Nrn_ext','Nrnd','Nrn']].plot(ax=ax)
# add computed Na,Nb,Nc
df['Na'] = 0.
df['Nb'] = 0.
df['Nc'] = 0.
for ix, itm in df.iterrows():
Nrn = itm['Nrn']
Na,Nb,Nc = calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn)
    df.loc[ix, 'Na'] = Na
    df.loc[ix, 'Nb'] = Nb
    df.loc[ix, 'Nc'] = Nc
#end of adding Na Nb Nc
f, ax = plt.subplots()
df[['Na','Nb','Nc']].plot(ax=ax)
f, ax = plt.subplots()
df[['Fa','Fb','Fc']].plot(ax=ax)
f, ax = plt.subplots()
df[['count rate']].plot(ax=ax)
def detector_model(t, Y0 = np.zeros(5),
Q = 800.0 / 60.0 / 1000.0,
rs = 0.7,
lamp = np.log(2.0)/120.0,
eff = 0.33, # Whittlestone's paper
Q_external = 40.0 / 60.0 / 1000.0,
V_delay = 200.0 / 1000.0,
V_tank = 750.0 / 1000.0,
t_delay = 60.0,
recoil_prob = 0.02,
Nrn_ext=Nrn_ext_spike):
Na_factor, Nb_factor, Nc_factor = calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn=1.0)
parameters = Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Na_factor, Nb_factor, Nc_factor, Nrn_ext
soln = odeint(rate_of_change_opt, Y0, t-t_delay, args=parameters, hmax=1.0)
df = pd.DataFrame(index=t/60.0, data=soln)
df.columns = 'Nrnd,Nrn,Fa,Fb,Fc'.split(',')
df['Nrn_ext'] = [Nrn_ext(itm) for itm in t]
df['count rate'] = eff*(df.Fa*lama + df.Fc*lamc)
df['Na'] = 0.
df['Nb'] = 0.
df['Nc'] = 0.
for ix, itm in df.iterrows():
Nrn = itm['Nrn']
Na,Nb,Nc = calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn)
        df.loc[ix, 'Na'] = Na
        df.loc[ix, 'Nb'] = Nb
        df.loc[ix, 'Nc'] = Nc
return df
# perturb some things
df = detector_model(t)
df_standard = df.copy()
dfp = detector_model(t, Q=Q*2)
df['count rate Q*2'] = dfp['count rate']
dfp = detector_model(t, rs=1.0)
df['count rate rs=1'] = dfp['count rate']
dfp = detector_model(t, lamp=np.log(2.0)/20.0)
df['count rate plateout=20s'] = dfp['count rate']
dfp = detector_model(t, Q_external=Q_external*1.1)
df['count rate Q_external*1.1'] = dfp['count rate']
dfp = detector_model(t, recoil_prob=0.5)
df['count rate recoil_prob=0.5'] = dfp['count rate']
cols = [itm for itm in df.columns if 'count rate' in itm]
df[cols].plot()
norm = df['count rate'].max()
for itm in cols:
df[itm] = df[itm]/norm #df[itm].mean()
df[cols].plot()
for itm in cols:
df[itm] /= df[itm].mean()
df[cols].plot()
df[['count rate plateout=20s', 'count rate']].plot()
df = detector_model(t)
# compare with experiment
import util
fnames = ['../data-controlled-test-2/T1Mar15e.CSV',
'../data-controlled-test-2/T1Apr15e.CSV']
dfobs = [util.load_radon(itm) for itm in fnames]
dfobs = pd.concat(dfobs)
#
# ... how much do the calibration peaks vary?
#
# 1-min injections (expected to vary a bit)
ts0 = datetime.datetime(2015,3,18,13)
nspikes = 9
cp = datetime.timedelta(hours=6)
oneday = datetime.timedelta(days=1)
totcounts = [dfobs[ts0+ii*oneday:ts0+ii*oneday+cp].lld.sum()
for ii in range(nspikes)]
totcounts_spike = np.array(totcounts)
# one hour injections (from flushed source, less variation expected)
ti0 = datetime.datetime(2015,3,27,13)
ninj = 6
totcounts = [dfobs[ti0+ii*oneday:ti0+ii*oneday+cp].lld.sum()
for ii in range(ninj)]
totcounts_inj = np.array(totcounts)
f, ax = plt.subplots()
ax.plot(totcounts_spike/totcounts_spike.mean(), label='spikes')
ax.plot(totcounts_inj/totcounts_inj.mean(), label='1h injection')
ax.legend()
t0 = datetime.datetime(2015,3,17,13)
dt = datetime.timedelta(hours=5)
dt -= datetime.timedelta(minutes=1)
f, ax = plt.subplots()
for ii in range(6):
dfrel = dfobs[t0:t0+dt]
dfrel.index = (dfrel.index.values - dfrel.index.values[0]) / 1e9 / 60
dfrel.lld.plot(ax=ax, label=str(ii))
t0 += datetime.timedelta(days=1)
plt.legend()
# like prev, but normalise
t0 = datetime.datetime(2015,3,18,13)
f, ax = plt.subplots()
for ii in range(8):
    dfrel = dfobs[t0:t0+dt].copy()
    dfrel.lld /= dfrel.lld.mean()
dfrel.index = (dfrel.index.values - dfrel.index.values[0]) / 1e9 / 60
dfrel.lld.plot(ax=ax, label=str(ii))
t0 += datetime.timedelta(days=1)
plt.legend()
df['observed count rate'] = dfrel.lld.values
# normalise
df['observed count rate'] = df['observed count rate'] / \
df['observed count rate'].mean() * df['count rate'].mean()
# model with parameters from W&Z's "new version"
df_nv = detector_model(t, rs=0.987, recoil_prob=0,
lamp=np.log(2)/(6*60), #for a ~10% loss to plateout in 60sec
V_tank=730/1000.0)
df['"new version" count rate'] = df_nv['count rate'] / \
df_nv['count rate'].mean() * df['count rate'].mean()
df[['observed count rate', 'count rate', '"new version" count rate']].plot()
plt.show()
#
# ... try to optimise parameter values
#
def fit_to_obs(df):
dfrel = df.copy()
from scipy.optimize import minimize
Na_factor, Nb_factor, Nc_factor = calc_NaNbNc(Q, rs, lamp, eff, Q_external, V_delay, V_tank, recoil_prob, Nrn=1.0)
def minfunc(x):
Y0 = np.zeros(5)
t = np.arange(0, 3600*5, 60) # time grid
Q_external_scale, t_delay, recoil_prob, lamp_scale = x
# link screen efficiency to recoil_prob
#rs = 1 - 2*recoil_prob * corr #corr was a fit parameter, seemed to be close to 1
rs = 1 - 2*recoil_prob
#recoil_prob = 0.02
parameters = Q, rs, lamp*lamp_scale, eff, Q_external*Q_external_scale, V_delay, V_tank, recoil_prob, Na_factor, Nb_factor, Nc_factor, Nrn_ext
soln = odeint(rate_of_change_opt, Y0, t-t_delay, args=tuple(parameters), hmax=30)
dfs =
|
pd.DataFrame(index=t/60.0, data=soln)
|
pandas.DataFrame
|
import os
import pandas as pd
import torch
from scipy.spatial import distance
from contextualized_topic_models.models.ctm import CombinedTM
from contextualized_topic_models.utils.preprocessing import WhiteSpacePreprocessing
from contextualized_topic_models.utils.data_preparation import TopicModelDataPreparation
from contextualized_topic_models.utils.data_preparation import bert_embeddings_from_file
from data import prune_vocabulary2
from evaluate import evaluate_scores, compute_jsd, compute_kld2
import argparse
argparser = argparse.ArgumentParser()
argparser.add_argument('--data_path', default='project_dir/datasets/semeval-multilingual-news', type=str)
argparser.add_argument('--articles_file', default='test_split_batch2_translated_mbart.csv', type=str)
argparser.add_argument('--sbert_model', default='multi-qa-mpnet-base-dot-v1', type=str)
argparser.add_argument('--save_dir', default='bin/results/ctm', type=str)
argparser.add_argument('--num_topics', default=100, type=int)
argparser.add_argument('--num_epochs', default=200, type=int)
#argparser.add_argument('--test_articles_file', default='test_articles.csv', type=str)
#argparser.add_argument('--test_pairs_file', default='test.csv', type=str)
args = argparser.parse_args()
print("\n" + "-"*5, "Train Combined CTM - monolingual only", "-"*5)
print("data_path:", args.data_path)
print("articles_file:", args.articles_file)
print("sbert_model:", args.sbert_model)
print("save_dir:", args.save_dir)
print("num_topics:", args.num_topics)
print("num_epochs:", args.num_epochs)
print("-"*50 + "\n")
df = pd.read_csv(os.path.join(args.data_path, args.articles_file))
df = df.dropna()
if 'trg_text' in df.columns:
documents_raw = list(df.trg_text)
else:
documents_raw = list(df.text)
print('documents_raw:', len(documents_raw))
# ----- Preprocessing -----
# articles_unproc, articles_proc = prune_vocabulary2(documents_raw)
# text_for_contextual = articles_unproc
# text_for_bow = articles_proc
# preprocess documents
preproc_pipeline = WhiteSpacePreprocessing(documents=documents_raw, vocabulary_size=5000)
preprocessed_docs, unpreprocessed_docs, vocab = preproc_pipeline.preprocess()
text_for_bow = preprocessed_docs
text_for_contextual = unpreprocessed_docs
print('text_for_contextual:', len(text_for_contextual))
print('text_for_bow:', len(text_for_bow))
print('vocab:', len(vocab))
qt = TopicModelDataPreparation(args.sbert_model)
training_dataset = qt.fit(text_for_contextual=text_for_contextual, text_for_bow=text_for_bow)
#print("-"*10, "final vocab size:", len(qt.vocab), "-"*10)
# ----- Training -----
# initialize model
ctm = CombinedTM(bow_size=len(qt.vocab),
contextual_size=768,
n_components=args.num_topics,
num_epochs=args.num_epochs)
# run model
ctm.fit(train_dataset=training_dataset,
save_dir=args.save_dir)
# see topics
ctm.get_topics()
# ----- Inference -----
# load test articles
test_art_file = "test_split_batch2_translated_mbart.csv"
test_path = os.path.join(args.data_path, test_art_file)
test_df = pd.read_csv(test_path)
if 'text' in test_df.columns:
test_articles = list(test_df['text'])
else:
test_articles = list(test_df['trg_text'])
test_ids = list(test_df['id'])
print("Test articles:", len(test_articles))
# process test docs using the same DataPrep pipeline from training
testing_dataset = qt.transform(text_for_contextual=test_articles, text_for_bow=test_articles)
# get document-topic distribution
doc_topics = ctm.get_doc_topic_distribution(testing_dataset, n_samples=50)
print("doc_topics:", doc_topics.shape)
encdf = pd.DataFrame(doc_topics)
encdf['id'] = test_ids
topics_outfile = os.path.join(args.data_path, "combinedCTM_K" + str(args.num_topics) + "_" + args.sbert_model + ".csv")
encdf.to_csv(topics_outfile, index=False)
print("Saved topic distributions to", topics_outfile, "!")
# make topic distributions more sparse and normalise
#doc_topics[doc_topics < 1/num_topics] = 0
#doc_topics = doc_topics/doc_topics.sum(axis=1)[:, np.newaxis]
# compute JSD or cosine sim between topic distributions
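# Illustrative sketch (defined but not called below): comparing two document-topic
# rows with SciPy alone. distance.jensenshannon returns the JS *distance*, i.e. the
# square root of the JS divergence, so it is squared here; the project-specific
# compute_jsd helper imported above may be implemented differently.
def _topic_row_similarity(p, q):
    jsd = distance.jensenshannon(p, q) ** 2   # Jensen-Shannon divergence
    cos_sim = 1.0 - distance.cosine(p, q)     # cosine similarity
    return jsd, cos_sim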
test_pairs_file = "test_split_batch2.csv"
test_pairs_path = os.path.join(args.data_path, test_pairs_file)
test_pairs_df =
|
pd.read_csv(test_pairs_path)
|
pandas.read_csv
|
import numpy as np
import pytest
from pandas.compat import range, u, zip
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.core.common as com
from pandas.core.indexing import IndexingError
from pandas.util import testing as tm
@pytest.fixture
def frame_random_data_integer_multi_index():
levels = [[0, 1], [0, 1, 2]]
codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, codes=codes)
return DataFrame(np.random.randn(6, 2), index=index)
@pytest.fixture
def dataframe_with_duplicate_index():
"""Fixture for DataFrame used in tests for gh-4145 and gh-4146"""
data = [['a', 'd', 'e', 'c', 'f', 'b'],
[1, 4, 5, 3, 6, 2],
[1, 4, 5, 3, 6, 2]]
index = ['h1', 'h3', 'h5']
columns = MultiIndex(
levels=[['A', 'B'], ['A1', 'A2', 'B1', 'B2']],
codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]],
names=['main', 'sub'])
return DataFrame(data, index=index, columns=columns)
@pytest.mark.parametrize('access_method', [lambda s, x: s[:, x],
lambda s, x: s.loc[:, x],
lambda s, x: s.xs(x, level=1)])
@pytest.mark.parametrize('level1_value, expected', [
(0, Series([1], index=[0])),
(1, Series([2, 3], index=[1, 2]))
])
def test_series_getitem_multiindex(access_method, level1_value, expected):
# GH 6018
# series regression getitem with a multi-index
s = Series([1, 2, 3])
s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)])
result = access_method(s, level1_value)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('level0_value', ['D', 'A'])
def test_getitem_duplicates_multiindex(level0_value):
    # GH 5725: the 'A' happens to be a valid Timestamp, so the lookup doesn't raise
# the appropriate error, only in PY3 of course!
index = MultiIndex(levels=[[level0_value, 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index), 1)
df = DataFrame(arr, index=index, columns=['val'])
# confirm indexing on missing value raises KeyError
if level0_value != 'A':
msg = "'A'"
with pytest.raises(KeyError, match=msg):
df.val['A']
msg = "'X'"
with pytest.raises(KeyError, match=msg):
df.val['X']
result = df.val[level0_value]
expected = Series(arr.ravel()[0:3], name='val', index=Index(
[26, 37, 57], name='day'))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer, is_level1, expected_error', [
([], False, None), # empty ok
(['A'], False, None),
(['A', 'D'], False, None),
(['D'], False, r"\['D'\] not in index"), # not any values found
(pd.IndexSlice[:, ['foo']], True, None),
(pd.IndexSlice[:, ['foo', 'bah']], True, None)
])
def test_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1,
expected_error):
# GH 7866
# multi-index slicing with missing indexers
idx = MultiIndex.from_product([['A', 'B', 'C'],
['foo', 'bar', 'baz']],
names=['one', 'two'])
s = Series(np.arange(9, dtype='int64'), index=idx).sort_index()
if indexer == []:
expected = s.iloc[[]]
elif is_level1:
expected = Series([0, 3, 6], index=MultiIndex.from_product(
[['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index()
else:
exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']],
names=['one', 'two'])
expected = Series(np.arange(3, dtype='int64'),
index=exp_idx).sort_index()
if expected_error is not None:
with pytest.raises(KeyError, match=expected_error):
s.loc[indexer]
else:
result = s.loc[indexer]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('columns_indexer', [
([], slice(None)),
(['foo'], [])
])
def test_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
# GH 8737
# empty indexer
multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'],
['alpha', 'beta']))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sort_index(level=0, axis=1)
expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
result = df.loc[:, columns_indexer]
tm.assert_frame_equal(result, expected)
def test_getitem_duplicates_multiindex_non_scalar_type_object():
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median], ['mean', 'median']],
columns=MultiIndex.from_tuples([('functs', 'mean'),
('functs', 'median')]),
index=['function', 'name'])
result = df.loc['function', ('functs', 'mean')]
expected = np.mean
assert result == expected
def test_getitem_simple(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data.T
expected = df.values[:, 0]
result = df['foo', 'one'].values
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('indexer,msg', [
(lambda df: df[('foo', 'four')], r"\('foo', 'four'\)"),
(lambda df: df['foobar'], "'foobar'")
])
def test_getitem_simple_key_error(
multiindex_dataframe_random_data, indexer, msg):
df = multiindex_dataframe_random_data.T
with pytest.raises(KeyError, match=msg):
indexer(df)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3],
lambda s: s.loc[2000, 3]
])
def test_series_getitem(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer', [
lambda s: s[2000, 3, 10],
lambda s: s.loc[2000, 3, 10]
])
def test_series_getitem_returns_scalar(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.iloc[49]
result = indexer(s)
assert result == expected
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
@pytest.mark.parametrize('indexer', [
lambda s: s.loc[[(2000, 3, 10), (2000, 3, 13)]],
lambda s: s.ix[[(2000, 3, 10), (2000, 3, 13)]]
])
def test_series_getitem_fancy(
multiindex_year_month_day_dataframe_random_data, indexer):
s = multiindex_year_month_day_dataframe_random_data['A']
expected = s.reindex(s.index[49:51])
result = indexer(s)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer,error,msg', [
(lambda s: s.__getitem__((2000, 3, 4)), KeyError, '356'),
(lambda s: s[(2000, 3, 4)], KeyError, '356'),
(lambda s: s.loc[(2000, 3, 4)], IndexingError, 'Too many indexers'),
(lambda s: s.__getitem__(len(s)), IndexError, 'index out of bounds'),
(lambda s: s[len(s)], IndexError, 'index out of bounds'),
(lambda s: s.iloc[len(s)], IndexError,
'single positional indexer is out-of-bounds')
])
def test_series_getitem_indexing_errors(
multiindex_year_month_day_dataframe_random_data, indexer, error, msg):
s = multiindex_year_month_day_dataframe_random_data['A']
with pytest.raises(error, match=msg):
indexer(s)
def test_series_getitem_corner_generator(
multiindex_year_month_day_dataframe_random_data):
s = multiindex_year_month_day_dataframe_random_data['A']
result = s[(x > 0 for x in s)]
expected = s[s > 0]
tm.assert_series_equal(result, expected)
def test_frame_getitem_multicolumn_empty_level():
df = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
df.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = df['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=df.index,
columns=['level3 item1'])
tm.assert_frame_equal(result, expected)
def test_getitem_tuple_plus_slice():
# GH 671
df = DataFrame({'a': np.arange(10),
'b': np.arange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)}
).set_index(['a', 'b'])
expected = df.loc[0, 0]
result = df.loc[(0, 0), :]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('indexer,expected_slice', [
(lambda df: df['foo'], slice(3)),
(lambda df: df['bar'], slice(3, 5)),
(lambda df: df.loc[:, 'bar'], slice(3, 5))
])
def test_getitem_toplevel(
multiindex_dataframe_random_data, indexer, expected_slice):
df = multiindex_dataframe_random_data.T
expected = df.reindex(columns=df.columns[expected_slice])
expected.columns = expected.columns.droplevel(0)
result = indexer(df)
tm.assert_frame_equal(result, expected)
def test_getitem_int(frame_random_data_integer_multi_index):
df = frame_random_data_integer_multi_index
result = df.loc[1]
expected = df[-3:]
expected.index = expected.index.droplevel(0)
|
tm.assert_frame_equal(result, expected)
|
pandas.util.testing.assert_frame_equal
|
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in_old(slide, label, root_dir):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 1000
else:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
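# Illustrative sketch (defined but not called): how the tile file names above are
# assumed to be parsed. The name 'slideA_x-12500-y-30000_0.png' is hypothetical;
# the code only relies on the 'x-<pixels>-' and 'y-<pixels>_' fragments, and tiles
# whose binned (x, y) coordinates agree across levels end up paired.
def _demo_parse_tile_name(tile_name='slideA_x-12500-y-30000_0.png', fac=1000):
    x = int(float(tile_name.split('x-', 1)[1].split('-', 1)[0]) / fac)
    y = int(float(re.split('_', tile_name.split('y-', 1)[1])[0]) / fac)
    return x, y  # (12, 30) for the defaults above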
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(slide, label, root_dir, age=None, BMI=None):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 2000
else:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
idsa['age'] = age
idsa['BMI'] = BMI
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
return idsa
# Balance CPTAC and TCGA tiles in each class
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
            if ratio < 0.2:
                # far more TCGA than CPTAC tiles: cap TCGA at 5x the CPTAC count
                TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
                ref = pd.concat([TCGA, CPTAC], sort=False)
            elif ratio > 5:
                # far more CPTAC than TCGA tiles: cap CPTAC at 5x the TCGA count
                CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
                ref = pd.concat([TCGA, CPTAC], sort=False)
balanced = pd.concat([balanced, ref], sort=False)
return balanced
# Prepare label at per patient level
def big_image_sum(pmd, path='../tiles/', ref_file='../Fusion_dummy_His_MUT_joined.csv'):
ref = pd.read_csv(ref_file, header=0)
big_images = []
if pmd == 'subtype':
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
if row['subtype_POLE'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_MSI'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Endometrioid'] == 1:
big_images.append([row['name'], 2, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Serous-like'] == 1:
big_images.append([row['name'], 3, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd == 'histology':
ref = ref.loc[ref['histology_Mixed'] == 0]
for idx, row in ref.iterrows():
if row['histology_Endometrioid'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
if row['histology_Serous'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd in ['Endometrioid', 'MSI', 'Serous-like', 'POLE']:
# ref = ref.loc[ref['histology_Endometrioid'] == 1]
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['subtype_{}'.format(pmd)]), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
elif pmd == 'MSIst':
ref = ref.loc[ref['MSIst_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['MSIst_MSI-H']), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
else:
ref = ref.dropna(subset=[pmd])
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row[pmd]), path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'age', 'BMI'])
return datapd
# TO KEEP SPLIT SAME AS BASELINES. Separate into training and testing; each class uses the same separation
# ratio on big images. The test and train csv files contain the tiles' paths.
def set_sep_secondary(alll, path, cls, pmd, batchsize=24):
if pmd == 'subtype':
split = pd.read_csv('../split/ST.csv', header=0)
elif pmd == 'histology':
split = pd.read_csv('../split/his.csv', header=0)
elif pmd == 'Serous-like':
split = pd.read_csv('../split/CNVH.csv', header=0)
elif pmd == 'Endometrioid':
split = pd.read_csv('../split/CNVL.csv', header=0)
else:
split = pd.read_csv('../split/{}.csv'.format(pmd), header=0)
train = split.loc[split['set'] == 'train']['slide'].tolist()
validation = split.loc[split['set'] == 'validation']['slide'].tolist()
test = split.loc[split['set'] == 'test']['slide'].tolist()
trlist = []
telist = []
valist = []
subset = alll
valist.append(subset[subset['slide'].isin(validation)])
telist.append(subset[subset['slide'].isin(test)])
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
validation_tiles = pd.concat([validation_tiles, tile_ids])
train_tiles = balance(train_tiles, cls=cls)
validation_tiles = balance(validation_tiles, cls=cls)
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
if train_tiles.shape[0] > int(batchsize * 80000 / 3):
train_tiles = train_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate training set!')
if validation_tiles.shape[0] > int(batchsize * 80000 / 30):
validation_tiles = validation_tiles.sample(int(batchsize * 80000 / 30), replace=False)
print('Truncate validation set!')
if test_tiles.shape[0] > int(batchsize * 80000 / 3):
test_tiles = test_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate test set!')
test_tiles.to_csv(path + '/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path + '/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path + '/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
# Training and validation on TCGA; Testing on CPTAC
def set_sep_idp(alll, path, cls, cut=0.1, batchsize=64):
trlist = []
telist = []
valist = []
TCGA = alll[alll['slide'].str.contains("TCGA")]
CPTAC = alll[~alll['slide'].str.contains("TCGA")]
for i in range(cls):
subset = TCGA.loc[TCGA['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut)]
valist.append(subset[subset['slide'].isin(validation)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
telist.append(CPTAC)
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles = pd.concat([test_tiles, tile_ids])
for idx, row in train.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
train_tiles = pd.concat([train_tiles, tile_ids])
for idx, row in validation.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
validation_tiles = pd.concat([validation_tiles, tile_ids])
train_tiles = balance(train_tiles, cls=cls)
validation_tiles = balance(validation_tiles, cls=cls)
# No shuffle on test set
train_tiles = sku.shuffle(train_tiles)
validation_tiles = sku.shuffle(validation_tiles)
if train_tiles.shape[0] > int(batchsize * 80000 / 3):
train_tiles = train_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate training set!')
if validation_tiles.shape[0] > int(batchsize * 80000 / 30):
validation_tiles = validation_tiles.sample(int(batchsize * 80000 / 30), replace=False)
print('Truncate validation set!')
if test_tiles.shape[0] > int(batchsize * 80000 / 3):
test_tiles = test_tiles.sample(int(batchsize * 80000 / 3), replace=False)
print('Truncate test set!')
test_tiles.to_csv(path + '/te_sample.csv', header=True, index=False)
train_tiles.to_csv(path + '/tr_sample.csv', header=True, index=False)
validation_tiles.to_csv(path + '/va_sample.csv', header=True, index=False)
return train_tiles, test_tiles, validation_tiles
# Separate into training and testing; each class uses the same separation ratio on big images.
# The test and train csv files contain the tiles' paths.
def set_sep(alll, path, cls, cut=0.2, batchsize=24):
trlist = []
telist = []
valist = []
TCGA = alll[alll['slide'].str.contains("TCGA")]
CPTAC = alll[~alll['slide'].str.contains("TCGA")]
for i in range(cls):
subset = TCGA.loc[TCGA['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq)*cut/2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq)*cut/2):int(len(unq)*cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq)*cut):]
trlist.append(subset[subset['slide'].isin(train)])
subset = CPTAC.loc[CPTAC['label'] == i]
unq = list(subset.slide.unique())
np.random.shuffle(unq)
validation = unq[:int(len(unq) * cut / 2)]
valist.append(subset[subset['slide'].isin(validation)])
test = unq[int(len(unq) * cut / 2):int(len(unq) * cut)]
telist.append(subset[subset['slide'].isin(test)])
train = unq[int(len(unq) * cut):]
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
train = pd.concat(trlist)
validation = pd.concat(valist)
test_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
train_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
validation_tiles = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for idx, row in test.iterrows():
tile_ids = paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
test_tiles =
|
pd.concat([test_tiles, tile_ids])
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 20 14:50:39 2021
Copyright Notice
=================
Copyright 2021 National Technology and Engineering Solutions of Sandia, LLC.
Under the terms of Contract DE-NA0003525, there is a non-exclusive license
for use of this work by or on behalf of the U.S. Government.
Export of this program may require a license from the
United States Government.
Please refer to the LICENSE.md file for a full description of the license
terms for MEWS.
The license for MEWS is the Modified BSD License and copyright information
must be replicated in any derivative works that use the source code.
@author: dlvilla
MEWS = Multi-scenario Extreme Weather Simulator
"""
import os
from datetime import datetime
import types
import numpy as np
import pandas as pd
from calendar import isleap
from copy import deepcopy
from mews.epw import epw
from mews.errors import EPWMissingDataFromFile, EPWFileReadFailure, EPWRepeatDateError
from mews.weather.doe2weather import DOE2Weather
import warnings
from copy import deepcopy
class Alter(object):
"""
Alter energy plus or doe2 weather files - shorter or longer than a year permitted
"""
def __init__(self,weather_file_path,replace_year=None,check_types=True,
isdoe2=False,use_exe=False,doe2_bin2txt_path=r"../third_party_software/BIN2TXT.EXE",
doe2_start_datetime=None,doe2_tz=None,doe2_hour_in_file=8760,doe2_dst=None):
"""
Alter(weather_file_path,replace_year,check_types)
This is effectively the "read" function
If replace_year causes new coincidence with leap years, then an
extra day is added (Feb 28th if repeated). If a replace_year moves
from a leap year to a non-leap year, then Feb 29th is deleted if it
is present.
Inputs
------
weather_file_path : str : a valid path to an energy plus weather file
replace_year : int : optional : default = None
If None, then leave the year in the energy plus file unaltered
If an int, then change the year to the value for all rows
check_types : bool : optional : default = True
True - check types in all functions (slow but safe)
False - do not check types (likely to get less understandable errors
if something goes wrong)
clear_alterations : optional: bool : remove previous alterations from the current
object. Reissuing a read command to an Alter object allows a new set
            of weather to receive the same alterations a previous set received
ONLY NEEDED IF DOE2
isdoe2 : optional: bool : read in a DOE2 weather file instead of an EPW and then
wrangle that data into an EP format for the self.epwobj database
use_exe : optional : bool : determine whether to use BIN2TXT and TXT2BIN
executables (True) or native Python to read BIN files.
doe2_bin2txt_path : optional : string : a valid path to an executable
that converts a DOE2 *.BIN weather file to an ASCII text file.
The executable is assumed to be named BIN2TXT.EXE and comes with
any distribution of DOE2 which can be obtained by forming a license
agreement with <NAME> and Associates (www.doe2.com). A folder
in mews "third_party_software" can be used to put the *.EXE
doe2_start_datetime : optional : datetime : Input the start time for
the weather file. required if isdoe2=True.
doe2_hour_in_file : optional : must be 8760 or 8784 for leap years.
required if isdoe2=True
        doe2_tz : optional : required if isdoe2=True: str : name of time
zone applicable to the doe2 file
doe2_dst : optional : required if isdoe2=True: list/tuple with 2 entries
that are datetimes with begin and end times for day light savings
time.
returns
------
Alter object
Methods
-------
add_alteration - alter weather file by adding a shape function and delta
to one weather variable
remove_alteration - undo an add_alteration via name of the alteration
reindex_2_datetime - return a dataframe that has date-time as the index
"""
obj = DOE2Weather()
self.df2bin = obj.df2bin
self.bin2df = obj.bin2df
self.read(weather_file_path,replace_year,check_types,True,isdoe2,use_exe,
doe2_bin2txt_path,doe2_start_datetime,doe2_tz,
doe2_hour_in_file,doe2_dst)
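    # Illustrative usage sketch (comments only; the file path below is hypothetical):
    #   obj = Alter("weather/CHICAGO.epw", replace_year=2021)
    #   obj.add_alteration(2021, 1, 7, 12, 48, 5.0)  # +5 degC peak, 48 h, starting Jul 1st 12:00
    #   df = obj.reindex_2_datetime()                # weather data indexed by datetime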
def _leap_year_replacements(self,df,year,isdoe2):
Feb28 = df[(df["Day"] == 28) & (df["Month"] == 2) & (df["Year"]==year)]
Mar1 = df[(df["Month"] == 3) & (df["Day"] == 1) & (df["Year"]==year)]
Feb29 = df[(df["Month"] == 2) & (df["Day"] == 29) & (df["Year"]==year)]
hasFeb29 = len(Feb29) > 0
hasMar1 = len(Mar1) > 0
hasFeb28 = len(Feb28) > 0
if isleap(year) and (not hasFeb29) and (hasMar1 and hasFeb28):
# add Feb29 by replicating Feb28th
indFeb28 = Feb28.index[-1]
sdict = {}
for name,col in Feb28.iteritems():
col.index = np.arange(indFeb28+1,indFeb28+25)
sdict[name] = col
Feb29 = pd.DataFrame(sdict)
Feb29["Day"] = 29
sdict = {}
for name,col in df.loc[indFeb28+1:].iteritems():
col.index = np.arange(indFeb28+25,
indFeb28+25 + (df.index[-1] - indFeb28))
sdict[name] = col
restOfYear = pd.DataFrame(sdict)
df_new = pd.concat([df.loc[0:indFeb28],Feb29,restOfYear])
elif not isleap(year) and hasFeb29:
df_new = df.drop(Feb29.index)
else:
df_new = df
if isdoe2:
# keep custom attributes like "headers" that were added elsewhere
df_new.headers = df.headers
return df_new
def _init_check_inputs(self,replace_year,check_types):
if not isinstance(check_types,bool):
raise TypeError("'check_types' input must be a boolean!")
if check_types:
if isinstance(replace_year, int):
if replace_year < 0:
positive_message = "'replace_year' must be positive"
raise ValueError(positive_message)
elif replace_year is None:
pass
else:
raise TypeError("'replace_year' must be a positive integer or None")
def __str__(self):
wfp_str = ""
alt_str = "None"
if hasattr(self,'alterations'):
if len(self.alterations) > 0:
alt_str = str(self.alterations)
if hasattr(self,'wfp'):
wfp_str = self.wfp
return ("mews.weather.alter.Alter: \n\nWeather for: \n\n'{0}'\n\nwith".format(wfp_str)
+" alterations:\n\n{0}".format(alt_str))
def _get_date_ind(self,year=None,month=None,day=None,hour=None,num=None):
"""
Query for year, month, day, hour. If an input is not provided
query for any value of that entry
"""
df = self.epwobj.dataframe
all_true = df["Year"] == df["Year"]
if year is None:
yr_comparison = all_true
else:
yr_comparison = df["Year"] == year
if month is None:
mo_comparison = all_true
else:
mo_comparison = df["Month"] == month
if day is None:
day_comparison = all_true
else:
day_comparison = df["Day"] == day
if hour is None:
hr_comparison = all_true
else:
hr_comparison = df["Hour"] == hour
ind_list = df[yr_comparison & mo_comparison & day_comparison
& hr_comparison].index
if num is None:
return ind_list
else:
return ind_list[:num]
def add_alteration(self,
year,
day,
month,
hour,
duration,
peak_delta,
shape_func=lambda x: np.sin(x*np.pi),
column='Dry Bulb Temperature',
alteration_name=None,
averaging_steps=1):
"""
add_alteration(year, day, month, hour, duration, peak_delta, shape_func, column):
Inputs
------
year : int : year of start date
day : int : day of month on which the heat wave starts
month : int : month of year on which the heat wave starts
hour : int : hour of day on which the heat wave starts (1-24)
duration : int : number of hours that the heat wave lasts. if = -1
then the change is applied to the end of the weather file
peak_delta : float : peak value change from original weather
at "shape_func" maximum
shape_func : function|list|tuple|np.array : a function or array
whose range interval [0,1] will be mapped to [0,duration] in hours.
The function will be normalized to have a peak of 1 for its
peak value over [0,1]. For example, a sine function could be
lambda x: sin(pi*x). This shape is applied in adding the heat wave.
from the start time to duration_hours later. If the input is an
array, it must have 'duration' number of entries.
column : str : optional : default = 'Dry Bulb Temperature'
must be an entry in the column names of the energy plus weather
file.
alteration_name : any type that can be a key for dict : optional :
Default = None:
If none then name is "Alteration X" where X is the current
number of alterations + 1.
If any other type, it must not be a repeat of previously
added alterations.
averaging_steps : int : optional Default = 1
The number of steps to average the weather signal over when
adding the heat wave. For example, if heat wave statistics
come from daily data, then additions need to be made w/r to
the daily average and this should be 24
Outputs
-------
None - internal epwobj is altered use "write" to write the result.
"""
df = self.epwobj.dataframe
# special handling
if alteration_name is None:
num_alterations = len(self.alterations)
alteration_name = "Alteration {0:d}".format(num_alterations + 1)
if duration == -1:
duration = len(df) - self._get_date_ind(year,month,day,hour)[-1]
## TYPE CHECKING
if self.check_types:
try:
start_date = datetime(year,month,day,hour)
except ValueError:
raise ValueError("hour must be in 0 .. 23")
if duration < 0:
raise ValueError("The 'duration' input must be positive and less than a year (in hours)")
correct_type_found = True
if not isinstance(shape_func,(types.FunctionType)):
if isinstance(shape_func,list):
shape_func = np.array(shape_func)
if isinstance(shape_func,np.ndarray):
num = len(shape_func)
if num != duration:
raise ValueError("If the shape_func is provided as a "
+"list, it must have length of the"
+" 'duration' input")
else:
correct_type_found = False
if not correct_type_found:
raise TypeError("The shape_func must be of type 'function','list', or 'numpy.ndarray'")
if not column in df.columns:
raise ValueError("The 'column' input must be within the list:" + str(df.columns))
if alteration_name in self.alterations:
raise ValueError("The 'alteration_name' {} is already taken!".format(alteration_name))
# END TYPE CHECKING
if self.base1:
hourin = hour + 1
else:
hourin = hour
bind_list = self._get_date_ind(year,month,day,hourin)
if len(bind_list) > 1:
raise EPWRepeatDateError("The date: {0} has a repeat entry!".format(
str(datetime(year,month,day,hour))))
elif len(bind_list) == 0:
raise Exception("The requested alteration dates are outside the"+
" range of weather data in the current data!")
else:
bind = bind_list[0]
eind = bind + duration
if eind > len(df)+1: # remember eind is not included in the range.
            raise pd.errors.OutOfBoundsTimedelta("The specified start time and "
                                                 "duration exceed the bounds "
                                                 "of the weather file's data!")
if isinstance(shape_func,types.FunctionType):
func_values = np.array([shape_func(x/duration) for x in range(duration)])
else:
func_values = shape_func
extremum = max(abs(func_values.max()),abs(func_values.min()))
if extremum == 0:
raise ZeroDivisionError("The shape_func input has an extremum of"
+" zero. This can occur for very short"
+" alterations where the shape function"
+" begins and ends with zero or by"
+" passing all zeros to a shape function.")
else:
normalized_func_values = peak_delta * np.abs(func_values) / extremum
addseg = pd.DataFrame(normalized_func_values,index=range(bind,eind),columns=[column])
if averaging_steps > 1:
#TODO - this needs to be moved elsewhere. You have a lot of work
# to do to add daily average based heat waves correctly.
df_avg = df.loc[:,column].rolling(window=averaging_steps).mean()
# assure first steps have numeric values
df_avg.iloc[:averaging_steps] = df.loc[:,column].iloc[:averaging_steps]
df_diff = df.loc[bind:eind-1,column] - df_avg.loc[bind:eind-1]
if addseg.sum().values[0] < 0:
# cold snap
scale = ((addseg.min() - df_diff.min())/addseg.min()).values[0]
else:
# heat wave
if addseg.max()[0] <= 0:
scale = 0.0
else:
scale = ((addseg.max() - df_diff.max())/addseg.max()).values[0]
if scale > 0:
addsegmod = addseg * scale
else:
addsegmod = addseg * 0.0
else:
addsegmod = addseg
#df_org = deepcopy(df.loc[bind:eind-1,column])
df.loc[bind:eind-1,column] = df.loc[bind:eind-1,column] + addsegmod.loc[bind:eind-1,column]
self.alterations[alteration_name] = addseg
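    # Hedged example (comments only): a 6 h, -2 degC cold snap with a custom
    # triangular shape passed as an array whose length equals 'duration':
    #   obj.add_alteration(2021, 15, 7, 10, 6, -2.0,
    #                      shape_func=[0.2, 0.6, 1.0, 1.0, 0.6, 0.2],
    #                      alteration_name="july cold snap")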
def read(self,weather_file_path,replace_year=None,check_types=True,
clear_alterations=False,isdoe2=False,use_exe=False,doe2_bin2txt_path=r"../third_party_software/BIN2TXT.EXE",
doe2_start_datetime=None,doe2_tz=None,doe2_hour_in_file=8760,doe2_dst=None):
"""
read a new Energy Plus Weather (epw) file (or doe2 *.bin) while optionally
keeping previously added alterations in obj.alterations
obj.read(weather_file_path,replace_year=None,check_types=True,
clear_alterations=False,isdoe2=False,
use_exe=False,doe2_bin2txt_path=r"../third_party_software/BIN2TXT.EXE",
doe2_start_datetime=None,doe2_tz=None,doe2_hour_in_file=8760,doe2_dst=None)
The input has three valid modes:
Energy Plus: only input weather_file_path and optionally:
replace_year, check_types, and clear_alterations
DOE2 native python: All Energy Plus AND: isdoe2=True:
use_exe=False (default) all other inputs are optional:
DOE2 using exe: All inputs (even optional ones) are REQUIRED
except doe2_bin2txt_path, replace_year, check_types, and
clear_alterations remain optional.
Once you have used one mode, there is no crossing over to another mode
Warning: This function resets the entire object and is equivalent
to creating a new object except that the previously entered
        alterations are left intact. This allows for these alterations
to be applied in proportionately the same positions for a new
weather history.
If replace_year causes new coincidence with leap years, then an
extra day is added (Feb 28th if repeated). If a replace_year moves
from a leap year to a non-leap year, then Feb 29th is deleted if it
is present.
Inputs
------
weather_file_path : str : a valid path to an energy plus weather file
            or, if isdoe2=True, then to a DOE2 bin weather file. Many additional
inputs are needed for the doe2 option.
replace_year : optional : int : optional - required if isdoe2=True : default = None
If None, then leave the year in the energy plus file unaltered
If an int, then change the year to the value for all rows
If a tup, then first entry is the begin year and second is the hour
at which to change to the next year.
This is useful to give TMY or other multi-year compilations a single
year within a scenario history.
check_types : optional: bool : optional : default = True
True - check types in all functions (slower but safer)
False - do not check types (likely to get less understandable errors
if something goes wrong)
clear_alterations : optional: bool : remove previous alterations from the current
object. Reissuing a read command to an Alter object allows a new set
            of weather to receive the same alterations a previous set received
isdoe2 : optional: bool : read in a DOE2 weather file instead of an EPW and then
wrangle that data into an EP format for the self.epwobj database
use_exe : optional : bool : True = Use BIN2TXT.EXE to read DOE-2 BIN file
False = Use Python to read DOE-2 BIN (PREFERRED).
doe2_bin2txt_path : optional : string : a valid path to an executable
that converts a DOE2 *.BIN weather file to an ASCII text file.
The executable is assumed to be named BIN2TXT.EXE and comes with
any distribution of DOE2 which can be obtained by forming a license
agreement with <NAME> and Associates (www.doe2.com). A folder
in mews "third_party_software" can be used to put the *.EXE
doe2_start_datetime : optional : datetime : Input the start time for
the weather file. if not entered, then the value in the BIN file
is used.
doe2_hour_in_file : optional : must be 8760 or 8784 for leap years.
required if isdoe2=True. This allows a non-leap year to be forced
into a leap year for consistency. Feb28th is just repeated for such
cases.
        doe2_tz : optional : required if isdoe2=True: str : name of time
zone applicable to the doe2 file
doe2_dst : optional : required if isdoe2=True: list/tuple with 2 entries
that are datetimes with begin and end times for day light savings
time.
returns
------
None - obj.epwobj has a new dataset afterwards.
"""
epwobj = epw()
self.epwobj = epwobj
self.isdoe2 = isdoe2
if isdoe2:
if doe2_bin2txt_path == r"../third_party_software/BIN2TXT.EXE":
doe2_bin2txt_path = os.path.join(os.path.dirname(__file__),doe2_bin2txt_path)
self._doe2_check_types(check_types,weather_file_path,doe2_start_datetime, doe2_hour_in_file,
doe2_bin2txt_path,doe2_tz,doe2_dst,use_exe)
df = self.bin2df(weather_file_path,doe2_start_datetime, doe2_hour_in_file,
doe2_bin2txt_path,doe2_tz,doe2_dst,use_exe)
# add Year column which is expected by the routine
df["Year"] = int(replace_year)
df["Month"] = df["MONTH (1-12)"].astype(int)
df["Day"] = df["DAY OF MONTH"].astype(int)
df["Hour"] = df["HOUR OF DAY"].astype(int)
df["Date"] = df.index
df.index = pd.RangeIndex(start=0,stop=len(df.index))
# this must be done now as well as later to keep the code
# consistent.
epwobj.dataframe = df
else:
try:
epwobj.read(weather_file_path)
except UnicodeDecodeError:
                raise EPWFileReadFailure(("The file '{0}' was not read successfully "
                                          "by the epw package; it is corrupt or "
                                          "in the wrong format!").format(weather_file_path))
except FileNotFoundError as fileerror:
raise fileerror
except:
                raise EPWFileReadFailure(("The file '{0}' was not read successfully "
                                          "by the epw package for an unknown "
                                          "reason!").format(weather_file_path))
df = epwobj.dataframe
# verify no NaN's
if df.isna().sum().sum() != 0:
raise EPWMissingDataFromFile("NaN's are present after reading the "
+"weather file. Only fully populated "
+"data sets are allowed!")
self._init_check_inputs(replace_year, check_types)
if not replace_year is None:
# Prepare for leap year alterations
new_year_ind = self._get_date_ind(month=1,day=1,hour=1)
if len(new_year_ind) == 0:
df["Year"] = replace_year
df = self._leap_year_replacements(df, replace_year, isdoe2)
else:
# ADD A START POINT FOR THE REPLACE YEAR IF THE FILE DOES NOT BEGIN WITH
# JAN 1ST
if new_year_ind[0] != 0:
new_year_ind = new_year_ind.insert(0,0)
# loop over years.
for idx, ind in enumerate(new_year_ind):
if idx < len(new_year_ind) - 1:
df.loc[ind:new_year_ind[idx+1],"Year"] = replace_year + idx
else:
df.loc[ind:,"Year"] = replace_year + idx
# This has to be done separately or the new_year_ind will be
# knocked out of place.
for idx, ind in enumerate(new_year_ind):
df = self._leap_year_replacements(df, replace_year + idx, isdoe2)
epwobj.dataframe = df
epwobj.original_dataframe = deepcopy(df)
self.check_types = check_types
self.wfp = weather_file_path
if (hasattr(self,'alterations') and clear_alterations) or not hasattr(self,'alterations'):
self.alterations = {} # provides a registry of alterations
#TODO -bring WNTR registry class into this tool.
else:
for name,alteration in self.alterations.items():
for col in alteration.columns:
common_ind = alteration.index.intersection(df.index)
if len(common_ind) == 0:
                        warnings.warn(("None of the alteration '{0}' intersects"
                                       " with the newly read-in epw file!").format(name))
else:
if len(common_ind) != len(alteration.index):
                            warnings.warn(("Only some of the alteration '{0}' intersects"
                                           " with the newly read-in epw file!").format(name))
df.loc[common_ind,col] = (df.loc[common_ind,col] +
alteration.loc[common_ind,col])
first_hour_val = np.unique(self.epwobj.dataframe["Hour"].values)[0]
if first_hour_val == 1 or first_hour_val == 0:
self.base1 = bool(first_hour_val)
else:
# in the rare case that a file does not even have 24 hours assume
# that it is base 1 (i.e. 1..24 rather than 0..23)
self.base1 = True
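    # Hedged examples of the three read modes described in the docstring above
    # (comments only; the paths are hypothetical):
    #   obj.read("weather/CHICAGO.epw", replace_year=2021)               # Energy Plus
    #   obj.read("weather/CHICAGO.BIN", replace_year=2021, isdoe2=True)  # DOE2, native Python
    #   # the DOE2 BIN2TXT.EXE route additionally requires use_exe=True plus
    #   # doe2_start_datetime, doe2_tz, doe2_hour_in_file and doe2_dst.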
def _check_string_path(self,string_path):
if isinstance(string_path,str):
if not os.path.exists(string_path):
raise FileNotFoundError("The path "+string_path+
" does not exist!")
else:
raise TypeError("The input 'weather_file_path' must be a string!")
def _doe2_check_types(self,check_types,weather_file_path,doe2_start_datetime,
doe2_hour_in_file,doe2_bin2txt_path,
doe2_tz,doe2_dst,use_exe):
if check_types:
self._check_string_path(weather_file_path)
if isinstance(weather_file_path,str):
if not os.path.exists(weather_file_path):
raise FileNotFoundError("The path "+weather_file_path+
" does not exist!")
else:
raise TypeError("The input 'weather_file_path' must be a string!")
if use_exe:
self._check_string_path(doe2_bin2txt_path)
if not isinstance(doe2_start_datetime,datetime):
raise TypeError("The input 'doe2_start_datetime' must be a datetime object!")
if doe2_hour_in_file != 8760 and doe2_hour_in_file != 8784:
raise ValueError("The input 'doe2_hour_in_file' must be an "+
"integer of value 8760 for normal years or"+
" 8784 for leap years")
if not isinstance(doe2_tz,str):
raise TypeError("The input 'doe2_tz' must be a string!")
if not isinstance(doe2_dst,(list,tuple)):
raise TypeError("The input 'doe2_dst' must be a list or tuple of 2-elements")
if len(doe2_dst) != 2:
raise ValueError("The input 'doe2_dst' must have 2-elements")
if not isinstance(doe2_dst[0],datetime) or not isinstance(doe2_dst[1],datetime):
raise TypeError("The input 'doe2_dst' must have 2-elements that are datetime objects!")
def remove_alteration(self,alteration_name):
"""
Remove an alteration that has already been added
obj.remove_alteration(alteration_name)
Parameters
----------
alteration_name : str
a name that must exist in the obj.alterations already added
Returns
-------
None.
"""
if alteration_name in self.alterations:
df = self.epwobj.dataframe
addseg = self.alterations.pop(alteration_name) # pop returns and removes
column = addseg.columns[0]
bind = addseg.index[0]
eind = addseg.index[-1]
df.loc[bind:eind,column] = df.loc[bind:eind,column] - addseg.loc[bind:eind,column]
else:
            raise ValueError(("The alteration {0} does not exist. Valid"
                              " alteration names are:\n\n{1}")
                             .format(alteration_name, str(self.alterations)))
def reindex_2_datetime(self,tzname=None,original=False):
"""
obj.reindex_2_datetime(tzname=None)
Parameters
----------
tzname : str : optional : a valid time zone name
Default - None - keep data time zone naive
Returns
-------
df_out - Dataframe with a DatetimeIndex index and weather data.
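        Example (illustrative)
        -------
        obj.reindex_2_datetime("UTC") gives an hourly, tz-aware DatetimeIndex;
        with the default tzname=None the index stays time-zone naive.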
"""
if original:
df = self.epwobj.original_dataframe
else:
df = self.epwobj.dataframe
begin_end_times = [datetime(df["Year"].values[ind], df["Month"].values[ind],
df["Day"].values[ind], df["Hour"].values[ind]-1) for ind in [0,-1]]
df_out = deepcopy(df)
datind =
|
pd.date_range(begin_end_times[0],begin_end_times[1], freq='H',tz=tzname)
|
pandas.date_range
|
# Copyright 2019-2020 The Lux Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .context import lux
import pytest
import pandas as pd
import numpy as np
from lux.interestingness.interestingness import interestingness
# The following test cases are labelled for vis with <Ndim, Nmsr, Nfilter>
def test_interestingness_1_0_0(global_var):
df = pytest.car_df
df["Year"] = pd.to_datetime(df["Year"], format="%Y")
df.set_intent([lux.Clause(attribute="Origin")])
df._ipython_display_()
# check that top recommended enhance graph score is not none and that ordering makes intuitive sense
assert interestingness(df.recommendation["Enhance"][0], df) != None
rank1 = -1
rank2 = -1
rank3 = -1
for f in range(0, len(df.recommendation["Enhance"])):
vis = df.recommendation["Enhance"][f]
if vis.get_attr_by_channel("x")[0].attribute == "Displacement":
rank1 = f
if vis.get_attr_by_channel("x")[0].attribute == "Weight":
rank2 = f
if vis.get_attr_by_channel("x")[0].attribute == "Acceleration":
rank3 = f
assert rank1 < rank2 and rank1 < rank3 and rank2 < rank3
# check that top recommended filter graph score is not none and that ordering makes intuitive sense
assert interestingness(df.recommendation["Filter"][0], df) != None
rank1 = -1
rank2 = -1
rank3 = -1
for f in range(0, len(df.recommendation["Filter"])):
vis = df.recommendation["Filter"][f]
if len(vis.get_attr_by_attr_name("Cylinders")) > 0:
if int(vis._inferred_intent[2].value) == 8:
rank1 = f
if int(vis._inferred_intent[2].value) == 6:
rank3 = f
if "ford" in str(df.recommendation["Filter"][f]._inferred_intent[2].value):
rank2 = f
assert rank1 < rank2 and rank1 < rank3 and rank2 < rank3
df.clear_intent()
def test_interestingness_1_0_1(global_var):
df = pytest.car_df
df["Year"] = pd.to_datetime(df["Year"], format="%Y")
df.set_intent(
[
lux.Clause(attribute="Origin", filter_op="=", value="USA"),
lux.Clause(attribute="Cylinders"),
]
)
df._ipython_display_()
assert df.current_vis[0].score == 0
df.clear_intent()
def test_interestingness_0_1_0(global_var):
df = pytest.car_df
df["Year"] = pd.to_datetime(df["Year"], format="%Y")
df.set_intent([lux.Clause(attribute="Horsepower")])
df._ipython_display_()
# check that top recommended enhance graph score is not none and that ordering makes intuitive sense
assert interestingness(df.recommendation["Enhance"][0], df) is not None
rank1 = -1
rank2 = -1
rank3 = -1
for f in range(0, len(df.recommendation["Enhance"])):
if (
df.recommendation["Enhance"][f].mark == "scatter"
and df.recommendation["Enhance"][f]._inferred_intent[1].attribute == "Weight"
):
rank1 = f
if (
df.recommendation["Enhance"][f].mark == "scatter"
and df.recommendation["Enhance"][f]._inferred_intent[1].attribute == "Acceleration"
):
rank2 = f
if (
df.recommendation["Enhance"][f].mark == "line"
and df.recommendation["Enhance"][f]._inferred_intent[0].attribute == "Year"
):
rank3 = f
assert rank1 < rank2 and rank1 < rank3 and rank2 < rank3
# check that top recommended filter graph score is not none and that ordering makes intuitive sense
assert interestingness(df.recommendation["Filter"][0], df) is not None
rank1 = -1
rank2 = -1
rank3 = -1
for f in range(0, len(df.recommendation["Filter"])):
if df.recommendation["Filter"][f]._inferred_intent[2].value == 4:
rank1 = f
if str(df.recommendation["Filter"][f]._inferred_intent[2].value) == "Europe":
rank2 = f
if "1970" in str(df.recommendation["Filter"][f]._inferred_intent[2].value):
rank3 = f
assert rank1 < rank2 and rank1 < rank3 and rank2 < rank3
df.clear_intent()
def test_interestingness_0_1_1(global_var):
df = pytest.car_df
df["Year"] =
|
pd.to_datetime(df["Year"], format="%Y")
|
pandas.to_datetime
|
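# The tests above all repeat the same ranking check: scan the recommendation list, record
# the position (rank) at which each expected visualization appears, then assert the
# expected ordering. A minimal, library-free sketch of that pattern (toy data, not the
# Lux API):
toy_recs = [("scatter", "Displacement"), ("scatter", "Weight"), ("scatter", "Acceleration")]
toy_ranks = {attr: pos for pos, (_, attr) in enumerate(toy_recs)}
assert toy_ranks["Displacement"] < toy_ranks["Weight"] < toy_ranks["Acceleration"]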
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
#from sklearn.externals import joblib  # removed in modern scikit-learn
import joblib
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if not os.path.exists(bufferstringoutput):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
class FEg30eom0110network(FEbase):
# In this version the prediction horizon becomes 3 days
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all = pd.read_csv(DataSetName[4], index_col=0, header=0)  # api: pandas.read_csv
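# FEbase.create() above is a simple file-level cache: it derives an output name from the
# input file plus the subclass name and only recomputes/writes when that file is missing.
# A minimal, self-contained sketch of the same idea (function and argument names are
# illustrative, not part of the original module):
import os
import pandas as pd

def cached_feature_file(input_csv, tag, compute):
    """Return the path of '<input>_<tag><ext>', computing and saving it only once."""
    root, ext = os.path.splitext(input_csv)
    out_path = f"{root}_{tag}{ext}"
    if not os.path.exists(out_path):          # cache miss: compute and persist
        compute(pd.read_csv(input_csv, index_col=0, header=0)).to_csv(out_path)
    return out_path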
from audit_scores import get_urls
import requests
import pandas as pd
final_urls = get_urls()[1:]
# "Country" was declared but never populated, which would break pd.DataFrame(scores);
# only the columns actually filled in the loop are kept here.
scores = {col: [] for col in ["URL", "Score"]}
key = 'INSERT KEY'  # PageSpeed Insights API key (placeholder)
for url in final_urls:
api_url = f"https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url={url}&key={key}"
response = requests.get(api_url)
response = response.json()
try:
overall_score = (
response["lighthouseResult"]["categories"]["performance"]["score"] * 100
)
except (KeyError, TypeError):
# print("\nError \n" + url)
continue
# print("\n" + url + "\n" + str(overall_score))
scores['Score'].append(overall_score)
scores['URL'].append(url)
df = pd.DataFrame(scores)  # api: pandas.DataFrame
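# The try/except above guards against missing or malformed Lighthouse results. The same
# defensive extraction can be written with chained dict.get() calls; a minimal sketch on
# a fake response payload (no network call, names are illustrative):
fake_response = {"lighthouseResult": {"categories": {"performance": {"score": 0.87}}}}
score = (fake_response.get("lighthouseResult", {})
         .get("categories", {})
         .get("performance", {})
         .get("score"))
overall = score * 100 if score is not None else None   # 87.0 for this fake payload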
# -*- coding: utf-8 -*-
import pandas
import numpy
import sys
import unittest
from datetime import datetime
from pandas.testing import assert_frame_equal, assert_series_equal
import os
import copy
sys.path.append("..")
import warnings
import nPYc
from nPYc.enumerations import SampleType
from nPYc.enumerations import AssayRole
from nPYc.enumerations import VariableType
from generateTestDataset import generateTestDataset
import tempfile
from isatools import isatab
class test_msdataset_synthetic(unittest.TestCase):
"""
Test MSDataset object functions with synthetic data
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata = pandas.DataFrame(
{'Sample File Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'Sample Base Name': ['Unittest_file_001', 'Unittest_file_002', 'Unittest_file_003'],
'AssayRole': [AssayRole.Assay, AssayRole.PrecisionReference, AssayRole.PrecisionReference],
'SampleType': [SampleType.StudySample, SampleType.StudyPool, SampleType.ExternalReference],
'Sample Name': ['Sample1', 'Sample2', 'Sample3'], 'Acqu Date': ['26-May-17', '26-May-17', '26-May-17'],
'Acqu Time': ['16:42:57', '16:58:49', '17:14:41'], 'Vial': ['1:A,1', '1:A,2', '1:A,3'],
'Instrument': ['XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest', 'XEVO-TOF#UnitTest'],
'Acquired Time': [datetime(2017, 5, 26, 16, 42, 57), datetime(2017, 5, 26, 16, 58, 49),
datetime(2017, 5, 26, 17, 14, 41)], 'Run Order': [0, 1, 2], 'Batch': [1, 1, 2],
'Correction Batch': [numpy.nan, 1, 2], 'Matrix': ['U', 'U', 'U'],
'Subject ID': ['subject1', 'subject1', 'subject2'], 'Sample ID': ['sample1', 'sample2', 'sample3'],
'Dilution': [numpy.nan, '60.0', '100.0'],'Exclusion Details': ['','','']})
self.msData.featureMetadata = pandas.DataFrame(
{'Feature Name': ['Feature1', 'Feature2', 'Feature3'], 'Retention Time': [6.2449, 2.7565, 5.0564],
'm/z': [249.124281, 381.433191, 471.132083]})
self.msData.featureMetadata['Exclusion Details'] = None
self.msData.featureMetadata['User Excluded'] = False
self.msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.msData.featureMetadata.index)
self.msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.msData.featureMetadata.index)
self.msData._intensityData = numpy.array([[10.2, 20.95, 30.37], [10.1, 20.03, 30.74], [3.065, 15.83, 30.16]])
# Attributes
self.msData.Attributes['FeatureExtractionSoftware'] = 'UnitTestSoftware'
# excluded data
self.msData.sampleMetadataExcluded = []
self.msData.intensityDataExcluded = []
self.msData.featureMetadataExcluded = []
self.msData.excludedFlag = []
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[0, :])
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata)
self.msData.excludedFlag.append('Samples')
self.msData.featureMetadataExcluded.append(self.msData.featureMetadata[[True, False, False]])
self.msData.intensityDataExcluded.append(self.msData._intensityData[:, 0])
self.msData.sampleMetadataExcluded.append(self.msData.sampleMetadata)
self.msData.excludedFlag.append('Features')
# finish
self.msData.VariableType = VariableType.Discrete
self.msData.initialiseMasks()
def test_rsd_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='No reference samples'):
msData.sampleMetadata = pandas.DataFrame(None)
with self.assertRaises(ValueError):
msData.rsdSP
with self.subTest(msg='Only one reference sample'):
msData.sampleMetadata = pandas.DataFrame([[nPYc.enumerations.AssayRole.PrecisionReference, nPYc.enumerations.SampleType.StudyPool]], columns=['AssayRole', 'SampleType'])
with self.assertRaises(ValueError):
msData.rsdSP
def test_getsamplemetadatafromfilename(self):
"""
Test we are parsing NPC MS filenames correctly (PCSOP.081).
"""
# Create an empty object with simple filenames
msData = nPYc.MSDataset('', fileType='empty')
msData.sampleMetadata['Sample File Name'] = ['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02_x',
'Test2_RPOS_ToF02_U2W03_b',
'Test3_RNEG_ToF03_S3W04_2',
'Test4_RPOS_ToF04_B1S1_SR_q',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01_9',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21']
msData._getSampleMetadataFromFilename(msData.Attributes['filenameSpec'])
##
# Check basename
##
basename = pandas.Series(['Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_LPOS_ToF04_P4W05_LTR',
'Test5_LNEG_ToF05_U5W06_SR',
'Test6_HPOS_ToF06_S4W05_MR',
'Test1_HPOS_ToF01_P1W02',
'Test2_RPOS_ToF02_U2W03',
'Test3_RNEG_ToF03_S3W04',
'Test4_RPOS_ToF04_B1S1_SR',
'Test5_LPOS_ToF05_B2E2_SR',
'Test6_LNEG_ToF06_B3SRD01',
'Test1_HPOS_ToF06_Blank01',
'Test1_HPOS_ToF06_IC02',
'Test1_HPOS_ToF06_EIC21'],
name='Sample Base Name',
dtype='str')
assert_series_equal(msData.sampleMetadata['Sample Base Name'], basename)
##
# Check Study
##
study = pandas.Series(['Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test2',
'Test3',
'Test4',
'Test5',
'Test6',
'Test1',
'Test1',
'Test1'],
name='Study',
dtype='str')
assert_series_equal(msData.sampleMetadata['Study'], study)
##
# Check Chromatography
##
chromatography = pandas.Series(['H',
'R',
'R',
'L',
'L',
'H',
'H',
'R',
'R',
'R',
'L',
'L',
'H',
'H',
'H'],
name='Chromatography',
dtype='str')
assert_series_equal(msData.sampleMetadata['Chromatography'], chromatography)
##
# Check Ionisation
##
ionisation = pandas.Series(['POS',
'POS',
'NEG',
'POS',
'NEG',
'POS',
'POS',
'POS',
'NEG',
'POS',
'POS',
'NEG',
'POS',
'POS',
'POS'],
name='Ionisation',
dtype='str')
assert_series_equal(msData.sampleMetadata['Ionisation'], ionisation)
##
# Check Instrument
##
instrument = pandas.Series(['ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF01',
'ToF02',
'ToF03',
'ToF04',
'ToF05',
'ToF06',
'ToF06',
'ToF06',
'ToF06'],
name='Instrument',
dtype='str')
assert_series_equal(msData.sampleMetadata['Instrument'], instrument)
##
# Check Re-Run
##
reRun = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'b',
'',
'q',
'',
'',
'',
'',
''],
name='Re-Run',
dtype='str')
assert_series_equal(msData.sampleMetadata['Re-Run'], reRun)
##
# Check Suplemental Injections
##
suplemental = pandas.Series(['',
'',
'',
'',
'',
'',
'',
'',
'2',
'',
'',
'9',
'',
'',
''],
name='Suplemental Injections',
dtype='str')
assert_series_equal(msData.sampleMetadata['Suplemental Injections'], suplemental)
##
# Check Skipped
##
skipped = pandas.Series([False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
False,
False],
name='Skipped',
dtype='bool')
assert_series_equal(msData.sampleMetadata['Skipped'], skipped)
##
# Check Matrix
##
matrix = pandas.Series(['P',
'U',
'S',
'P',
'U',
'S',
'P',
'U',
'S',
'',
'',
'',
'',
'',
''],
name='Matrix',
dtype='str')
assert_series_equal(msData.sampleMetadata['Matrix'], matrix)
##
# Check Well
##
well = pandas.Series([2,
3,
4,
5,
6,
5,
2,
3,
4,
1,
2,
1,
-1,
-1,
-1],
name='Well',
dtype='int')
assert_series_equal(msData.sampleMetadata['Well'], well, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Well'].dtype.kind, well.dtype.kind)
##
# Check Plate
##
plate = pandas.Series([1,
2,
3,
4,
5,
4,
1,
2,
3,
1,
2,
3,
1,
2,
21],
name='Plate',
dtype='int')
assert_series_equal(msData.sampleMetadata['Plate'], plate, check_dtype=False)
self.assertEqual(msData.sampleMetadata['Plate'].dtype.kind, plate.dtype.kind)
##
# Check Batch
##
batch = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
2.0,
3.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Batch',
dtype='float')
assert_series_equal(msData.sampleMetadata['Batch'], batch)
##
# Check Dilution
##
dilution = pandas.Series([numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
numpy.nan,
1.0,
numpy.nan,
numpy.nan,
numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(msData.sampleMetadata['Dilution'], dilution)
##
# Check AssayRole
##
assayRole = pandas.Series([AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.Assay,
AssayRole.Assay],
name='AssayRole',
dtype=object)
assert_series_equal(msData.sampleMetadata['AssayRole'], assayRole)
##
# Check SampleType
##
sampleType = pandas.Series([SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.StudyPool,
SampleType.MethodReference,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.ProceduralBlank,
SampleType.StudyPool,
SampleType.StudyPool],
name='SampleType',
dtype=object)
assert_series_equal(msData.sampleMetadata['SampleType'], sampleType)
def test_updateMasks_features(self):
msData = nPYc.MSDataset('', fileType='empty')
msData.Attributes['artifactualFilter'] = True
##
# Variables:
# Good Corr, Good RSD
# Poor Corr, Good RSD
# Good Corr, Poor RSD
# Poor Corr, Poor RSD
# Good Corr, Good RSD, below blank
##
msData.intensityData = numpy.array([[100, 23, 99, 51, 100],
[90, 54, 91, 88, 91],
[50, 34, 48, 77, 49],
[10, 66, 11, 56, 11],
[1, 12, 2, 81, 2],
[50, 51, 2, 12, 49],
[51, 47, 1, 100, 50],
[47, 50, 70, 21, 48],
[51, 49, 77, 91, 50],
[48, 49, 12, 2, 49],
[50, 48, 81, 2, 51],
[54, 53, 121, 52, 53],
[57, 49, 15, 51, 56],
[140, 41, 97, 47, 137],
[52, 60, 42, 60, 48],
[12, 48, 8, 56, 12],
[1, 2, 1, 1.21, 51],
[2, 1, 1.3, 1.3, 63]],
dtype=float)
msData.sampleMetadata = pandas.DataFrame(data=[[100, 1, 1, 1, AssayRole.LinearityReference, SampleType.StudyPool],
[90, 1, 1, 2, AssayRole.LinearityReference, SampleType.StudyPool],
[50, 1, 1, 3, AssayRole.LinearityReference, SampleType.StudyPool],
[10, 1, 1, 4, AssayRole.LinearityReference, SampleType.StudyPool],
[1, 1, 1, 5, AssayRole.LinearityReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.PrecisionReference, SampleType.StudyPool],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[numpy.nan, 1, 1, 1, AssayRole.Assay, SampleType.StudySample],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank],
[0, 1, 1, 1, AssayRole.Assay, SampleType.ProceduralBlank]],
columns=['Dilution', 'Batch', 'Correction Batch', 'Well', 'AssayRole', 'SampleType'])
msData.featureMetadata = pandas.DataFrame(data=[['Feature_1', 0.5, 100., 0.3],
['Feature_2', 0.55, 100.04, 0.3],
['Feature_3', 0.75, 200., 0.1],
['Feature_4', 0.9, 300., 0.1],
['Feature_5', 0.95, 300.08, 0.1]],
columns=['Feature Name','Retention Time','m/z','Peak Width'])
msData.featureMetadata['Exclusion Details'] = None
msData.featureMetadata['User Excluded'] = False
msData.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=msData.featureMetadata.index)
msData.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=msData.featureMetadata.index)
msData.initialiseMasks()
with self.subTest(msg='Default Parameters'):
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax RSD threshold'):
expectedFeatureMask = numpy.array([True, False, True, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=90, varianceRatio=0.1, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax correlation threshold'):
expectedFeatureMask = numpy.array([True, True, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter': True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=1.1, corrThreshold=0))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='High variance ratio'):
expectedFeatureMask = numpy.array([False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(rsdThreshold=30, varianceRatio=100, corrThreshold=0.7))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Lax blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':True}, **dict(blankThreshold=0.5))
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='No blank filter'):
expectedFeatureMask = numpy.array([True, False, False, False, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': False,'blankFilter':False})
numpy.testing.assert_array_equal(expectedFeatureMask, msData.featureMask)
with self.subTest(msg='Default withArtifactualFiltering'):
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1],[3,4]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter':True, 'correlationToDilutionFilter':True, 'varianceRatioFilter':True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData._tempArtifactualLinkageMatrix)
with self.subTest(msg='Altered withArtifactualFiltering parameters'):
expectedArtifactualLinkageMatrix = pandas.DataFrame(data=[[0,1]],columns=['node1','node2'])
msData.initialiseMasks()
msData.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True}, **dict(deltaMzArtifactual=300,
overlapThresholdArtifactual=0.1,
corrThresholdArtifactual=0.2))
self.assertEqual(msData.Attributes['filterParameters']['deltaMzArtifactual'], 300)
self.assertEqual(msData.Attributes['filterParameters']['overlapThresholdArtifactual'], 0.1)
self.assertEqual(msData.Attributes['filterParameters']['corrThresholdArtifactual'], 0.2)
assert_frame_equal(expectedArtifactualLinkageMatrix, msData._artifactualLinkageMatrix)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=False'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = False
expectedFeatureMask = numpy.array([True, False, False, False, False], dtype=bool)
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': False, 'blankFilter': True})
numpy.testing.assert_array_equal(expectedFeatureMask, msData2.featureMask)
with self.subTest(msg='withArtifactualFiltering=None, Attribute[artifactualFilter]=True'):
msData2 = copy.deepcopy(msData)
msData2.Attributes['artifactualFilter'] = True
expectedTempArtifactualLinkageMatrix = pandas.DataFrame(data=[[0, 1], [3, 4]], columns=['node1', 'node2'])
msData2.initialiseMasks()
msData2.updateMasks(featureFilters={'rsdFilter': True, 'correlationToDilutionFilter': True, 'varianceRatioFilter': True,
'artifactualFilter': True,'blankFilter':True})
assert_frame_equal(expectedTempArtifactualLinkageMatrix, msData2._tempArtifactualLinkageMatrix)
def test_updateMasks_samples(self):
from nPYc.enumerations import VariableType, DatasetLevel, AssayRole, SampleType
msData = nPYc.MSDataset('', fileType='empty')
msData.intensityData = numpy.zeros([18, 5],dtype=float)
msData.sampleMetadata['AssayRole'] = pandas.Series([AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.LinearityReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.Assay,
AssayRole.PrecisionReference,
AssayRole.PrecisionReference],
name='AssayRole',
dtype=object)
msData.sampleMetadata['SampleType'] = pandas.Series([SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudyPool,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.StudySample,
SampleType.ExternalReference,
SampleType.MethodReference],
name='SampleType',
dtype=object)
with self.subTest(msg='Default Parameters'):
expectedSampleMask = numpy.array([True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False)
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export SP and ER'):
expectedSampleMask = numpy.array([False, False, False, False, False, True, True, True, True, True, True, False, False, False, False, False, True, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool, SampleType.ExternalReference],
assayRoles=[AssayRole.PrecisionReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
with self.subTest(msg='Export Dilution Samples only'):
expectedSampleMask = numpy.array([True, True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, False, False], dtype=bool)
msData.initialiseMasks()
msData.updateMasks(withArtifactualFiltering=False, filterFeatures=False,
sampleTypes=[SampleType.StudyPool],
assayRoles=[AssayRole.LinearityReference])
numpy.testing.assert_array_equal(expectedSampleMask, msData.sampleMask)
def test_updateMasks_raises(self):
msData = nPYc.MSDataset('', fileType='empty')
with self.subTest(msg='Correlation'):
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=-1.01))
self.assertRaises(ValueError, msData.updateMasks, **dict(corrThreshold=1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(corrThreshold='0.7'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Blanks'):
self.assertRaises(TypeError, msData.updateMasks, **dict(blankThreshold='A string'))
with self.subTest(msg='RSD'):
self.assertRaises(ValueError, msData.updateMasks, **dict(rsdThreshold=-1.01))
self.assertRaises(TypeError, msData.updateMasks, **dict(rsdThreshold='30'))
with self.subTest(msg='Variance Ratio'):
self.assertRaises(TypeError, msData.updateMasks, **dict(varianceRatio='1.1'))
with self.subTest(msg='ArtifactualParameters'):
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':'A string', 'rsdFilter':False, 'blankFilter': False,
'correlationToDilutionFilter':False, 'varianceRatioFilter':False}, **dict(blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=1.01, blankThreshold=False))
self.assertRaises(ValueError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual=-0.01, blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(corrThresholdArtifactual='0.7', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(deltaMzArtifactual='100', blankThreshold=False))
self.assertRaises(TypeError, msData.updateMasks, featureFilters={'artifactualFilter':True}, **dict(overlapThresholdArtifactual='0.5', blankThreshold=False))
def test_applyMasks(self):
fit = numpy.random.randn(self.msData.noSamples, self.msData.noFeatures)
self.msData.fit = copy.deepcopy(fit)
deletedFeatures = numpy.random.randint(0, self.msData.noFeatures, size=2)
self.msData.featureMask[deletedFeatures] = False
fit = numpy.delete(fit, deletedFeatures, 1)
self.msData.applyMasks()
numpy.testing.assert_array_almost_equal(self.msData.fit, fit)
def test_correlationToDilution(self):
from nPYc.utilities._internal import _vcorrcoef
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset', sop='GenericMS')
dataset.sampleMetadata['SampleType'] = nPYc.enumerations.SampleType.StudyPool
dataset.sampleMetadata['AssayRole'] = nPYc.enumerations.AssayRole.LinearityReference
dataset.sampleMetadata['Well'] = 1
dataset.sampleMetadata['Dilution'] = numpy.linspace(1, noSamp, num=noSamp)
correlations = dataset.correlationToDilution
with self.subTest(msg='Checking default path'):
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
with self.subTest(msg='Checking corr exclusions'):
dataset.corrExclusions = None
numpy.testing.assert_array_almost_equal(correlations, _vcorrcoef(dataset.intensityData, dataset.sampleMetadata['Dilution'].values))
def test_correlateToDilution_raises(self):
noSamp = numpy.random.randint(30, high=500, size=None)
noFeat = numpy.random.randint(200, high=400, size=None)
dataset = generateTestDataset(noSamp, noFeat, dtype='MSDataset')
with self.subTest(msg='Unknown correlation type'):
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution, method='unknown')
with self.subTest(msg='No LR samples'):
dataset.sampleMetadata['AssayRole'] = AssayRole.Assay
self.assertRaises(ValueError, dataset._MSDataset__correlateToDilution)
with self.subTest(msg='No Dilution field'):
dataset.sampleMetadata.drop(['Dilution'], axis=1, inplace=True)
self.assertRaises(KeyError, dataset._MSDataset__correlateToDilution)
def test_validateObject(self):
with self.subTest(msg='validateObject successful on correct dataset'):
goodDataset = copy.deepcopy(self.msData)
self.assertEqual(goodDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True), {'Dataset': True, 'BasicMSDataset':True ,'QC':True, 'sampleMetadata':True})
with self.subTest(msg='BasicMSDataset fails on empty MSDataset'):
badDataset = nPYc.MSDataset('', fileType='empty')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset':False ,'QC':False, 'sampleMetadata':False})
with self.subTest(msg='check raise no warnings with raiseWarning=False'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 0)
with self.subTest(msg='check fail and raise warnings on bad Dataset'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'featureMetadata')
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': False, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 5)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.featureMetadata'" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to Dataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not conform to basic MSDataset" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have QC parameters" in str(w[3].message)
assert issubclass(w[4].category, UserWarning)
assert "Does not have sample metadata information" in str(w[4].message)
with self.subTest(msg='check raise warnings BasicMSDataset'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 4)
assert issubclass(w[0].category, UserWarning)
assert "Failure, no attribute 'self.Attributes['rtWindow']" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not conform to basic MSDataset:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have QC parameters" in str(w[2].message)
assert issubclass(w[3].category, UserWarning)
assert "Does not have sample metadata information" in str(w[3].message)
with self.subTest(msg='check raise warnings QC parameters'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 3)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata['Batch']' is <class 'str'>" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have QC parameters:" in str(w[1].message)
assert issubclass(w[2].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[2].message)
with self.subTest(msg='check raise warnings sampleMetadata'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# warning
result = badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=True)
# check it generally worked
self.assertEqual(result, {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
# check each warning
self.assertEqual(len(w), 2)
assert issubclass(w[0].category, UserWarning)
assert "Failure, 'self.sampleMetadata' lacks a 'Subject ID' column" in str(w[0].message)
assert issubclass(w[1].category, UserWarning)
assert "Does not have sample metadata information:" in str(w[1].message)
with self.subTest(msg='self.Attributes[\'rtWindow\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rtWindow']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rtWindow\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rtWindow'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'msPrecision\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['msPrecision']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'msPrecision\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['msPrecision'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'varianceRatio\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['varianceRatio']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'varianceRatio\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['varianceRatio'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'blankThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['blankThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'blankThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['blankThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrMethod\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrMethod']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrMethod\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrMethod'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'rsdThreshold\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['rsdThreshold']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'rsdThreshold\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['rsdThreshold'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'deltaMzArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['deltaMzArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'deltaMzArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['deltaMzArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'overlapThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['overlapThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'overlapThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['overlapThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'corrThresholdArtifactual\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['corrThresholdArtifactual']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'corrThresholdArtifactual\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['corrThresholdArtifactual'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'FeatureExtractionSoftware\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['FeatureExtractionSoftware']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'FeatureExtractionSoftware\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['FeatureExtractionSoftware'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Raw Data Path\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Raw Data Path']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Raw Data Path\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Raw Data Path'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.Attributes[\'Feature Names\'] does not exist'):
badDataset = copy.deepcopy(self.msData)
del badDataset.Attributes['Feature Names']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.Attributes[\'Feature Names\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.Attributes['Feature Names'] = 5.0
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.VariableType is not an enum VariableType'):
badDataset = copy.deepcopy(self.msData)
badDataset.VariableType = 'not an enum'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.corrExclusions does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'corrExclusions')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._correlationToDilution does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_correlationToDilution')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._correlationToDilution is not a numpy.ndarray'):
badDataset = copy.deepcopy(self.msData)
badDataset._correlationToDilution = 'not a numpy.ndarray'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._artifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_artifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._artifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._artifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self._tempArtifactualLinkageMatrix does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, '_tempArtifactualLinkageMatrix')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self._tempArtifactualLinkageMatrix is not a pandas.DataFrame'):
badDataset = copy.deepcopy(self.msData)
badDataset._tempArtifactualLinkageMatrix = 'not a pandas.DataFrame'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.fileName does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'fileName')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.fileName is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.fileName = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='self.filePath does not exist'):
badDataset = copy.deepcopy(self.msData)
delattr(badDataset, 'filePath')
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(AttributeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.filePath is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.filePath = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample File Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample File Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'AssayRole\'] is not an enum \'AssayRole\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['AssayRole'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'SampleType\'] is not an enum \'SampleType\''):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['SampleType'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Dilution\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Dilution'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Correction Batch\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Correction Batch'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Run Order\'] is not an int'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Run Order'] = 'not an int'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Acquired Time\'] is not a datetime'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Acquired Time'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample Base Name\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample Base Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Matrix column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Matrix'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Matrix\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Matrix'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata does not have a Subject ID column'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata.drop(['Subject ID'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Subject ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Subject ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMetadata[\'Sample ID\'] is not str'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMetadata['Sample ID'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': True, 'QC': True, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop([0], axis=0, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not a str'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = 5.
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Feature Name\'] is not unique'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Feature Name'] = ['Feature1','Feature1','Feature1']
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a m/z column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['m/z'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'m/z\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['m/z'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata does not have a Retention Time column'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata.drop(['Retention Time'], axis=1, inplace=True)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(LookupError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMetadata[\'Retention Time\'] is not an int or float'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMetadata['Retention Time'] = 'not an int or float'
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(TypeError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.sampleMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.sampleMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask has not been initialised'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.array(False, dtype=bool)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
with self.subTest(msg='if self.featureMask does not have the same number of samples as self._intensityData'):
badDataset = copy.deepcopy(self.msData)
badDataset.featureMask = numpy.squeeze(numpy.ones([5, 1], dtype=bool), axis=1)
self.assertEqual(badDataset.validateObject(verbose=False, raiseError=False, raiseWarning=False), {'Dataset': True, 'BasicMSDataset': False, 'QC': False, 'sampleMetadata': False})
self.assertRaises(ValueError, badDataset.validateObject, verbose=False, raiseError=True, raiseWarning=False)
class test_msdataset_batch_inference(unittest.TestCase):
"""
Check batches are generated and amended correctly
"""
def setUp(self):
self.msData = nPYc.MSDataset('', fileType='empty')
self.msData.sampleMetadata['Sample File Name'] = ['Test_RPOS_ToF04_B1S1_SR',
'Test_RPOS_ToF04_B1S2_SR',
'Test_RPOS_ToF04_B1S3_SR',
'Test_RPOS_ToF04_B1S4_SR',
'Test_RPOS_ToF04_B1S5_SR',
'Test_RPOS_ToF04_P1W01',
'Test_RPOS_ToF04_P1W02_SR',
'Test_RPOS_ToF04_P1W03',
'Test_RPOS_ToF04_B1E1_SR',
'Test_RPOS_ToF04_B1E2_SR',
'Test_RPOS_ToF04_B1E3_SR',
'Test_RPOS_ToF04_B1E4_SR',
'Test_RPOS_ToF04_B1E5_SR',
'Test_RPOS_ToF04_B2S1_SR',
'Test_RPOS_ToF04_B2S2_SR',
'Test_RPOS_ToF04_B2S3_SR',
'Test_RPOS_ToF04_B2S4_SR',
'Test_RPOS_ToF04_B2S5_SR',
'Test_RPOS_ToF04_P2W01',
'Test_RPOS_ToF04_P2W02_SR',
'Test_RPOS_ToF04_P3W03',
'Test_RPOS_ToF04_B2S1_SR_2',
'Test_RPOS_ToF04_B2S2_SR_2',
'Test_RPOS_ToF04_B2S3_SR_2',
'Test_RPOS_ToF04_B2S4_SR_2',
'Test_RPOS_ToF04_B2S5_SR_2',
'Test_RPOS_ToF04_P3W03_b',
'Test_RPOS_ToF04_B2E1_SR',
'Test_RPOS_ToF04_B2E2_SR',
'Test_RPOS_ToF04_B2E3_SR',
'Test_RPOS_ToF04_B2E4_SR',
'Test_RPOS_ToF04_B2E5_SR',
'Test_RPOS_ToF04_B2SRD1']
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData.sampleMetadata['Run Order'] = self.msData.sampleMetadata.index + 1
def test_fillbatches_correctionbatch(self):
self.msData._fillBatches()
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_fillbatches_warns(self):
self.msData.sampleMetadata.drop('Run Order', axis=1, inplace=True)
self.assertWarnsRegex(UserWarning, 'Unable to infer batches without run order, skipping\.', self.msData._fillBatches)
def test_amendbatches(self):
"""
"""
self.msData._fillBatches()
self.msData.amendBatches(20)
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
def test_msdataset_addsampleinfo_batches(self):
self.msData.addSampleInfo(descriptionFormat='Batches')
correctionBatch = pandas.Series([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, numpy.nan],
name='Correction Batch',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Correction Batch'], correctionBatch)
class test_msdataset_import_undefined(unittest.TestCase):
"""
Test that we raise an error when passing a fileType we don't understand.
"""
def test_raise_notimplemented(self):
self.assertRaises(NotImplementedError, nPYc.MSDataset, os.path.join('nopath'), fileType='Unknown filetype')
class test_msdataset_import_QI(unittest.TestCase):
"""
Test import from QI csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_QI.csv'), fileType='QI')
self.msData.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (115, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_Blank01', 'UnitTest1_LPOS_ToF02_Blank02',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W08_x',
'UnitTest1_LPOS_ToF02_S1W11_LTR', 'UnitTest1_LPOS_ToF02_S1W12_SR',
'UnitTest1_LPOS_ToF02_ERROR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
with self.subTest(msg='Checking Peak Widths'):
peakWidth = pandas.Series([0.03931667,
0.01403333,
0.01683333,
0.01683333],
name='Peak Width',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Peak Width'], peakWidth)
with self.subTest(msg='Checking m/z'):
mz = pandas.Series([262.0378339,
293.1811941,
145.0686347,
258.1033447],
name='m/z',
dtype='float')
assert_series_equal(self.msData.featureMetadata['m/z'], mz)
with self.subTest(msg='Checking Retention Time'):
rt = pandas.Series([3.17485,
3.17485,
3.17485,
3.17485],
name='Retention Time',
dtype='float')
assert_series_equal(self.msData.featureMetadata['Retention Time'], rt)
with self.subTest(msg='Checking Isotope Distribution'):
isotope = pandas.Series(['100 - 36.9',
'100 - 11.9',
'100 - 8.69',
'100 - 73.4'],
name='Isotope Distribution',
dtype='str')
assert_series_equal(self.msData.featureMetadata['Isotope Distribution'], isotope)
def test_dilutionlevels(self):
dilution = pandas.Series([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 10., 10., 10., 10., 10., 10., 10., 10., 10., 10., 20., 20., 20., 20., 20.,
40., 40., 40., 60., 60., 60., 80., 80., 80., 80., 80., 100., 100., 100., 100., 100., 100., 100., 100., 100., 100.,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan,
numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan],
name='Dilution',
dtype='float')
assert_series_equal(self.msData.sampleMetadata['Dilution'], dilution)
def test_feature_correlation(self):
self.msData.addSampleInfo(descriptionFormat='Raw Data', filePath=os.path.join('..','..','npc-standard-project','Raw_Data'))
self.msData.addSampleInfo(descriptionFormat='Batches')
with self.subTest(msg='Testing Pearson correlations'):
correlations = numpy.array([0.99999997, 0.32017508, 1., -0.0693418])
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
with self.subTest(msg='Testing Spearman correlations'):
correlations = numpy.array([0.9992837, 0.34708745, 1., -0.038844])
self.msData.Attributes['corrMethod'] = 'spearman'
numpy.testing.assert_array_almost_equal(self.msData.correlationToDilution, correlations)
def test_variabletype(self):
self.assertEqual(self.msData.VariableType, nPYc.enumerations.VariableType.Discrete)
class test_msdataset_import_xcms(unittest.TestCase):
"""
Test import from XCMS csv files
"""
def setUp(self):
self.msData = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms.csv'), fileType='XCMS', noFeatureParams=9)
self.msData_PeakTable = nPYc.MSDataset(os.path.join('..','..','npc-standard-project','Derived_Data','UnitTest1_PCSOP.069_xcms_peakTable.csv'), fileType='XCMS', noFeatureParams=8)
self.msData.addSampleInfo(descriptionFormat='Filenames')
self.msData_PeakTable.addSampleInfo(descriptionFormat='Filenames')
def test_dimensions(self):
self.assertEqual((self.msData.noSamples, self.msData.noFeatures), (111, 4))
self.assertEqual((self.msData_PeakTable.noSamples, self.msData_PeakTable.noFeatures), (111, 4))
def test_samples(self):
samples = pandas.Series(['UnitTest1_LPOS_ToF02_B1SRD01', 'UnitTest1_LPOS_ToF02_B1SRD02',
'UnitTest1_LPOS_ToF02_B1SRD03', 'UnitTest1_LPOS_ToF02_B1SRD04',
'UnitTest1_LPOS_ToF02_B1SRD05', 'UnitTest1_LPOS_ToF02_B1SRD06',
'UnitTest1_LPOS_ToF02_B1SRD07', 'UnitTest1_LPOS_ToF02_B1SRD08',
'UnitTest1_LPOS_ToF02_B1SRD09', 'UnitTest1_LPOS_ToF02_B1SRD10',
'UnitTest1_LPOS_ToF02_B1SRD11', 'UnitTest1_LPOS_ToF02_B1SRD12',
'UnitTest1_LPOS_ToF02_B1SRD13', 'UnitTest1_LPOS_ToF02_B1SRD14',
'UnitTest1_LPOS_ToF02_B1SRD15', 'UnitTest1_LPOS_ToF02_B1SRD16',
'UnitTest1_LPOS_ToF02_B1SRD17', 'UnitTest1_LPOS_ToF02_B1SRD18',
'UnitTest1_LPOS_ToF02_B1SRD19', 'UnitTest1_LPOS_ToF02_B1SRD20',
'UnitTest1_LPOS_ToF02_B1SRD21', 'UnitTest1_LPOS_ToF02_B1SRD22',
'UnitTest1_LPOS_ToF02_B1SRD23', 'UnitTest1_LPOS_ToF02_B1SRD24',
'UnitTest1_LPOS_ToF02_B1SRD25', 'UnitTest1_LPOS_ToF02_B1SRD26',
'UnitTest1_LPOS_ToF02_B1SRD27', 'UnitTest1_LPOS_ToF02_B1SRD28',
'UnitTest1_LPOS_ToF02_B1SRD29', 'UnitTest1_LPOS_ToF02_B1SRD30',
'UnitTest1_LPOS_ToF02_B1SRD31', 'UnitTest1_LPOS_ToF02_B1SRD32',
'UnitTest1_LPOS_ToF02_B1SRD33', 'UnitTest1_LPOS_ToF02_B1SRD34',
'UnitTest1_LPOS_ToF02_B1SRD35', 'UnitTest1_LPOS_ToF02_B1SRD36',
'UnitTest1_LPOS_ToF02_B1SRD37', 'UnitTest1_LPOS_ToF02_B1SRD38',
'UnitTest1_LPOS_ToF02_B1SRD39', 'UnitTest1_LPOS_ToF02_B1SRD40',
'UnitTest1_LPOS_ToF02_B1SRD41', 'UnitTest1_LPOS_ToF02_B1SRD42',
'UnitTest1_LPOS_ToF02_B1SRD43', 'UnitTest1_LPOS_ToF02_B1SRD44',
'UnitTest1_LPOS_ToF02_B1SRD45', 'UnitTest1_LPOS_ToF02_B1SRD46',
'UnitTest1_LPOS_ToF02_B1SRD47', 'UnitTest1_LPOS_ToF02_B1SRD48',
'UnitTest1_LPOS_ToF02_B1SRD49', 'UnitTest1_LPOS_ToF02_B1SRD50',
'UnitTest1_LPOS_ToF02_B1SRD51', 'UnitTest1_LPOS_ToF02_B1SRD52',
'UnitTest1_LPOS_ToF02_B1SRD53', 'UnitTest1_LPOS_ToF02_B1SRD54',
'UnitTest1_LPOS_ToF02_B1SRD55', 'UnitTest1_LPOS_ToF02_B1SRD56',
'UnitTest1_LPOS_ToF02_B1SRD57', 'UnitTest1_LPOS_ToF02_B1SRD58',
'UnitTest1_LPOS_ToF02_B1SRD59', 'UnitTest1_LPOS_ToF02_B1SRD60',
'UnitTest1_LPOS_ToF02_B1SRD61', 'UnitTest1_LPOS_ToF02_B1SRD62',
'UnitTest1_LPOS_ToF02_B1SRD63', 'UnitTest1_LPOS_ToF02_B1SRD64',
'UnitTest1_LPOS_ToF02_B1SRD65', 'UnitTest1_LPOS_ToF02_B1SRD66',
'UnitTest1_LPOS_ToF02_B1SRD67', 'UnitTest1_LPOS_ToF02_B1SRD68',
'UnitTest1_LPOS_ToF02_B1SRD69', 'UnitTest1_LPOS_ToF02_B1SRD70',
'UnitTest1_LPOS_ToF02_B1SRD71', 'UnitTest1_LPOS_ToF02_B1SRD72',
'UnitTest1_LPOS_ToF02_B1SRD73', 'UnitTest1_LPOS_ToF02_B1SRD74',
'UnitTest1_LPOS_ToF02_B1SRD75', 'UnitTest1_LPOS_ToF02_B1SRD76',
'UnitTest1_LPOS_ToF02_B1SRD77', 'UnitTest1_LPOS_ToF02_B1SRD78',
'UnitTest1_LPOS_ToF02_B1SRD79', 'UnitTest1_LPOS_ToF02_B1SRD80',
'UnitTest1_LPOS_ToF02_B1SRD81', 'UnitTest1_LPOS_ToF02_B1SRD82',
'UnitTest1_LPOS_ToF02_B1SRD83', 'UnitTest1_LPOS_ToF02_B1SRD84',
'UnitTest1_LPOS_ToF02_B1SRD85', 'UnitTest1_LPOS_ToF02_B1SRD86',
'UnitTest1_LPOS_ToF02_B1SRD87', 'UnitTest1_LPOS_ToF02_B1SRD88',
'UnitTest1_LPOS_ToF02_B1SRD89', 'UnitTest1_LPOS_ToF02_B1SRD90',
'UnitTest1_LPOS_ToF02_B1SRD91', 'UnitTest1_LPOS_ToF02_B1SRD92',
'UnitTest1_LPOS_ToF02_B1E1_SR', 'UnitTest1_LPOS_ToF02_B1E2_SR',
'UnitTest1_LPOS_ToF02_B1E3_SR', 'UnitTest1_LPOS_ToF02_B1E4_SR',
'UnitTest1_LPOS_ToF02_B1E5_SR', 'UnitTest1_LPOS_ToF02_B1S1_SR',
'UnitTest1_LPOS_ToF02_B1S2_SR', 'UnitTest1_LPOS_ToF02_B1S3_SR',
'UnitTest1_LPOS_ToF02_B1S4_SR', 'UnitTest1_LPOS_ToF02_B1S5_SR',
'UnitTest1_LPOS_ToF02_S1W01', 'UnitTest1_LPOS_ToF02_S1W02',
'UnitTest1_LPOS_ToF02_S1W03', 'UnitTest1_LPOS_ToF02_S1W04',
'UnitTest1_LPOS_ToF02_S1W05', 'UnitTest1_LPOS_ToF02_S1W06',
'UnitTest1_LPOS_ToF02_S1W07', 'UnitTest1_LPOS_ToF02_S1W11_LTR',
'UnitTest1_LPOS_ToF02_S1W12_SR'],
name='Sample File Name',
dtype=str)
assert_series_equal(self.msData.sampleMetadata['Sample File Name'], samples)
assert_series_equal(self.msData_PeakTable.sampleMetadata['Sample File Name'], samples)
def test_featuremetadata_import(self):
with self.subTest(msg='Checking Feature Names'):
features = pandas.Series(['3.17_262.0378m/z',
'3.17_293.1812m/z',
'3.17_145.0686m/z',
'3.17_258.1033m/z'],
name='Feature Name',
dtype='str')
|
assert_series_equal(self.msData.featureMetadata['Feature Name'], features)
|
pandas.testing.assert_series_equal
|
"""A collection of shared utilities for all encoders, not intended for external use."""
from abc import abstractmethod
from enum import Enum, auto
import pandas as pd
import numpy as np
import sklearn.base
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.exceptions import NotFittedError
from typing import Dict, List, Optional, Union
from scipy.sparse import csr_matrix
__author__ = 'willmcginnis'
def convert_cols_to_list(cols):
if isinstance(cols, pd.Series):
return cols.tolist()
elif isinstance(cols, np.ndarray):
return cols.tolist()
elif np.isscalar(cols):
return [cols]
elif isinstance(cols, set):
return list(cols)
elif isinstance(cols, tuple):
return list(cols)
elif
|
pd.api.types.is_categorical_dtype(cols)
|
pandas.api.types.is_categorical_dtype
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
def salvar_csv(x_mean,y_mean,z_mean,nome):
dfdict = {}
dfdict["Mean x"] = x_mean
dfdict["Mean y"] = y_mean
dfdict["Mean z"] = z_mean
df =
|
pd.DataFrame(dfdict)
|
pandas.DataFrame
|
# nicher
'''
This script contains functions for calculating species distribution models
(SDMs), also known as ecological niche models (ENMs) - hence, Nicher. This
script is intended to accept a digital elevation model (DEM) geotiff and any
pre-generated derivatives (e.g. slope, aspect, topographic wetness index, etc.)
that are generated from it. The methodology is based on the approach used by
MaxEnt (https://biodiversityinformatics.amnh.org/open_source/maxent/), but replaces
the Logistic Regression technique with ExtraTrees and RandomForest from Sklearn.
Various functions in Nicher are based on the excellent RSGISLib SDM library
(https://www.rsgislib.org/rsgislib_sdm.html), especially the creation of response
curves and lek matrices. If you cite this library, please also cite RSGISLib.
See associated Jupyter Notebook Nicher.ipynb for a basic tutorial on the
main functions and order of execution.
Links:
MaxEnt: https://biodiversityinformatics.amnh.org/open_source/maxent
RSGISLib: https://www.rsgislib.org/rsgislib_sdm
Contacts:
<NAME>: <EMAIL>
'''
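# Illustrative sketch (an assumption for this description, not part of Nicher itself):
# the workflow above reduced to its core - fit ExtraTrees on presence/background
# samples built from a few topographic covariates, then score with ROC AUC. The
# function name and the synthetic data are made up purely for demonstration.
def _sdm_sketch(seed=0):
    """Fit ExtraTrees on synthetic presence/background data and return its test AUC."""
    import numpy as np
    from sklearn.ensemble import ExtraTreesClassifier
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import roc_auc_score
    rng = np.random.default_rng(seed)
    covariates = rng.normal(size=(500, 4))                      # e.g. elevation, slope, aspect, TWI
    presence = (covariates[:, 0] + 0.5 * covariates[:, 1] > 0)  # synthetic presence labels
    x_train, x_test, y_train, y_test = train_test_split(
        covariates, presence, test_size=0.3, random_state=seed)
    model = ExtraTreesClassifier(n_estimators=200, random_state=seed)
    model.fit(x_train, y_train)
    return roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])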
# import required libraries
import os
import sys
import random
import math
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import ExtraTreesClassifier as et
from sklearn.ensemble import RandomForestClassifier as rt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import log_loss
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import normalized_mutual_info_score
from sklearn.metrics import cohen_kappa_score
from scipy.stats import pointbiserialr
sys.path.append('../../shared')
import tools
# increase number of pandas rows displayed
|
pd.set_option('display.max_rows', 1000)
|
pandas.set_option
|
import numpy as np
from PIL import Image
import cv2
import io
import time
import pandas as pd
from random import randint
import os
import selenium
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import tensorflow as tf
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import SGD, Adam, Nadam
from tensorflow.keras.callbacks import TensorBoard
from collections import deque
import random
import pickle
import base64
from io import BytesIO
import json
# Path Variables
GAME_URL = "http://wayou.github.io/t-rex-runner/"
CHROME_DRIVER_PATH = "./chromedriver"
LOSS_FILE_PATH = "./objects/loss_df.csv"
ACTIONS_FILE_PATH = "./objects/actions_df.csv"
Q_VALUE_FILE_PATH = "./objects/q_values.csv"
SCORE_FILE_PATH = "./objects/scores_df.csv"
# Script to create an id for the canvas, for faster selection from the Document Object Model (DOM)
init_script = "document.getElementsByClassName('runner-canvas')[0].id = 'runner-canvas'"
# Script to get image from canvas
getbase64Script = "canvasRunner = document.getElementById('runner-canvas'); \
return canvasRunner.toDataURL().substring(22)"
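# Illustrative sketch (an assumption, not shown in the original script): decoding the
# base64 PNG string returned by getbase64Script into a small grayscale frame. The
# helper name decode_canvas_frame is hypothetical; it relies only on the imports above.
def decode_canvas_frame(b64_png, size=(80, 80)):
    """Decode a base64-encoded PNG into a grayscale uint8 array of shape `size`."""
    image = Image.open(BytesIO(base64.b64decode(b64_png))).convert("L")
    return np.array(image.resize(size), dtype=np.uint8)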
# Game Parameter Constants
ACTIONS = 2 # Possible actions: "Jump" or "Do Nothing"
GAMMA = 0.9 # Decay rate of past observations, original 0.9
OBSERVATION = 100. # Timesteps to observe before training
EXPLORE = 100000 # Frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # Final value of epsilon
INITIAL_EPSILON = 0.1 # Initial value of epsilon
REPLAY_MEMORY = 80000 # Number of previous transitions to remember
BATCH = 32 # Size of minibatch
FRAME_PER_ACTION = 1
LEARNING_RATE = 0.0003
img_rows, img_cols = 80, 80
img_channels = 4 # We stack 4 frames
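# Illustrative sketch (an assumption, not part of the original training loop): the
# epsilon-greedy schedule implied by the constants above - constant during the
# observation phase, then linearly annealed over EXPLORE frames. The helper name
# annealed_epsilon is hypothetical.
def annealed_epsilon(timestep,
                     observe=OBSERVATION,
                     explore=EXPLORE,
                     eps_start=INITIAL_EPSILON,
                     eps_end=FINAL_EPSILON):
    """Return epsilon for a given training timestep."""
    if timestep <= observe:
        return eps_start
    fraction = min((timestep - observe) / explore, 1.0)
    return eps_start - fraction * (eps_start - eps_end)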
# Initialize log structures from file if they exist or else create new
loss_df = pd.read_csv(LOSS_FILE_PATH) if os.path.isfile(
LOSS_FILE_PATH) else pd.DataFrame(columns=["loss"])
score_df = pd.read_csv(SCORE_FILE_PATH) if os.path.isfile(
SCORE_FILE_PATH) else pd.DataFrame(columns=["Scores"])
actions_df =
|
pd.read_csv(ACTIONS_FILE_PATH)
|
pandas.read_csv
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
import array as arr
import datetime
import io
import operator
import random
import re
import string
import textwrap
from copy import copy
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from numba import cuda
import cudf
from cudf.core._compat import PANDAS_GE_110, PANDAS_GE_120
from cudf.core.column import column
from cudf.tests import utils
from cudf.tests.utils import (
ALL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
assert_eq,
assert_exceptions_equal,
does_not_raise,
gen_rand,
)
def test_init_via_list_of_tuples():
data = [
(5, "cats", "jump", np.nan),
(2, "dogs", "dig", 7.5),
(3, "cows", "moo", -2.1, "occasionally"),
]
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def _dataframe_na_data():
return [
pd.DataFrame(
{
"a": [0, 1, 2, np.nan, 4, None, 6],
"b": [np.nan, None, "u", "h", "d", "a", "m"],
},
index=["q", "w", "e", "r", "t", "y", "u"],
),
pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": ["a", "b", "u", "h", "d"]}),
pd.DataFrame(
{
"a": [None, None, np.nan, None],
"b": [np.nan, None, np.nan, None],
}
),
pd.DataFrame({"a": []}),
pd.DataFrame({"a": [np.nan], "b": [None]}),
pd.DataFrame({"a": ["a", "b", "c", None, "e"]}),
pd.DataFrame({"a": ["a", "b", "c", "d", "e"]}),
]
@pytest.mark.parametrize("rows", [0, 1, 2, 100])
def test_init_via_list_of_empty_tuples(rows):
data = [()] * rows
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(
pdf,
gdf,
check_like=True,
check_column_type=False,
check_index_type=False,
)
@pytest.mark.parametrize(
"dict_of_series",
[
{"a": pd.Series([1.0, 2.0, 3.0])},
{"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 4.0], index=[1, 2, 3]),
},
{"a": [1, 2, 3], "b": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6])},
{
"a": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"b": pd.Series([1.0, 2.0, 4.0], index=["c", "d", "e"]),
},
{
"a": pd.Series(
["a", "b", "c"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
"b": pd.Series(
["a", " b", "d"],
index=pd.MultiIndex.from_tuples([(1, 2), (1, 3), (2, 3)]),
),
},
],
)
def test_init_from_series_align(dict_of_series):
pdf = pd.DataFrame(dict_of_series)
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
for key in dict_of_series:
if isinstance(dict_of_series[key], pd.Series):
dict_of_series[key] = cudf.Series(dict_of_series[key])
gdf = cudf.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
("dict_of_series", "expectation"),
[
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 5, 6]),
},
pytest.raises(
ValueError, match="Cannot align indices with non-unique values"
),
),
(
{
"a": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
"b": pd.Series(["a", "b", "c"], index=[4, 4, 5]),
},
does_not_raise(),
),
],
)
def test_init_from_series_align_nonunique(dict_of_series, expectation):
with expectation:
gdf = cudf.DataFrame(dict_of_series)
if expectation == does_not_raise():
pdf = pd.DataFrame(dict_of_series)
assert_eq(pdf, gdf)
def test_init_unaligned_with_index():
pdf = pd.DataFrame(
{
"a": pd.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": pd.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
gdf = cudf.DataFrame(
{
"a": cudf.Series([1.0, 2.0, 3.0], index=[4, 5, 6]),
"b": cudf.Series([1.0, 2.0, 3.0], index=[1, 2, 3]),
},
index=[7, 8, 9],
)
assert_eq(pdf, gdf, check_dtype=False)
def test_series_basic():
# Make series from buffer
a1 = np.arange(10, dtype=np.float64)
series = cudf.Series(a1)
assert len(series) == 10
np.testing.assert_equal(series.to_array(), np.hstack([a1]))
def test_series_from_cupy_scalars():
data = [0.1, 0.2, 0.3]
data_np = np.array(data)
data_cp = cupy.array(data)
s_np = cudf.Series([data_np[0], data_np[2]])
s_cp = cudf.Series([data_cp[0], data_cp[2]])
assert_eq(s_np, s_cp)
@pytest.mark.parametrize("a", [[1, 2, 3], [1, 10, 30]])
@pytest.mark.parametrize("b", [[4, 5, 6], [-11, -100, 30]])
def test_append_index(a, b):
df = pd.DataFrame()
df["a"] = a
df["b"] = b
gdf = cudf.DataFrame()
gdf["a"] = a
gdf["b"] = b
# Check the default index after appending two columns (Series)
expected = df.a.append(df.b)
actual = gdf.a.append(gdf.b)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
expected = df.a.append(df.b, ignore_index=True)
actual = gdf.a.append(gdf.b, ignore_index=True)
assert len(expected) == len(actual)
assert_eq(expected.index, actual.index)
def test_series_init_none():
# test for creating empty series
# 1: without initializing
sr1 = cudf.Series()
got = sr1.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
# 2: Using `None` as an initializer
sr2 = cudf.Series(None)
got = sr2.to_string()
expect = "Series([], dtype: float64)"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_basic():
np.random.seed(0)
df = cudf.DataFrame()
# Populate with cuda memory
df["keys"] = np.arange(10, dtype=np.float64)
np.testing.assert_equal(df["keys"].to_array(), np.arange(10))
assert len(df) == 10
# Populate with numpy array
rnd_vals = np.random.random(10)
df["vals"] = rnd_vals
np.testing.assert_equal(df["vals"].to_array(), rnd_vals)
assert len(df) == 10
assert tuple(df.columns) == ("keys", "vals")
# Make another dataframe
df2 = cudf.DataFrame()
df2["keys"] = np.array([123], dtype=np.float64)
df2["vals"] = np.array([321], dtype=np.float64)
# Concat
df = cudf.concat([df, df2])
assert len(df) == 11
hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])
hvals = np.asarray(rnd_vals.tolist() + [321])
np.testing.assert_equal(df["keys"].to_array(), hkeys)
np.testing.assert_equal(df["vals"].to_array(), hvals)
# As matrix
mat = df.as_matrix()
expect = np.vstack([hkeys, hvals]).T
np.testing.assert_equal(mat, expect)
# test dataframe with tuple name
df_tup = cudf.DataFrame()
data = np.arange(10)
df_tup[(1, "foobar")] = data
np.testing.assert_equal(data, df_tup[(1, "foobar")].to_array())
df = cudf.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
pdf = pd.DataFrame(pd.DataFrame({"a": [1, 2, 3], "c": ["a", "b", "c"]}))
assert_eq(df, pdf)
gdf = cudf.DataFrame({"id": [0, 1], "val": [None, None]})
gdf["val"] = gdf["val"].astype("int")
assert gdf["val"].isnull().all()
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"columns", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_columns(pdf, columns, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(columns=columns, inplace=inplace)
actual = gdf.drop(columns=columns, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_0(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=0, inplace=inplace)
actual = gdf.drop(labels=labels, axis=0, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"index",
[[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_index(pdf, index, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace)
actual = gdf.drop(index=index, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5},
index=pd.MultiIndex(
levels=[
["lama", "cow", "falcon"],
["speed", "weight", "length"],
],
codes=[
[0, 0, 0, 1, 1, 1, 2, 2, 2, 1],
[0, 1, 2, 0, 1, 2, 0, 1, 2, 1],
],
),
)
],
)
@pytest.mark.parametrize(
"index,level",
[
("cow", 0),
("lama", 0),
("falcon", 0),
("speed", 1),
("weight", 1),
("length", 1),
pytest.param(
"cow",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"lama",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
pytest.param(
"falcon",
None,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/36293"
),
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_multiindex(pdf, index, level, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(index=index, inplace=inplace, level=level)
actual = gdf.drop(index=index, inplace=inplace, level=level)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"pdf",
[
pd.DataFrame({"a": range(10), "b": range(10, 20), "c": range(1, 11)}),
pd.DataFrame(
{"a": range(10), "b": range(10, 20), "d": ["a", "v"] * 5}
),
],
)
@pytest.mark.parametrize(
"labels", [["a"], ["b"], "a", "b", ["a", "b"]],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_dataframe_drop_labels_axis_1(pdf, labels, inplace):
pdf = pdf.copy()
gdf = cudf.from_pandas(pdf)
expected = pdf.drop(labels=labels, axis=1, inplace=inplace)
actual = gdf.drop(labels=labels, axis=1, inplace=inplace)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
def test_dataframe_drop_error():
df = cudf.DataFrame({"a": [1], "b": [2], "c": [3]})
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "d"}),
rfunc_args_and_kwargs=([], {"columns": "d"}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
rfunc_args_and_kwargs=([], {"columns": ["a", "d", "b"]}),
expected_error_message="column 'd' does not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}),
expected_error_message="Cannot specify both",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"axis": 1}),
rfunc_args_and_kwargs=([], {"axis": 1}),
expected_error_message="Need to specify at least",
)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([[2, 0]],),
rfunc_args_and_kwargs=([[2, 0]],),
expected_error_message="One or more values not found in axis",
)
def test_dataframe_drop_raises():
df = cudf.DataFrame(
{"a": [1, 2, 3], "c": [10, 20, 30]}, index=["x", "y", "z"]
)
pdf = df.to_pandas()
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=(["p"],),
rfunc_args_and_kwargs=(["p"],),
expected_error_message="One or more values not found in axis",
)
# label dtype mismatch
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([3],),
rfunc_args_and_kwargs=([3],),
expected_error_message="One or more values not found in axis",
)
expect = pdf.drop("p", errors="ignore")
actual = df.drop("p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"columns": "p"}),
rfunc_args_and_kwargs=([], {"columns": "p"}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(columns="p", errors="ignore")
actual = df.drop(columns="p", errors="ignore")
assert_eq(actual, expect)
assert_exceptions_equal(
lfunc=pdf.drop,
rfunc=df.drop,
lfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
rfunc_args_and_kwargs=([], {"labels": "p", "axis": 1}),
expected_error_message="column 'p' does not exist",
)
expect = pdf.drop(labels="p", axis=1, errors="ignore")
actual = df.drop(labels="p", axis=1, errors="ignore")
assert_eq(actual, expect)
def test_dataframe_column_add_drop_via_setitem():
df = cudf.DataFrame()
data = np.asarray(range(10))
df["a"] = data
df["b"] = data
assert tuple(df.columns) == ("a", "b")
del df["a"]
assert tuple(df.columns) == ("b",)
df["c"] = data
assert tuple(df.columns) == ("b", "c")
df["a"] = data
assert tuple(df.columns) == ("b", "c", "a")
def test_dataframe_column_set_via_attr():
data_0 = np.asarray([0, 2, 4, 5])
data_1 = np.asarray([1, 4, 2, 3])
data_2 = np.asarray([2, 0, 3, 0])
df = cudf.DataFrame({"a": data_0, "b": data_1, "c": data_2})
for i in range(10):
df.c = df.a
assert assert_eq(df.c, df.a, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
df.c = df.b
assert assert_eq(df.c, df.b, check_names=False)
assert tuple(df.columns) == ("a", "b", "c")
def test_dataframe_column_drop_via_attr():
df = cudf.DataFrame({"a": []})
with pytest.raises(AttributeError):
del df.a
assert tuple(df.columns) == tuple("a")
@pytest.mark.parametrize("axis", [0, "index"])
def test_dataframe_index_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper={1: 5, 2: 6}, axis=axis)
got = gdf.rename(mapper={1: 5, 2: 6}, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(index={1: 5, 2: 6})
got = gdf.rename(index={1: 5, 2: 6})
assert_eq(expect, got)
expect = pdf.rename({1: 5, 2: 6})
got = gdf.rename({1: 5, 2: 6})
assert_eq(expect, got)
# `pandas` can support indexes with mixed values. We throw a
# `NotImplementedError`.
with pytest.raises(NotImplementedError):
gdf.rename(mapper={1: "x", 2: "y"}, axis=axis)
def test_dataframe_MI_rename():
gdf = cudf.DataFrame(
{"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)}
)
gdg = gdf.groupby(["a", "b"]).count()
pdg = gdg.to_pandas()
expect = pdg.rename(mapper={1: 5, 2: 6}, axis=0)
got = gdg.rename(mapper={1: 5, 2: 6}, axis=0)
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [1, "columns"])
def test_dataframe_column_rename(axis):
pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.rename(mapper=lambda name: 2 * name, axis=axis)
got = gdf.rename(mapper=lambda name: 2 * name, axis=axis)
assert_eq(expect, got)
expect = pdf.rename(columns=lambda name: 2 * name)
got = gdf.rename(columns=lambda name: 2 * name)
assert_eq(expect, got)
rename_mapper = {"a": "z", "b": "y", "c": "x"}
expect = pdf.rename(columns=rename_mapper)
got = gdf.rename(columns=rename_mapper)
assert_eq(expect, got)
def test_dataframe_pop():
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [7.0, 8.0, 9.0]}
)
gdf = cudf.DataFrame.from_pandas(pdf)
# Test non-existing column error
with pytest.raises(KeyError) as raises:
gdf.pop("fake_colname")
raises.match("fake_colname")
# check pop numeric column
pdf_pop = pdf.pop("a")
gdf_pop = gdf.pop("a")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check string column
pdf_pop = pdf.pop("b")
gdf_pop = gdf.pop("b")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check float column and empty dataframe
pdf_pop = pdf.pop("c")
gdf_pop = gdf.pop("c")
assert_eq(pdf_pop, gdf_pop)
assert_eq(pdf, gdf)
# check empty dataframe edge case
empty_pdf = pd.DataFrame(columns=["a", "b"])
empty_gdf = cudf.DataFrame(columns=["a", "b"])
pb = empty_pdf.pop("b")
gb = empty_gdf.pop("b")
assert len(pb) == len(gb)
assert empty_pdf.empty and empty_gdf.empty
@pytest.mark.parametrize("nelem", [0, 3, 100, 1000])
def test_dataframe_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df["a"].dtype is np.dtype(np.int32)
df["b"] = df["a"].astype(np.float32)
assert df["b"].dtype is np.dtype(np.float32)
np.testing.assert_equal(df["a"].to_array(), df["b"].to_array())
@pytest.mark.parametrize("nelem", [0, 100])
def test_index_astype(nelem):
df = cudf.DataFrame()
data = np.asarray(range(nelem), dtype=np.int32)
df["a"] = data
assert df.index.dtype is np.dtype(np.int64)
df.index = df.index.astype(np.float32)
assert df.index.dtype is np.dtype(np.float32)
df["a"] = df["a"].astype(np.float32)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
df["b"] = df["a"]
df = df.set_index("b")
df["a"] = df["a"].astype(np.int16)
df.index = df.index.astype(np.int16)
np.testing.assert_equal(df.index.to_array(), df["a"].to_array())
def test_dataframe_to_string():
pd.options.display.max_rows = 5
pd.options.display.max_columns = 8
# Test basic
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
string = str(df)
assert string.splitlines()[-1] == "[6 rows x 2 columns]"
# Test skipped columns
df = cudf.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6],
"b": [11, 12, 13, 14, 15, 16],
"c": [11, 12, 13, 14, 15, 16],
"d": [11, 12, 13, 14, 15, 16],
}
)
string = df.to_string()
assert string.splitlines()[-1] == "[6 rows x 4 columns]"
# Test masked
df = cudf.DataFrame(
{"a": [1, 2, 3, 4, 5, 6], "b": [11, 12, 13, 14, 15, 16]}
)
data = np.arange(6)
mask = np.zeros(1, dtype=cudf.utils.utils.mask_dtype)
mask[0] = 0b00101101
masked = cudf.Series.from_masked_array(data, mask)
assert masked.null_count == 2
df["c"] = masked
# check data
values = masked.copy()
validids = [0, 2, 3, 5]
densearray = masked.to_array()
np.testing.assert_equal(data[validids], densearray)
# valid position is correct
for i in validids:
assert data[i] == values[i]
# null position is correct
for i in range(len(values)):
if i not in validids:
assert values[i] is cudf.NA
pd.options.display.max_rows = 10
got = df.to_string()
expect = """
a b c
0 1 11 0
1 2 12 <NA>
2 3 13 2
3 4 14 3
4 5 15 <NA>
5 6 16 5
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_to_string_wide(monkeypatch):
monkeypatch.setenv("COLUMNS", "79")
# Test basic
df = cudf.DataFrame()
for i in range(100):
df["a{}".format(i)] = list(range(3))
pd.options.display.max_columns = 0
got = df.to_string()
expect = """
a0 a1 a2 a3 a4 a5 a6 a7 ... a92 a93 a94 a95 a96 a97 a98 a99
0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0
1 1 1 1 1 1 1 1 1 ... 1 1 1 1 1 1 1 1
2 2 2 2 2 2 2 2 2 ... 2 2 2 2 2 2 2 2
[3 rows x 100 columns]
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_empty_to_string():
# Test for printing empty dataframe
df = cudf.DataFrame()
got = df.to_string()
expect = "Empty DataFrame\nColumns: []\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_emptycolumns_to_string():
# Test for printing dataframe having empty columns
df = cudf.DataFrame()
df["a"] = []
df["b"] = []
got = df.to_string()
expect = "Empty DataFrame\nColumns: [a, b]\nIndex: []\n"
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy():
# Test for copying the dataframe using python copy pkg
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = copy(df)
df2["b"] = [4, 5, 6]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_copy_shallow():
# Test for copy dataframe using class method
df = cudf.DataFrame()
df["a"] = [1, 2, 3]
df2 = df.copy()
df2["b"] = [4, 2, 3]
got = df.to_string()
expect = """
a
0 1
1 2
2 3
"""
# values should match despite whitespace difference
assert got.split() == expect.split()
def test_dataframe_dtypes():
dtypes = pd.Series(
[np.int32, np.float32, np.float64], index=["c", "a", "b"]
)
df = cudf.DataFrame(
{k: np.ones(10, dtype=v) for k, v in dtypes.iteritems()}
)
assert df.dtypes.equals(dtypes)
def test_dataframe_add_col_to_object_dataframe():
# Test for adding column to an empty object dataframe
cols = ["a", "b", "c"]
df = pd.DataFrame(columns=cols, dtype="str")
data = {k: v for (k, v) in zip(cols, [["a"] for _ in cols])}
gdf = cudf.DataFrame(data)
gdf = gdf[:0]
assert gdf.dtypes.equals(df.dtypes)
gdf["a"] = [1]
df["a"] = [10]
assert gdf.dtypes.equals(df.dtypes)
gdf["b"] = [1.0]
df["b"] = [10.0]
assert gdf.dtypes.equals(df.dtypes)
def test_dataframe_dir_and_getattr():
df = cudf.DataFrame(
{
"a": np.ones(10),
"b": np.ones(10),
"not an id": np.ones(10),
"oop$": np.ones(10),
}
)
o = dir(df)
assert {"a", "b"}.issubset(o)
assert "not an id" not in o
assert "oop$" not in o
# Getattr works
assert df.a.equals(df["a"])
assert df.b.equals(df["b"])
with pytest.raises(AttributeError):
df.not_a_column
@pytest.mark.parametrize("order", ["C", "F"])
def test_empty_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
# Check fully empty dataframe.
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 0)
df = cudf.DataFrame()
nelem = 123
for k in "abc":
df[k] = np.random.random(nelem)
# Check all columns in empty dataframe.
mat = df.head(0).as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (0, 3)
@pytest.mark.parametrize("order", ["C", "F"])
def test_dataframe_as_gpu_matrix(order):
df = cudf.DataFrame()
nelem = 123
for k in "abcd":
df[k] = np.random.random(nelem)
# Check all columns
mat = df.as_gpu_matrix(order=order).copy_to_host()
assert mat.shape == (nelem, 4)
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
# Check column subset
mat = df.as_gpu_matrix(order=order, columns=["a", "c"]).copy_to_host()
assert mat.shape == (nelem, 2)
for i, k in enumerate("ac"):
np.testing.assert_array_equal(df[k].to_array(), mat[:, i])
def test_dataframe_as_gpu_matrix_null_values():
df = cudf.DataFrame()
nelem = 123
na = -10000
refvalues = {}
for k in "abcd":
df[k] = data = np.random.random(nelem)
bitmask = utils.random_bitmask(nelem)
df[k] = df[k].set_mask(bitmask)
boolmask = np.asarray(
utils.expand_bits_to_bytes(bitmask)[:nelem], dtype=np.bool_
)
data[~boolmask] = na
refvalues[k] = data
# Check null value causes error
with pytest.raises(ValueError) as raises:
df.as_gpu_matrix()
raises.match("column 'a' has null values")
for k in df.columns:
df[k] = df[k].fillna(na)
mat = df.as_gpu_matrix().copy_to_host()
for i, k in enumerate(df.columns):
np.testing.assert_array_equal(refvalues[k], mat[:, i])
def test_dataframe_append_empty():
pdf = pd.DataFrame(
{
"key": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],
"value": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
gdf["newcol"] = 100
pdf["newcol"] = 100
assert len(gdf["newcol"]) == len(pdf)
assert len(pdf["newcol"]) == len(pdf)
assert_eq(gdf, pdf)
def test_dataframe_setitem_from_masked_object():
ary = np.random.randn(100)
mask = np.zeros(100, dtype=bool)
mask[:20] = True
np.random.shuffle(mask)
ary[mask] = np.nan
test1_null = cudf.Series(ary, nan_as_null=True)
assert test1_null.nullable
assert test1_null.null_count == 20
test1_nan = cudf.Series(ary, nan_as_null=False)
assert test1_nan.null_count == 0
test2_null = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=True
)
assert test2_null["a"].nullable
assert test2_null["a"].null_count == 20
test2_nan = cudf.DataFrame.from_pandas(
pd.DataFrame({"a": ary}), nan_as_null=False
)
assert test2_nan["a"].null_count == 0
gpu_ary = cupy.asarray(ary)
test3_null = cudf.Series(gpu_ary, nan_as_null=True)
assert test3_null.nullable
assert test3_null.null_count == 20
test3_nan = cudf.Series(gpu_ary, nan_as_null=False)
assert test3_nan.null_count == 0
test4 = cudf.DataFrame()
lst = [1, 2, None, 4, 5, 6, None, 8, 9]
test4["lst"] = lst
assert test4["lst"].nullable
assert test4["lst"].null_count == 2
def test_dataframe_append_to_empty():
pdf = pd.DataFrame()
pdf["a"] = []
pdf["b"] = [1, 2, 3]
gdf = cudf.DataFrame()
gdf["a"] = []
gdf["b"] = [1, 2, 3]
assert_eq(gdf, pdf)
def test_dataframe_setitem_index_len1():
gdf = cudf.DataFrame()
gdf["a"] = [1]
gdf["b"] = gdf.index._values
np.testing.assert_equal(gdf.b.to_array(), [0])
def test_empty_dataframe_setitem_df():
gdf1 = cudf.DataFrame()
gdf2 = cudf.DataFrame({"a": [1, 2, 3, 4, 5]})
gdf1["a"] = gdf2["a"]
assert_eq(gdf1, gdf2)
def test_assign():
gdf = cudf.DataFrame({"x": [1, 2, 3]})
gdf2 = gdf.assign(y=gdf.x + 1)
assert list(gdf.columns) == ["x"]
assert list(gdf2.columns) == ["x", "y"]
np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000])
def test_dataframe_hash_columns(nrows):
gdf = cudf.DataFrame()
data = np.asarray(range(nrows))
data[0] = data[-1] # make first and last the same
gdf["a"] = data
gdf["b"] = gdf.a + 100
out = gdf.hash_columns(["a", "b"])
assert isinstance(out, cupy.ndarray)
assert len(out) == nrows
assert out.dtype == np.int32
# Check default
out_all = gdf.hash_columns()
np.testing.assert_array_equal(cupy.asnumpy(out), cupy.asnumpy(out_all))
# Check single column
out_one = cupy.asnumpy(gdf.hash_columns(["a"]))
# First matches last
assert out_one[0] == out_one[-1]
# Equivalent to the cudf.Series.hash_values()
np.testing.assert_array_equal(cupy.asnumpy(gdf.a.hash_values()), out_one)
@pytest.mark.parametrize("nrows", [3, 10, 100, 1000])
@pytest.mark.parametrize("nparts", [1, 2, 8, 13])
@pytest.mark.parametrize("nkeys", [1, 2])
def test_dataframe_hash_partition(nrows, nparts, nkeys):
np.random.seed(123)
gdf = cudf.DataFrame()
keycols = []
for i in range(nkeys):
keyname = "key{}".format(i)
gdf[keyname] = np.random.randint(0, 7 - i, nrows)
keycols.append(keyname)
gdf["val1"] = np.random.randint(0, nrows * 2, nrows)
got = gdf.partition_by_hash(keycols, nparts=nparts)
# Must return a list
assert isinstance(got, list)
# Must have correct number of partitions
assert len(got) == nparts
# All partitions must be DataFrame type
assert all(isinstance(p, cudf.DataFrame) for p in got)
# Check that all partitions have unique keys
part_unique_keys = set()
for p in got:
if len(p):
# Take rows of the keycolumns and build a set of the key-values
unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))
# Ensure that none of the key-values have occurred in other groups
assert not (unique_keys & part_unique_keys)
part_unique_keys |= unique_keys
assert len(part_unique_keys)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_value(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["val"] = gdf["val"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.key])
expected_value = row.key + 100 if valid else np.nan
got_value = row.val
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("nrows", [3, 10, 50])
def test_dataframe_hash_partition_masked_keys(nrows):
gdf = cudf.DataFrame()
gdf["key"] = np.arange(nrows)
gdf["val"] = np.arange(nrows) + 100
bitmask = utils.random_bitmask(nrows)
bytemask = utils.expand_bits_to_bytes(bitmask)
gdf["key"] = gdf["key"].set_mask(bitmask)
parted = gdf.partition_by_hash(["key"], nparts=3, keep_index=False)
# Verify that the valid mask is correct
for p in parted:
df = p.to_pandas()
for row in df.itertuples():
valid = bool(bytemask[row.val - 100])
# val is key + 100
expected_value = row.val - 100 if valid else np.nan
got_value = row.key
assert (expected_value == got_value) or (
np.isnan(expected_value) and np.isnan(got_value)
)
@pytest.mark.parametrize("keep_index", [True, False])
def test_dataframe_hash_partition_keep_index(keep_index):
gdf = cudf.DataFrame(
{"val": [1, 2, 3, 4], "key": [3, 2, 1, 4]}, index=[4, 3, 2, 1]
)
expected_df1 = cudf.DataFrame(
{"val": [1], "key": [3]}, index=[4] if keep_index else None
)
expected_df2 = cudf.DataFrame(
{"val": [2, 3, 4], "key": [2, 1, 4]},
index=[3, 2, 1] if keep_index else range(1, 4),
)
expected = [expected_df1, expected_df2]
parts = gdf.partition_by_hash(["key"], nparts=2, keep_index=keep_index)
for exp, got in zip(expected, parts):
assert_eq(exp, got)
def test_dataframe_hash_partition_empty():
gdf = cudf.DataFrame({"val": [1, 2], "key": [3, 2]}, index=["a", "b"])
parts = gdf.iloc[:0].partition_by_hash(["key"], nparts=3)
assert len(parts) == 3
for part in parts:
assert_eq(gdf.iloc[:0], part)
@pytest.mark.parametrize("dtype1", utils.supported_numpy_dtypes)
@pytest.mark.parametrize("dtype2", utils.supported_numpy_dtypes)
def test_dataframe_concat_different_numerical_columns(dtype1, dtype2):
df1 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype1)))
df2 = pd.DataFrame(dict(x=pd.Series(np.arange(5)).astype(dtype2)))
if dtype1 != dtype2 and "datetime" in dtype1 or "datetime" in dtype2:
with pytest.raises(TypeError):
cudf.concat([df1, df2])
else:
pres = pd.concat([df1, df2])
gres = cudf.concat([cudf.from_pandas(df1), cudf.from_pandas(df2)])
assert_eq(cudf.from_pandas(pres), gres)
def test_dataframe_concat_different_column_types():
df1 = cudf.Series([42], dtype=np.float64)
df2 = cudf.Series(["a"], dtype="category")
with pytest.raises(ValueError):
cudf.concat([df1, df2])
df2 = cudf.Series(["a string"])
with pytest.raises(TypeError):
cudf.concat([df1, df2])
@pytest.mark.parametrize(
"df_1", [cudf.DataFrame({"a": [1, 2], "b": [1, 3]}), cudf.DataFrame({})]
)
@pytest.mark.parametrize(
"df_2", [cudf.DataFrame({"a": [], "b": []}), cudf.DataFrame({})]
)
def test_concat_empty_dataframe(df_1, df_2):
got = cudf.concat([df_1, df_2])
expect = pd.concat([df_1.to_pandas(), df_2.to_pandas()], sort=False)
# ignoring dtypes as pandas upcasts int to float
# on concatenation with empty dataframes
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"df1_d",
[
{"a": [1, 2], "b": [1, 2], "c": ["s1", "s2"], "d": [1.0, 2.0]},
{"b": [1.9, 10.9], "c": ["s1", "s2"]},
{"c": ["s1"], "b": [None], "a": [False]},
],
)
@pytest.mark.parametrize(
"df2_d",
[
{"a": [1, 2, 3]},
{"a": [1, None, 3], "b": [True, True, False], "c": ["s3", None, "s4"]},
{"a": [], "b": []},
{},
],
)
def test_concat_different_column_dataframe(df1_d, df2_d):
got = cudf.concat(
[cudf.DataFrame(df1_d), cudf.DataFrame(df2_d), cudf.DataFrame(df1_d)],
sort=False,
)
expect = pd.concat(
[pd.DataFrame(df1_d), pd.DataFrame(df2_d), pd.DataFrame(df1_d)],
sort=False,
)
# numerical columns are upcast to float in cudf.DataFrame.to_pandas(),
# which casts nan to 0 in non-float numerical columns
numeric_cols = got.dtypes[got.dtypes != "object"].index
for col in numeric_cols:
got[col] = got[col].astype(np.float64).fillna(np.nan)
assert_eq(got, expect, check_dtype=False)
@pytest.mark.parametrize(
"ser_1", [pd.Series([1, 2, 3]), pd.Series([], dtype="float64")]
)
@pytest.mark.parametrize("ser_2", [pd.Series([], dtype="float64")])
def test_concat_empty_series(ser_1, ser_2):
got = cudf.concat([cudf.Series(ser_1), cudf.Series(ser_2)])
expect = pd.concat([ser_1, ser_2])
assert_eq(got, expect)
def test_concat_with_axis():
df1 = pd.DataFrame(dict(x=np.arange(5), y=np.arange(5)))
df2 = pd.DataFrame(dict(a=np.arange(5), b=np.arange(5)))
concat_df = pd.concat([df1, df2], axis=1)
cdf1 = cudf.from_pandas(df1)
cdf2 = cudf.from_pandas(df2)
# concat only dataframes
concat_cdf = cudf.concat([cdf1, cdf2], axis=1)
assert_eq(concat_cdf, concat_df)
# concat only series
concat_s = pd.concat([df1.x, df1.y], axis=1)
cs1 = cudf.Series.from_pandas(df1.x)
cs2 = cudf.Series.from_pandas(df1.y)
concat_cdf_s = cudf.concat([cs1, cs2], axis=1)
assert_eq(concat_cdf_s, concat_s)
# concat series and dataframes
s3 = pd.Series(np.random.random(5))
cs3 = cudf.Series.from_pandas(s3)
concat_cdf_all = cudf.concat([cdf1, cs3, cdf2], axis=1)
concat_df_all = pd.concat([df1, s3, df2], axis=1)
assert_eq(concat_cdf_all, concat_df_all)
# concat manual multi index
midf1 = cudf.from_pandas(df1)
midf1.index = cudf.MultiIndex(
levels=[[0, 1, 2, 3], [0, 1]], codes=[[0, 1, 2, 3, 2], [0, 1, 0, 1, 0]]
)
midf2 = midf1[2:]
midf2.index = cudf.MultiIndex(
levels=[[3, 4, 5], [2, 0]], codes=[[0, 1, 2], [1, 0, 1]]
)
mipdf1 = midf1.to_pandas()
mipdf2 = midf2.to_pandas()
assert_eq(cudf.concat([midf1, midf2]), pd.concat([mipdf1, mipdf2]))
assert_eq(cudf.concat([midf2, midf1]), pd.concat([mipdf2, mipdf1]))
assert_eq(
cudf.concat([midf1, midf2, midf1]), pd.concat([mipdf1, mipdf2, mipdf1])
)
# concat groupby multi index
gdf1 = cudf.DataFrame(
{
"x": np.random.randint(0, 10, 10),
"y": np.random.randint(0, 10, 10),
"z": np.random.randint(0, 10, 10),
"v": np.random.randint(0, 10, 10),
}
)
gdf2 = gdf1[5:]
gdg1 = gdf1.groupby(["x", "y"]).min()
gdg2 = gdf2.groupby(["x", "y"]).min()
pdg1 = gdg1.to_pandas()
pdg2 = gdg2.to_pandas()
assert_eq(cudf.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]))
assert_eq(cudf.concat([gdg2, gdg1]), pd.concat([pdg2, pdg1]))
# series multi index concat
gdgz1 = gdg1.z
gdgz2 = gdg2.z
pdgz1 = gdgz1.to_pandas()
pdgz2 = gdgz2.to_pandas()
assert_eq(cudf.concat([gdgz1, gdgz2]), pd.concat([pdgz1, pdgz2]))
assert_eq(cudf.concat([gdgz2, gdgz1]), pd.concat([pdgz2, pdgz1]))
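def _concat_axis1_alignment_sketch():
    # Not a test: a minimal sketch (pandas only) of the axis=1 semantics that
    # test_concat_with_axis relies on -- frames are placed side by side and
    # rows are aligned on the index, with an outer join of the index labels.
    import pandas as pd

    a = pd.DataFrame({"x": [1, 2]}, index=[0, 1])
    b = pd.DataFrame({"y": [3, 4]}, index=[1, 2])
    out = pd.concat([a, b], axis=1)
    assert list(out.columns) == ["x", "y"]
    assert len(out) == 3  # union of the two indexes: [0, 1, 2]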
@pytest.mark.parametrize("nrows", [0, 3, 10, 100, 1000])
def test_nonmatching_index_setitem(nrows):
np.random.seed(0)
gdf = cudf.DataFrame()
gdf["a"] = np.random.randint(2147483647, size=nrows)
gdf["b"] = np.random.randint(2147483647, size=nrows)
gdf = gdf.set_index("b")
test_values = np.random.randint(2147483647, size=nrows)
gdf["c"] = test_values
assert len(test_values) == len(gdf["c"])
assert (
gdf["c"]
.to_pandas()
.equals(cudf.Series(test_values).set_index(gdf._index).to_pandas())
)
def test_from_pandas():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
gdf = cudf.DataFrame.from_pandas(df)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = df.x
gs = cudf.Series.from_pandas(s)
assert isinstance(gs, cudf.Series)
assert_eq(s, gs)
@pytest.mark.parametrize("dtypes", [int, float])
def test_from_records(dtypes):
h_ary = np.ndarray(shape=(10, 4), dtype=dtypes)
rec_ary = h_ary.view(np.recarray)
gdf = cudf.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
df = pd.DataFrame.from_records(rec_ary, columns=["a", "b", "c", "d"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame.from_records(rec_ary)
df = pd.DataFrame.from_records(rec_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
@pytest.mark.parametrize("columns", [None, ["first", "second", "third"]])
@pytest.mark.parametrize(
"index",
[
None,
["first", "second"],
"name",
"age",
"weight",
[10, 11],
["abc", "xyz"],
],
)
def test_from_records_index(columns, index):
rec_ary = np.array(
[("Rex", 9, 81.0), ("Fido", 3, 27.0)],
dtype=[("name", "U10"), ("age", "i4"), ("weight", "f4")],
)
gdf = cudf.DataFrame.from_records(rec_ary, columns=columns, index=index)
df = pd.DataFrame.from_records(rec_ary, columns=columns, index=index)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_construction_from_cupy_arrays():
h_ary = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
d_ary = cupy.asarray(h_ary)
gdf = cudf.DataFrame(d_ary, columns=["a", "b", "c"])
df = pd.DataFrame(h_ary, columns=["a", "b", "c"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
df = pd.DataFrame(h_ary)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary, index=["a", "b"])
df = pd.DataFrame(h_ary, index=["a", "b"])
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=0, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=0, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
gdf = cudf.DataFrame(d_ary)
gdf = gdf.set_index(keys=1, drop=False)
df = pd.DataFrame(h_ary)
df = df.set_index(keys=1, drop=False)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
def test_dataframe_cupy_wrong_dimensions():
d_ary = cupy.empty((2, 3, 4), dtype=np.int32)
with pytest.raises(
ValueError, match="records dimension expected 1 or 2 but found: 3"
):
cudf.DataFrame(d_ary)
def test_dataframe_cupy_array_wrong_index():
d_ary = cupy.empty((2, 3), dtype=np.int32)
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index=["a"])
with pytest.raises(
ValueError,
match="Length mismatch: Expected axis has 2 elements, "
"new values have 1 elements",
):
cudf.DataFrame(d_ary, index="a")
def test_index_in_dataframe_constructor():
a = pd.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
b = cudf.DataFrame({"x": [1, 2, 3]}, index=[4.0, 5.0, 6.0])
assert_eq(a, b)
assert_eq(a.loc[4:], b.loc[4:])
dtypes = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
padf = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
gdf = cudf.DataFrame.from_arrow(padf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(df, gdf)
s = pa.Array.from_pandas(df.a)
gs = cudf.Series.from_arrow(s)
assert isinstance(gs, cudf.Series)
    # PyArrow's to_pandas() converts to a numpy array, which has better type
    # compatibility for this element-wise comparison
np.testing.assert_array_equal(s.to_pandas(), gs.to_array())
@pytest.mark.parametrize("nelem", [0, 2, 3, 100, 1000])
@pytest.mark.parametrize("data_type", dtypes)
def test_to_arrow(nelem, data_type):
df = pd.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
pa_i = pa.Array.from_pandas(df.index)
pa_gi = gdf.index.to_arrow()
assert isinstance(pa_gi, pa.Array)
assert pa.Array.equals(pa_i, pa_gi)
@pytest.mark.parametrize("data_type", dtypes)
def test_to_from_arrow_nulls(data_type):
if data_type == "longlong":
data_type = "int64"
if data_type == "bool":
s1 = pa.array([True, None, False, None, True], type=data_type)
else:
dtype = np.dtype(data_type)
if dtype.type == np.datetime64:
time_unit, _ = np.datetime_data(dtype)
data_type = pa.timestamp(unit=time_unit)
s1 = pa.array([1, None, 3, None, 5], type=data_type)
gs1 = cudf.Series.from_arrow(s1)
assert isinstance(gs1, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s1.buffers()[0]).view("u1")[0],
gs1._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s1, gs1.to_arrow())
s2 = pa.array([None, None, None, None, None], type=data_type)
gs2 = cudf.Series.from_arrow(s2)
assert isinstance(gs2, cudf.Series)
# We have 64B padded buffers for nulls whereas Arrow returns a minimal
# number of bytes, so only check the first byte in this case
np.testing.assert_array_equal(
np.asarray(s2.buffers()[0]).view("u1")[0],
gs2._column.mask_array_view.copy_to_host().view("u1")[0],
)
assert pa.Array.equals(s2, gs2.to_arrow())
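def _validity_bitmap_byte_sketch():
    # Not a test: a minimal sketch of the bit layout behind the "only check
    # the first byte" comparisons above. Arrow validity bitmaps are LSB-first,
    # one bit per element, so [valid, null, valid, null, valid] packs into the
    # single byte 0b00010101. Assumes numpy >= 1.17 for the bitorder argument.
    import numpy as np

    bits = np.array([1, 0, 1, 0, 1], dtype="u1")
    assert np.packbits(bits, bitorder="little")[0] == 0b00010101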
def test_to_arrow_categorical():
df = pd.DataFrame()
df["a"] = pd.Series(["a", "b", "c"], dtype="category")
gdf = cudf.DataFrame.from_pandas(df)
pa_df = pa.Table.from_pandas(
df, preserve_index=False
).replace_schema_metadata(None)
pa_gdf = gdf.to_arrow(preserve_index=False).replace_schema_metadata(None)
assert isinstance(pa_gdf, pa.Table)
assert pa.Table.equals(pa_df, pa_gdf)
pa_s = pa.Array.from_pandas(df.a)
pa_gs = gdf["a"].to_arrow()
assert isinstance(pa_gs, pa.Array)
assert pa.Array.equals(pa_s, pa_gs)
def test_from_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert_eq(
pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical
gd_cat.to_pandas(),
)
def test_to_arrow_missing_categorical():
pd_cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
pa_cat = pa.array(pd_cat, from_pandas=True)
gd_cat = cudf.Series(pa_cat)
assert isinstance(gd_cat, cudf.Series)
assert pa.Array.equals(pa_cat, gd_cat.to_arrow())
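def _missing_category_code_sketch():
    # Not a test: a minimal sketch (pandas only) of why "c" round-trips as
    # null in the two tests above -- values outside the declared categories
    # get code -1.
    import pandas as pd

    cat = pd.Categorical(["a", "b", "c"], categories=["a", "b"])
    assert list(cat.codes) == [0, 1, -1]  # "c" is not a declared category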
@pytest.mark.parametrize("data_type", dtypes)
def test_from_scalar_typing(data_type):
if data_type == "datetime64[ms]":
scalar = (
np.dtype("int64")
.type(np.random.randint(0, 5))
.astype("datetime64[ms]")
)
elif data_type.startswith("datetime64"):
scalar = np.datetime64(datetime.date.today()).astype("datetime64[ms]")
data_type = "datetime64[ms]"
else:
scalar = np.dtype(data_type).type(np.random.randint(0, 5))
gdf = cudf.DataFrame()
gdf["a"] = [1, 2, 3, 4, 5]
gdf["b"] = scalar
assert gdf["b"].dtype == np.dtype(data_type)
assert len(gdf["b"]) == len(gdf["a"])
@pytest.mark.parametrize("data_type", NUMERIC_TYPES)
def test_from_python_array(data_type):
np_arr = np.random.randint(0, 100, 10).astype(data_type)
data = memoryview(np_arr)
data = arr.array(data.format, data)
gs = cudf.Series(data)
np.testing.assert_equal(gs.to_array(), np_arr)
def test_series_shape():
ps = pd.Series([1, 2, 3, 4])
cs = cudf.Series([1, 2, 3, 4])
assert ps.shape == cs.shape
def test_series_shape_empty():
ps = pd.Series(dtype="float64")
cs = cudf.Series([])
assert ps.shape == cs.shape
def test_dataframe_shape():
pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.shape == gdf.shape
def test_dataframe_shape_empty():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
assert pdf.shape == gdf.shape
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("nulls", ["none", "some", "all"])
def test_dataframe_transpose(nulls, num_cols, num_rows, dtype):
pdf = pd.DataFrame()
null_rep = np.nan if dtype in ["float32", "float64"] else None
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(np.random.randint(0, 26, num_rows).astype(dtype))
if nulls == "some":
idx = np.random.choice(
num_rows, size=int(num_rows / 2), replace=False
)
data[idx] = null_rep
elif nulls == "all":
data[:] = null_rep
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function)
assert_eq(expect, got_property)
@pytest.mark.parametrize("num_cols", [1, 2, 10])
@pytest.mark.parametrize("num_rows", [1, 2, 20])
def test_dataframe_transpose_category(num_cols, num_rows):
pdf = pd.DataFrame()
for i in range(num_cols):
colname = string.ascii_lowercase[i]
data = pd.Series(list(string.ascii_lowercase), dtype="category")
data = data.sample(num_rows, replace=True).reset_index(drop=True)
pdf[colname] = data
gdf = cudf.DataFrame.from_pandas(pdf)
got_function = gdf.transpose()
got_property = gdf.T
expect = pdf.transpose()
assert_eq(expect, got_function.to_pandas())
assert_eq(expect, got_property.to_pandas())
def test_generated_column():
gdf = cudf.DataFrame({"a": (i for i in range(5))})
assert len(gdf) == 5
@pytest.fixture
def pdf():
return pd.DataFrame({"x": range(10), "y": range(10)})
@pytest.fixture
def gdf(pdf):
return cudf.DataFrame.from_pandas(pdf)
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize(
"func",
[
lambda df, **kwargs: df.min(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.product(**kwargs),
lambda df, **kwargs: df.cummin(**kwargs),
lambda df, **kwargs: df.cummax(**kwargs),
lambda df, **kwargs: df.cumsum(**kwargs),
lambda df, **kwargs: df.cumprod(**kwargs),
lambda df, **kwargs: df.mean(**kwargs),
lambda df, **kwargs: df.sum(**kwargs),
lambda df, **kwargs: df.max(**kwargs),
lambda df, **kwargs: df.std(ddof=1, **kwargs),
lambda df, **kwargs: df.var(ddof=1, **kwargs),
lambda df, **kwargs: df.std(ddof=2, **kwargs),
lambda df, **kwargs: df.var(ddof=2, **kwargs),
lambda df, **kwargs: df.kurt(**kwargs),
lambda df, **kwargs: df.skew(**kwargs),
lambda df, **kwargs: df.all(**kwargs),
lambda df, **kwargs: df.any(**kwargs),
],
)
@pytest.mark.parametrize("skipna", [True, False, None])
def test_dataframe_reductions(data, func, skipna):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf, skipna=skipna), func(gdf, skipna=skipna))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("func", [lambda df: df.count()])
def test_dataframe_count_reduction(data, func):
pdf = pd.DataFrame(data=data)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(func(pdf), func(gdf))
@pytest.mark.parametrize(
"data",
[
{"x": [np.nan, 2, 3, 4, 100, np.nan], "y": [4, 5, 6, 88, 99, np.nan]},
{"x": [1, 2, 3], "y": [4, 5, 6]},
{"x": [np.nan, np.nan, np.nan], "y": [np.nan, np.nan, np.nan]},
{"x": [], "y": []},
{"x": []},
],
)
@pytest.mark.parametrize("ops", ["sum", "product", "prod"])
@pytest.mark.parametrize("skipna", [True, False, None])
@pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 10])
def test_dataframe_min_count_ops(data, ops, skipna, min_count):
psr = pd.DataFrame(data)
gsr = cudf.DataFrame(data)
if PANDAS_GE_120 and psr.shape[0] * psr.shape[1] < min_count:
pytest.xfail("https://github.com/pandas-dev/pandas/issues/39738")
assert_eq(
getattr(psr, ops)(skipna=skipna, min_count=min_count),
getattr(gsr, ops)(skipna=skipna, min_count=min_count),
check_dtype=False,
)
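def _min_count_sketch():
    # Not a test: a minimal sketch (pandas only) of the min_count semantics
    # exercised above -- if fewer than min_count valid values remain after
    # dropping nulls, the reduction returns NaN instead of a number.
    import numpy as np
    import pandas as pd

    s = pd.Series([1.0, np.nan])
    assert s.sum(min_count=1) == 1.0
    assert np.isnan(s.sum(min_count=2))  # only one valid value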
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_df(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf, pdf)
g = binop(gdf, gdf)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_df(pdf, gdf, binop):
d = binop(pdf, pdf + 1)
g = binop(gdf, gdf + 1)
assert_eq(d, g)
@pytest.mark.parametrize(
"binop",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_binops_series(pdf, gdf, binop):
pdf = pdf + 1.0
gdf = gdf + 1.0
d = binop(pdf.x, pdf.y)
g = binop(gdf.x, gdf.y)
assert_eq(d, g)
@pytest.mark.parametrize("binop", [operator.and_, operator.or_, operator.xor])
def test_bitwise_binops_series(pdf, gdf, binop):
d = binop(pdf.x, pdf.y + 1)
g = binop(gdf.x, gdf.y + 1)
assert_eq(d, g)
@pytest.mark.parametrize("unaryop", [operator.neg, operator.inv, operator.abs])
def test_unaryops_df(pdf, gdf, unaryop):
d = unaryop(pdf - 5)
g = unaryop(gdf - 5)
assert_eq(d, g)
@pytest.mark.parametrize(
"func",
[
lambda df: df.empty,
lambda df: df.x.empty,
lambda df: df.x.fillna(123, limit=None, method=None, axis=None),
lambda df: df.drop("x", axis=1, errors="raise"),
],
)
def test_unary_operators(func, pdf, gdf):
p = func(pdf)
g = func(gdf)
assert_eq(p, g)
def test_is_monotonic(gdf):
pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
gdf = cudf.DataFrame.from_pandas(pdf)
assert not gdf.index.is_monotonic
assert not gdf.index.is_monotonic_increasing
assert not gdf.index.is_monotonic_decreasing
def test_iter(pdf, gdf):
assert list(pdf) == list(gdf)
def test_iteritems(gdf):
for k, v in gdf.iteritems():
assert k in gdf.columns
assert isinstance(v, cudf.Series)
assert_eq(v, gdf[k])
@pytest.mark.parametrize("q", [0.5, 1, 0.001, [0.5], [], [0.005, 0.5, 1]])
@pytest.mark.parametrize("numeric_only", [True, False])
def test_quantile(q, numeric_only):
ts = pd.date_range("2018-08-24", periods=5, freq="D")
td = pd.to_timedelta(np.arange(5), unit="h")
pdf = pd.DataFrame(
{"date": ts, "delta": td, "val": np.random.randn(len(ts))}
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf["date"].quantile(q), gdf["date"].quantile(q))
assert_eq(pdf["delta"].quantile(q), gdf["delta"].quantile(q))
assert_eq(pdf["val"].quantile(q), gdf["val"].quantile(q))
if numeric_only:
assert_eq(pdf.quantile(q), gdf.quantile(q))
else:
        q = q if isinstance(q, list) else [q]
        assert_eq(
            pdf.quantile(q, numeric_only=False),
            gdf.quantile(q, numeric_only=False),
        )
def test_empty_quantile():
pdf = pd.DataFrame({"x": []})
df = cudf.DataFrame({"x": []})
actual = df.quantile()
expected = pdf.quantile()
assert_eq(actual, expected)
def test_from_pandas_function(pdf):
gdf = cudf.from_pandas(pdf)
assert isinstance(gdf, cudf.DataFrame)
assert_eq(pdf, gdf)
gdf = cudf.from_pandas(pdf.x)
assert isinstance(gdf, cudf.Series)
assert_eq(pdf.x, gdf)
with pytest.raises(TypeError):
cudf.from_pandas(123)
@pytest.mark.parametrize("preserve_index", [True, False])
def test_arrow_pandas_compat(pdf, gdf, preserve_index):
pdf["z"] = range(10)
pdf = pdf.set_index("z")
gdf["z"] = range(10)
gdf = gdf.set_index("z")
pdf_arrow_table = pa.Table.from_pandas(pdf, preserve_index=preserve_index)
gdf_arrow_table = gdf.to_arrow(preserve_index=preserve_index)
assert pa.Table.equals(pdf_arrow_table, gdf_arrow_table)
gdf2 = cudf.DataFrame.from_arrow(pdf_arrow_table)
pdf2 = pdf_arrow_table.to_pandas()
assert_eq(pdf2, gdf2)
@pytest.mark.parametrize("nrows", [1, 8, 100, 1000, 100000])
def test_series_hash_encode(nrows):
data = np.asarray(range(nrows))
    # Python's str hash is randomized per process, which can make
    # enc_with_name_arr end up equal to enc_arr, and there is no reliable way
    # to force a particular string hash. Use an integer name instead so the
    # hash value is constant across runs.
s = cudf.Series(data, name=1)
num_features = 1000
encoded_series = s.hash_encode(num_features)
assert isinstance(encoded_series, cudf.Series)
enc_arr = encoded_series.to_array()
assert np.all(enc_arr >= 0)
assert np.max(enc_arr) < num_features
enc_with_name_arr = s.hash_encode(num_features, use_name=True).to_array()
assert enc_with_name_arr[0] != enc_arr[0]
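def _hash_stability_sketch():
    # Not a test: a minimal sketch of why test_series_hash_encode uses an
    # integer name. CPython hashes small ints to themselves, while str hashes
    # are randomized per process via PYTHONHASHSEED, so only an integer name
    # gives a hash value that is constant across runs.
    assert hash(1) == 1
    # hash("1") can change between interpreter runs unless PYTHONHASHSEED is
    # pinned, so it is not asserted here.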
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
def test_cuda_array_interface(dtype):
np_data = np.arange(10).astype(dtype)
cupy_data = cupy.array(np_data)
pd_data = pd.Series(np_data)
cudf_data = cudf.Series(cupy_data)
assert_eq(pd_data, cudf_data)
gdf = cudf.DataFrame()
gdf["test"] = cupy_data
pd_data.name = "test"
assert_eq(pd_data, gdf["test"])
@pytest.mark.parametrize("nelem", [0, 2, 3, 100])
@pytest.mark.parametrize("nchunks", [1, 2, 5, 10])
@pytest.mark.parametrize("data_type", dtypes)
def test_from_arrow_chunked_arrays(nelem, nchunks, data_type):
np_list_data = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array = pa.chunked_array(np_list_data)
expect = pd.Series(pa_chunk_array.to_pandas())
got = cudf.Series(pa_chunk_array)
assert_eq(expect, got)
np_list_data2 = [
np.random.randint(0, 100, nelem).astype(data_type)
for i in range(nchunks)
]
pa_chunk_array2 = pa.chunked_array(np_list_data2)
pa_table = pa.Table.from_arrays(
[pa_chunk_array, pa_chunk_array2], names=["a", "b"]
)
expect = pa_table.to_pandas()
got = cudf.DataFrame.from_arrow(pa_table)
assert_eq(expect, got)
@pytest.mark.skip(reason="Test was designed to be run in isolation")
def test_gpu_memory_usage_with_boolmask():
ctx = cuda.current_context()
def query_GPU_memory(note=""):
memInfo = ctx.get_memory_info()
usedMemoryGB = (memInfo.total - memInfo.free) / 1e9
return usedMemoryGB
cuda.current_context().deallocations.clear()
nRows = int(1e8)
nCols = 2
dataNumpy = np.asfortranarray(np.random.rand(nRows, nCols))
colNames = ["col" + str(iCol) for iCol in range(nCols)]
pandasDF = pd.DataFrame(data=dataNumpy, columns=colNames, dtype=np.float32)
cudaDF = cudf.core.DataFrame.from_pandas(pandasDF)
boolmask = cudf.Series(np.random.randint(1, 2, len(cudaDF)).astype("bool"))
memory_used = query_GPU_memory()
cudaDF = cudaDF[boolmask]
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col0"].index._values.data_array_view.device_ctypes_pointer
)
assert (
cudaDF.index._values.data_array_view.device_ctypes_pointer
== cudaDF["col1"].index._values.data_array_view.device_ctypes_pointer
)
assert memory_used == query_GPU_memory()
def test_boolmask(pdf, gdf):
boolmask = np.random.randint(0, 2, len(pdf)) > 0
gdf = gdf[boolmask]
pdf = pdf[boolmask]
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"mask_shape",
[
(2, "ab"),
(2, "abc"),
(3, "ab"),
(3, "abc"),
(3, "abcd"),
(4, "abc"),
(4, "abcd"),
],
)
def test_dataframe_boolmask(mask_shape):
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.random.randint(0, 10, 3)
pdf_mask = pd.DataFrame()
for col in mask_shape[1]:
pdf_mask[col] = np.random.randint(0, 2, mask_shape[0]) > 0
gdf = cudf.DataFrame.from_pandas(pdf)
gdf_mask = cudf.DataFrame.from_pandas(pdf_mask)
gdf = gdf[gdf_mask]
pdf = pdf[pdf_mask]
assert np.array_equal(gdf.columns, pdf.columns)
for col in gdf.columns:
assert np.array_equal(
gdf[col].fillna(-1).to_pandas().values, pdf[col].fillna(-1).values
)
@pytest.mark.parametrize(
"mask",
[
[True, False, True],
pytest.param(
cudf.Series([True, False, True]),
marks=pytest.mark.xfail(
reason="Pandas can't index a multiindex with a Series"
),
),
],
)
def test_dataframe_multiindex_boolmask(mask):
gdf = cudf.DataFrame(
{"w": [3, 2, 1], "x": [1, 2, 3], "y": [0, 1, 0], "z": [1, 1, 1]}
)
gdg = gdf.groupby(["w", "x"]).count()
pdg = gdg.to_pandas()
assert_eq(gdg[mask], pdg[mask])
def test_dataframe_assignment():
pdf = pd.DataFrame()
for col in "abc":
pdf[col] = np.array([0, 1, 1, -2, 10])
gdf = cudf.DataFrame.from_pandas(pdf)
gdf[gdf < 0] = 999
pdf[pdf < 0] = 999
assert_eq(gdf, pdf)
def test_1row_arrow_table():
data = [pa.array([0]), pa.array([1])]
batch = pa.RecordBatch.from_arrays(data, ["f0", "f1"])
table = pa.Table.from_batches([batch])
expect = table.to_pandas()
got = cudf.DataFrame.from_arrow(table)
assert_eq(expect, got)
def test_arrow_handle_no_index_name(pdf, gdf):
gdf_arrow = gdf.to_arrow()
pdf_arrow = pa.Table.from_pandas(pdf)
assert pa.Table.equals(pdf_arrow, gdf_arrow)
got = cudf.DataFrame.from_arrow(gdf_arrow)
expect = pdf_arrow.to_pandas()
assert_eq(expect, got)
@pytest.mark.parametrize("num_rows", [1, 3, 10, 100])
@pytest.mark.parametrize("num_bins", [1, 2, 4, 20])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"])
@pytest.mark.parametrize("series_bins", [True, False])
def test_series_digitize(num_rows, num_bins, right, dtype, series_bins):
data = np.random.randint(0, 100, num_rows).astype(dtype)
bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype)))
s = cudf.Series(data)
if series_bins:
s_bins = cudf.Series(bins)
indices = s.digitize(s_bins, right)
else:
indices = s.digitize(bins, right)
np.testing.assert_array_equal(
np.digitize(data, bins, right), indices.to_array()
)
def test_series_digitize_invalid_bins():
s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32")
bins = cudf.Series([2, None, None, 50, 90], dtype="int32")
with pytest.raises(
ValueError, match="`bins` cannot contain null entries."
):
_ = s.digitize(bins)
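def _digitize_right_flag_sketch():
    # Not a test: a minimal sketch (numpy only) of the `right` flag that the
    # digitize tests above forward to cudf -- with right=False a value equal
    # to a bin edge falls in the upper bin, with right=True in the lower bin.
    import numpy as np

    bins = np.array([2, 5, 8])
    assert np.digitize([5], bins, right=False)[0] == 2  # bins[1] <= 5 < bins[2]
    assert np.digitize([5], bins, right=True)[0] == 1  # bins[0] < 5 <= bins[1]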
def test_pandas_non_contiguous():
arr1 = np.random.sample([5000, 10])
assert arr1.flags["C_CONTIGUOUS"] is True
df = pd.DataFrame(arr1)
for col in df.columns:
assert df[col].values.flags["C_CONTIGUOUS"] is False
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.to_pandas(), df)
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
@pytest.mark.parametrize("null_type", [np.nan, None, "mixed"])
def test_series_all_null(num_elements, null_type):
if null_type == "mixed":
data = []
data1 = [np.nan] * int(num_elements / 2)
data2 = [None] * int(num_elements / 2)
for idx in range(len(data1)):
data.append(data1[idx])
data.append(data2[idx])
else:
data = [null_type] * num_elements
    # Cast the pandas Series explicitly because a list of None values would
    # otherwise produce `object` dtype
expect = pd.Series(data, dtype="float64")
got = cudf.Series(data)
assert_eq(expect, got)
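def _none_vs_nan_dtype_sketch():
    # Not a test: a minimal sketch (pandas only) of the dtype note above --
    # a list of None values produces an object-dtype Series, while NaN
    # produces float64, hence the explicit dtype="float64" in the expected.
    import numpy as np
    import pandas as pd

    assert pd.Series([None, None]).dtype == np.dtype("object")
    assert pd.Series([np.nan, np.nan]).dtype == np.dtype("float64")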
@pytest.mark.parametrize("num_elements", [0, 2, 10, 100])
def test_series_all_valid_nan(num_elements):
data = [np.nan] * num_elements
sr = cudf.Series(data, nan_as_null=False)
np.testing.assert_equal(sr.null_count, 0)
def test_series_rename():
pds = pd.Series([1, 2, 3], name="asdf")
gds = cudf.Series([1, 2, 3], name="asdf")
expect = pds.rename("new_name")
got = gds.rename("new_name")
assert_eq(expect, got)
pds = pd.Series(expect)
gds = cudf.Series(got)
assert_eq(pds, gds)
pds = pd.Series(expect, name="name name")
gds = cudf.Series(got, name="name name")
assert_eq(pds, gds)
@pytest.mark.parametrize("data_type", dtypes)
@pytest.mark.parametrize("nelem", [0, 100])
def test_head_tail(nelem, data_type):
def check_index_equality(left, right):
assert left.index.equals(right.index)
def check_values_equality(left, right):
if len(left) == 0 and len(right) == 0:
return None
np.testing.assert_array_equal(left.to_pandas(), right.to_pandas())
def check_frame_series_equality(left, right):
check_index_equality(left, right)
check_values_equality(left, right)
gdf = cudf.DataFrame(
{
"a": np.random.randint(0, 1000, nelem).astype(data_type),
"b": np.random.randint(0, 1000, nelem).astype(data_type),
}
)
check_frame_series_equality(gdf.head(), gdf[:5])
check_frame_series_equality(gdf.head(3), gdf[:3])
check_frame_series_equality(gdf.head(-2), gdf[:-2])
check_frame_series_equality(gdf.head(0), gdf[0:0])
check_frame_series_equality(gdf["a"].head(), gdf["a"][:5])
check_frame_series_equality(gdf["a"].head(3), gdf["a"][:3])
check_frame_series_equality(gdf["a"].head(-2), gdf["a"][:-2])
check_frame_series_equality(gdf.tail(), gdf[-5:])
check_frame_series_equality(gdf.tail(3), gdf[-3:])
check_frame_series_equality(gdf.tail(-2), gdf[2:])
check_frame_series_equality(gdf.tail(0), gdf[0:0])
check_frame_series_equality(gdf["a"].tail(), gdf["a"][-5:])
check_frame_series_equality(gdf["a"].tail(3), gdf["a"][-3:])
check_frame_series_equality(gdf["a"].tail(-2), gdf["a"][2:])
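def _head_tail_negative_n_sketch():
    # Not a test: a minimal sketch (pandas only) of the negative-n cases
    # checked above -- head(-n) drops the last n rows and tail(-n) drops the
    # first n rows, matching the gdf[:-2] and gdf[2:] slices used as expected.
    import pandas as pd

    s = pd.Series(range(5))
    assert s.head(-2).tolist() == [0, 1, 2]
    assert s.tail(-2).tolist() == [2, 3, 4]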
def test_tail_for_string():
gdf = cudf.DataFrame()
gdf["id"] = cudf.Series(["a", "b"], dtype=np.object_)
gdf["v"] = cudf.Series([1, 2])
assert_eq(gdf.tail(3), gdf.to_pandas().tail(3))
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index(pdf, gdf, drop):
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_named_index(pdf, gdf, drop):
pdf.index.name = "cudf"
gdf.index.name = "cudf"
assert_eq(
pdf.reset_index(drop=drop, inplace=False),
gdf.reset_index(drop=drop, inplace=False),
)
assert_eq(
pdf.x.reset_index(drop=drop, inplace=False),
gdf.x.reset_index(drop=drop, inplace=False),
)
@pytest.mark.parametrize("drop", [True, False])
def test_reset_index_inplace(pdf, gdf, drop):
pdf.reset_index(drop=drop, inplace=True)
gdf.reset_index(drop=drop, inplace=True)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 2, 3, 4, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize(
"index",
[
"a",
["a", "b"],
pd.CategoricalIndex(["I", "II", "III", "IV", "V"]),
pd.Series(["h", "i", "k", "l", "m"]),
["b", pd.Index(["I", "II", "III", "IV", "V"])],
["c", [11, 12, 13, 14, 15]],
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5), # corner case
[pd.Series(["h", "i", "k", "l", "m"]), pd.RangeIndex(0, 5)],
[
pd.MultiIndex(
levels=[
["I", "II", "III", "IV", "V"],
["one", "two", "three", "four", "five"],
],
codes=[[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]],
names=["col1", "col2"],
),
pd.RangeIndex(0, 5),
],
],
)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_set_index(data, index, drop, append, inplace):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
expected = pdf.set_index(index, inplace=inplace, drop=drop, append=append)
actual = gdf.set_index(index, inplace=inplace, drop=drop, append=append)
if inplace:
expected = pdf
actual = gdf
assert_eq(expected, actual)
@pytest.mark.parametrize(
"data",
[
{
"a": [1, 1, 2, 2, 5],
"b": ["a", "b", "c", "d", "e"],
"c": [1.0, 2.0, 3.0, 4.0, 5.0],
}
],
)
@pytest.mark.parametrize("index", ["a", pd.Index([1, 1, 2, 2, 3])])
@pytest.mark.parametrize("verify_integrity", [True])
@pytest.mark.xfail
def test_set_index_verify_integrity(data, index, verify_integrity):
gdf = cudf.DataFrame(data)
gdf.set_index(index, verify_integrity=verify_integrity)
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("nelem", [10, 200, 1333])
def test_set_index_multi(drop, nelem):
np.random.seed(0)
a = np.arange(nelem)
np.random.shuffle(a)
df = pd.DataFrame(
{
"a": a,
"b": np.random.randint(0, 4, size=nelem),
"c": np.random.uniform(low=0, high=4, size=nelem),
"d": np.random.choice(["green", "black", "white"], nelem),
}
)
df["e"] = df["d"].astype("category")
gdf = cudf.DataFrame.from_pandas(df)
assert_eq(gdf.set_index("a", drop=drop), gdf.set_index(["a"], drop=drop))
assert_eq(
df.set_index(["b", "c"], drop=drop),
gdf.set_index(["b", "c"], drop=drop),
)
assert_eq(
df.set_index(["d", "b"], drop=drop),
gdf.set_index(["d", "b"], drop=drop),
)
assert_eq(
df.set_index(["b", "d", "e"], drop=drop),
gdf.set_index(["b", "d", "e"], drop=drop),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_0(copy):
# TODO (ptaylor): pandas changes `int` dtype to `float64`
# when reindexing and filling new label indices with NaN
gdf = cudf.datasets.randomdata(
nrows=6,
dtypes={
"a": "category",
# 'b': int,
"c": float,
"d": str,
},
)
pdf = gdf.to_pandas()
# Validate reindex returns a copy unmodified
assert_eq(pdf.reindex(copy=True), gdf.reindex(copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_1(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis defaults to 0
assert_eq(pdf.reindex(index, copy=True), gdf.reindex(index, copy=copy))
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_2(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(index, axis=0, copy=True),
gdf.reindex(index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_3(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
    # Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(columns, axis=1, copy=True),
gdf.reindex(columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_4(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis=0
assert_eq(
pdf.reindex(labels=index, axis=0, copy=True),
gdf.reindex(labels=index, axis=0, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_5(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis=1
assert_eq(
pdf.reindex(labels=columns, axis=1, copy=True),
gdf.reindex(labels=columns, axis=1, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_6(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as index when axis='index'
assert_eq(
pdf.reindex(labels=index, axis="index", copy=True),
gdf.reindex(labels=index, axis="index", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_7(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate labels are used as columns when axis='columns'
assert_eq(
pdf.reindex(labels=columns, axis="columns", copy=True),
gdf.reindex(labels=columns, axis="columns", copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_8(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes labels when index=labels
assert_eq(
pdf.reindex(index=index, copy=True),
gdf.reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_9(copy):
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes column names when columns=labels
assert_eq(
pdf.reindex(columns=columns, copy=True),
gdf.reindex(columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_10(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_dataframe_reindex_change_dtype(copy):
if PANDAS_GE_110:
kwargs = {"check_freq": False}
else:
kwargs = {}
index = pd.date_range("12/29/2009", periods=10, freq="D")
columns = ["a", "b", "c", "d", "e"]
gdf = cudf.datasets.randomdata(
nrows=6, dtypes={"a": "category", "c": float, "d": str}
)
pdf = gdf.to_pandas()
# Validate reindexes both labels and column names when
# index=index_labels and columns=column_labels
assert_eq(
pdf.reindex(index=index, columns=columns, copy=True),
gdf.reindex(index=index, columns=columns, copy=copy),
**kwargs,
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_categorical_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"a": "category"})
pdf = gdf.to_pandas()
assert_eq(pdf["a"].reindex(copy=True), gdf["a"].reindex(copy=copy))
assert_eq(
pdf["a"].reindex(index, copy=True), gdf["a"].reindex(index, copy=copy)
)
assert_eq(
pdf["a"].reindex(index=index, copy=True),
gdf["a"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_float_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"c": float})
pdf = gdf.to_pandas()
assert_eq(pdf["c"].reindex(copy=True), gdf["c"].reindex(copy=copy))
assert_eq(
pdf["c"].reindex(index, copy=True), gdf["c"].reindex(index, copy=copy)
)
assert_eq(
pdf["c"].reindex(index=index, copy=True),
gdf["c"].reindex(index=index, copy=copy),
)
@pytest.mark.parametrize("copy", [True, False])
def test_series_string_reindex(copy):
index = [-3, 0, 3, 0, -2, 1, 3, 4, 6]
gdf = cudf.datasets.randomdata(nrows=6, dtypes={"d": str})
pdf = gdf.to_pandas()
assert_eq(pdf["d"].reindex(copy=True), gdf["d"].reindex(copy=copy))
assert_eq(
pdf["d"].reindex(index, copy=True), gdf["d"].reindex(index, copy=copy)
)
assert_eq(
pdf["d"].reindex(index=index, copy=True),
gdf["d"].reindex(index=index, copy=copy),
)
def test_to_frame(pdf, gdf):
assert_eq(pdf.x.to_frame(), gdf.x.to_frame())
name = "foo"
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
    assert_eq(gdf_new_name, pdf_new_name)
name = False
gdf_new_name = gdf.x.to_frame(name=name)
pdf_new_name = pdf.x.to_frame(name=name)
assert_eq(gdf_new_name, pdf_new_name)
assert gdf_new_name.columns[0] is name
def test_dataframe_empty_sort_index():
pdf = pd.DataFrame({"x": []})
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf.sort_index()
got = gdf.sort_index()
assert_eq(expect, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_sort_index(
axis, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{"b": [1, 3, 2], "a": [1, 4, 3], "c": [4, 1, 5]},
index=[3.0, 1.0, np.nan],
)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
assert_eq(pdf, gdf)
else:
assert_eq(expected, got)
@pytest.mark.parametrize("axis", [0, 1, "index", "columns"])
@pytest.mark.parametrize(
"level",
[
0,
"b",
1,
["b"],
"a",
["a", "b"],
["b", "a"],
[0, 1],
[1, 0],
[0, 2],
None,
],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("ignore_index", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_dataframe_mulitindex_sort_index(
axis, level, ascending, inplace, ignore_index, na_position
):
pdf = pd.DataFrame(
{
"b": [1.0, 3.0, np.nan],
"a": [1, 4, 3],
1: ["a", "b", "c"],
"e": [3, 1, 4],
"d": [1, 2, 8],
}
).set_index(["b", "a", 1])
gdf = cudf.DataFrame.from_pandas(pdf)
    # pandas did not support ignore_index in sort_index before v1.0, so it is
    # applied to `expected` manually below
expected = pdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
na_position=na_position,
)
got = gdf.sort_index(
axis=axis,
level=level,
ascending=ascending,
ignore_index=ignore_index,
inplace=inplace,
na_position=na_position,
)
if inplace is True:
if ignore_index is True:
pdf = pdf.reset_index(drop=True)
assert_eq(pdf, gdf)
else:
if ignore_index is True:
expected = expected.reset_index(drop=True)
assert_eq(expected, got)
@pytest.mark.parametrize("dtype", dtypes + ["category"])
def test_dataframe_0_row_dtype(dtype):
if dtype == "category":
data = pd.Series(["a", "b", "c", "d", "e"], dtype="category")
else:
data = np.array([1, 2, 3, 4, 5], dtype=dtype)
expect = cudf.DataFrame()
expect["x"] = data
expect["y"] = data
got = expect.head(0)
for col_name in got.columns:
assert expect[col_name].dtype == got[col_name].dtype
expect = cudf.Series(data)
got = expect.head(0)
assert expect.dtype == got.dtype
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_series_list_nanasnull(nan_as_null):
data = [1.0, 2.0, 3.0, np.nan, None]
expect = pa.array(data, from_pandas=nan_as_null)
got = cudf.Series(data, nan_as_null=nan_as_null).to_arrow()
    # Work around a bug in Arrow 0.14.1 where NaNs aren't handled: compare as
    # int64 instead
expect = expect.cast("int64", safe=False)
got = got.cast("int64", safe=False)
assert pa.Array.equals(expect, got)
def test_column_assignment():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float}
)
new_cols = ["q", "r", "s"]
gdf.columns = new_cols
assert list(gdf.columns) == new_cols
def test_select_dtype():
gdf = cudf.datasets.randomdata(
nrows=20, dtypes={"a": "category", "b": int, "c": float, "d": str}
)
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("float64"), gdf.select_dtypes("float64"))
assert_eq(pdf.select_dtypes(np.float64), gdf.select_dtypes(np.float64))
assert_eq(
pdf.select_dtypes(include=["float64"]),
gdf.select_dtypes(include=["float64"]),
)
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["int64", "float64"]),
gdf.select_dtypes(include=["int64", "float64"]),
)
assert_eq(
pdf.select_dtypes(include=np.number),
gdf.select_dtypes(include=np.number),
)
assert_eq(
pdf.select_dtypes(include=[np.int64, np.float64]),
gdf.select_dtypes(include=[np.int64, np.float64]),
)
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(exclude=np.number),
gdf.select_dtypes(exclude=np.number),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
rfunc_args_and_kwargs=([], {"includes": ["Foo"]}),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes,
rfunc=gdf.select_dtypes,
lfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
rfunc_args_and_kwargs=(
[],
{"exclude": np.number, "include": np.number},
),
)
gdf = cudf.DataFrame(
{"A": [3, 4, 5], "C": [1, 2, 3], "D": ["a", "b", "c"]}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["object", "int", "category"]),
gdf.select_dtypes(include=["object", "int", "category"]),
)
assert_eq(
pdf.select_dtypes(include=["object"], exclude=["category"]),
gdf.select_dtypes(include=["object"], exclude=["category"]),
)
gdf = cudf.DataFrame({"a": range(10), "b": range(10, 20)})
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(include=["category"]),
gdf.select_dtypes(include=["category"]),
)
assert_eq(
pdf.select_dtypes(include=["float"]),
gdf.select_dtypes(include=["float"]),
)
assert_eq(
pdf.select_dtypes(include=["object"]),
gdf.select_dtypes(include=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"]), gdf.select_dtypes(include=["int"])
)
assert_eq(
pdf.select_dtypes(exclude=["float"]),
gdf.select_dtypes(exclude=["float"]),
)
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
assert_exceptions_equal(
lfunc=pdf.select_dtypes, rfunc=gdf.select_dtypes,
)
gdf = cudf.DataFrame(
{"a": cudf.Series([], dtype="int"), "b": cudf.Series([], dtype="str")}
)
pdf = gdf.to_pandas()
assert_eq(
pdf.select_dtypes(exclude=["object"]),
gdf.select_dtypes(exclude=["object"]),
)
assert_eq(
pdf.select_dtypes(include=["int"], exclude=["object"]),
gdf.select_dtypes(include=["int"], exclude=["object"]),
)
def test_select_dtype_datetime():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_eq(pdf.select_dtypes("datetime64"), gdf.select_dtypes("datetime64"))
assert_eq(
pdf.select_dtypes(np.dtype("datetime64")),
gdf.select_dtypes(np.dtype("datetime64")),
)
assert_eq(
pdf.select_dtypes(include="datetime64"),
gdf.select_dtypes(include="datetime64"),
)
def test_select_dtype_datetime_with_frequency():
gdf = cudf.datasets.timeseries(
start="2000-01-01", end="2000-01-02", freq="3600s", dtypes={"x": int}
)
gdf = gdf.reset_index()
pdf = gdf.to_pandas()
assert_exceptions_equal(
pdf.select_dtypes,
gdf.select_dtypes,
(["datetime64[ms]"],),
(["datetime64[ms]"],),
)
def test_array_ufunc():
gdf = cudf.DataFrame({"x": [2, 3, 4.0], "y": [9.0, 2.5, 1.1]})
pdf = gdf.to_pandas()
assert_eq(np.sqrt(gdf), np.sqrt(pdf))
assert_eq(np.sqrt(gdf.x), np.sqrt(pdf.x))
@pytest.mark.parametrize("nan_value", [-5, -5.0, 0, 5, 5.0, None, "pandas"])
def test_series_to_gpu_array(nan_value):
s = cudf.Series([0, 1, None, 3])
np.testing.assert_array_equal(
s.to_array(nan_value), s.to_gpu_array(nan_value).copy_to_host()
)
def test_dataframe_describe_exclude():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(exclude=["float"])
pdf_results = pdf.describe(exclude=["float"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_include():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include=["int"])
pdf_results = pdf.describe(include=["int"])
assert_eq(gdf_results, pdf_results)
def test_dataframe_describe_default():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe()
pdf_results = pdf.describe()
assert_eq(pdf_results, gdf_results)
def test_series_describe_include_all():
np.random.seed(12)
data_length = 10000
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["x"] = df.x.astype("int64")
df["y"] = np.random.normal(10, 1, data_length)
df["animal"] = np.random.choice(["dog", "cat", "bird"], data_length)
pdf = df.to_pandas()
gdf_results = df.describe(include="all")
pdf_results = pdf.describe(include="all")
assert_eq(gdf_results[["x", "y"]], pdf_results[["x", "y"]])
assert_eq(gdf_results.index, pdf_results.index)
assert_eq(gdf_results.columns, pdf_results.columns)
assert_eq(
gdf_results[["animal"]].fillna(-1).astype("str"),
pdf_results[["animal"]].fillna(-1).astype("str"),
)
def test_dataframe_describe_percentiles():
np.random.seed(12)
data_length = 10000
sample_percentiles = [0.0, 0.1, 0.33, 0.84, 0.4, 0.99]
df = cudf.DataFrame()
df["x"] = np.random.normal(10, 1, data_length)
df["y"] = np.random.normal(10, 1, data_length)
pdf = df.to_pandas()
gdf_results = df.describe(percentiles=sample_percentiles)
pdf_results = pdf.describe(percentiles=sample_percentiles)
assert_eq(pdf_results, gdf_results)
def test_get_numeric_data():
pdf = pd.DataFrame(
{"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]}
)
gdf = cudf.from_pandas(pdf)
assert_eq(pdf._get_numeric_data(), gdf._get_numeric_data())
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_shift(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
shifted_outcome = gdf.a.shift(period).fillna(0)
expected_outcome = pdf.a.shift(period).fillna(0).astype(dtype)
if data_empty:
assert_eq(shifted_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(shifted_outcome, expected_outcome)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20])
@pytest.mark.parametrize("data_empty", [False, True])
def test_diff(dtype, period, data_empty):
if data_empty:
data = None
else:
if dtype == np.int8:
# to keep data in range
data = gen_rand(dtype, 100000, low=-2, high=2)
else:
data = gen_rand(dtype, 100000)
gdf = cudf.DataFrame({"a": cudf.Series(data, dtype=dtype)})
pdf = pd.DataFrame({"a": pd.Series(data, dtype=dtype)})
expected_outcome = pdf.a.diff(period)
diffed_outcome = gdf.a.diff(period).astype(expected_outcome.dtype)
if data_empty:
assert_eq(diffed_outcome, expected_outcome, check_index_type=False)
else:
assert_eq(diffed_outcome, expected_outcome)
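def _diff_equals_shift_subtraction_sketch():
    # Not a test: a minimal sketch (pandas only) relating the two tests above:
    # diff(period) is the element-wise difference with the shifted series.
    import pandas as pd

    s = pd.Series([1.0, 4.0, 9.0, 16.0])
    assert s.diff(1).equals(s - s.shift(1))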
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_isnull_isna(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.isnull(), gdf.isnull())
assert_eq(df.isna(), gdf.isna())
# Test individual columns
for col in df:
assert_eq(df[col].isnull(), gdf[col].isnull())
assert_eq(df[col].isna(), gdf[col].isna())
@pytest.mark.parametrize("df", _dataframe_na_data())
@pytest.mark.parametrize("nan_as_null", [True, False, None])
def test_dataframe_notna_notnull(df, nan_as_null):
gdf = cudf.DataFrame.from_pandas(df, nan_as_null=nan_as_null)
assert_eq(df.notnull(), gdf.notnull())
assert_eq(df.notna(), gdf.notna())
# Test individual columns
for col in df:
assert_eq(df[col].notnull(), gdf[col].notnull())
assert_eq(df[col].notna(), gdf[col].notna())
def test_ndim():
pdf = pd.DataFrame({"x": range(5), "y": range(5, 10)})
gdf = cudf.DataFrame.from_pandas(pdf)
assert pdf.ndim == gdf.ndim
assert pdf.x.ndim == gdf.x.ndim
s = pd.Series(dtype="float64")
gs = cudf.Series()
assert s.ndim == gs.ndim
@pytest.mark.parametrize(
"decimals",
[
-3,
0,
5,
pd.Series([1, 4, 3, -6], index=["w", "x", "y", "z"]),
cudf.Series([-4, -2, 12], index=["x", "y", "z"]),
{"w": -1, "x": 15, "y": 2},
],
)
def test_dataframe_round(decimals):
pdf = pd.DataFrame(
{
"w": np.arange(0.5, 10.5, 1),
"x": np.random.normal(-100, 100, 10),
"y": np.array(
[
14.123,
2.343,
np.nan,
0.0,
-8.302,
np.nan,
94.313,
-112.236,
-8.029,
np.nan,
]
),
"z": np.repeat([-0.6459412758761901], 10),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
if isinstance(decimals, cudf.Series):
pdecimals = decimals.to_pandas()
else:
pdecimals = decimals
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
# with nulls, maintaining existing null mask
for c in pdf.columns:
arr = pdf[c].to_numpy().astype("float64") # for pandas nulls
arr.ravel()[np.random.choice(10, 5, replace=False)] = np.nan
pdf[c] = gdf[c] = arr
result = gdf.round(decimals)
expected = pdf.round(pdecimals)
assert_eq(result, expected)
    for c in gdf.columns:
        assert np.array_equal(
            gdf[c].nullmask.to_array(), result[c].nullmask.to_array()
        )
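def _round_per_column_decimals_sketch():
    # Not a test: a minimal sketch (pandas only) of the dict/Series form of
    # `decimals` used above -- each column is rounded to its own precision,
    # and columns missing from the mapping are left untouched.
    import pandas as pd

    df = pd.DataFrame({"w": [1.2345], "x": [1.2345]})
    out = df.round({"w": 1})
    assert out["w"].iloc[0] == 1.2
    assert out["x"].iloc[0] == 1.2345  # "x" not in the mapping, unchanged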
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: all does not "
"support columns of object dtype."
)
],
),
],
)
def test_all(data):
    # Pandas treats `None` in object-dtype columns as True for some reason, so
    # replace it with `False` before comparing
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data).replace(
[None], False
)
gdata = cudf.Series.from_pandas(pdata)
else:
pdata = pd.DataFrame(data, columns=["a", "b"]).replace([None], False)
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.all(bool_only=True)
expected = pdata.all(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.all(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.all(level="a")
got = gdata.all()
expected = pdata.all()
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3],
[-2, -1, 2, 3, 5],
[-2, -1, 0, 3, 5],
[0, 0, 0, 0, 0],
[0, 0, None, 0],
[True, False, False],
[True],
[False],
[],
[True, None, False],
[True, True, None],
[None, None],
[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]],
[[1, True], [2, False], [3, False]],
pytest.param(
[["a", True], ["b", False], ["c", False]],
marks=[
pytest.mark.xfail(
reason="NotImplementedError: any does not "
"support columns of object dtype."
)
],
),
],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_any(data, axis):
if np.array(data).ndim <= 1:
pdata = cudf.utils.utils._create_pandas_series(data=data)
gdata = cudf.Series.from_pandas(pdata)
if axis == 1:
with pytest.raises(NotImplementedError):
gdata.any(axis=axis)
else:
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
else:
pdata = pd.DataFrame(data, columns=["a", "b"])
gdata = cudf.DataFrame.from_pandas(pdata)
# test bool_only
if pdata["b"].dtype == "bool":
got = gdata.any(bool_only=True)
expected = pdata.any(bool_only=True)
assert_eq(got, expected)
else:
with pytest.raises(NotImplementedError):
gdata.any(bool_only=False)
with pytest.raises(NotImplementedError):
gdata.any(level="a")
got = gdata.any(axis=axis)
expected = pdata.any(axis=axis)
assert_eq(got, expected)
@pytest.mark.parametrize("axis", [0, 1])
def test_empty_dataframe_any(axis):
pdf = pd.DataFrame({}, columns=["a", "b"])
gdf = cudf.DataFrame.from_pandas(pdf)
got = gdf.any(axis=axis)
expected = pdf.any(axis=axis)
assert_eq(got, expected, check_index_type=False)
@pytest.mark.parametrize("indexed", [False, True])
def test_dataframe_sizeof(indexed):
rows = int(1e6)
    index = list(range(rows)) if indexed else None
gdf = cudf.DataFrame({"A": [8] * rows, "B": [32] * rows}, index=index)
for c in gdf._data.columns:
assert gdf._index.__sizeof__() == gdf._index.__sizeof__()
cols_sizeof = sum(c.__sizeof__() for c in gdf._data.columns)
assert gdf.__sizeof__() == (gdf._index.__sizeof__() + cols_sizeof)
@pytest.mark.parametrize("a", [[], ["123"]])
@pytest.mark.parametrize("b", ["123", ["123"]])
@pytest.mark.parametrize(
"misc_data",
["123", ["123"] * 20, 123, [1, 2, 0.8, 0.9] * 50, 0.9, 0.00001],
)
@pytest.mark.parametrize("non_list_data", [123, "abc", "zyx", "rapids", 0.8])
def test_create_dataframe_cols_empty_data(a, b, misc_data, non_list_data):
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = b
actual["b"] = b
assert_eq(actual, expected)
expected = pd.DataFrame({"a": []})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = misc_data
actual["b"] = misc_data
assert_eq(actual, expected)
expected = pd.DataFrame({"a": a})
actual = cudf.DataFrame.from_pandas(expected)
expected["b"] = non_list_data
actual["b"] = non_list_data
assert_eq(actual, expected)
def test_empty_dataframe_describe():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
expected = pdf.describe()
actual = gdf.describe()
assert_eq(expected, actual)
def test_as_column_types():
col = column.as_column(cudf.Series([]))
assert_eq(col.dtype, np.dtype("float64"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float64"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="float32")
assert_eq(col.dtype, np.dtype("float32"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="float32"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="str")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="str"))
assert_eq(pds, gds)
col = column.as_column(cudf.Series([]), dtype="object")
assert_eq(col.dtype, np.dtype("object"))
gds = cudf.Series(col)
pds = pd.Series(pd.Series([], dtype="object"))
assert_eq(pds, gds)
pds = pd.Series(np.array([1, 2, 3]), dtype="float32")
gds = cudf.Series(column.as_column(np.array([1, 2, 3]), dtype="float32"))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 3], dtype="float32")
gds = cudf.Series([1, 2, 3], dtype="float32")
assert_eq(pds, gds)
pds = pd.Series([], dtype="float64")
gds = cudf.Series(column.as_column(pds))
assert_eq(pds, gds)
pds = pd.Series([1, 2, 4], dtype="int64")
gds = cudf.Series(column.as_column(cudf.Series([1, 2, 4]), dtype="int64"))
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="float32")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="float32")
)
assert_eq(pds, gds)
pds = pd.Series([1.2, 18.0, 9.0], dtype="str")
gds = cudf.Series(
column.as_column(cudf.Series([1.2, 18.0, 9.0]), dtype="str")
)
assert_eq(pds, gds)
pds = pd.Series(pd.Index(["1", "18", "9"]), dtype="int")
gds = cudf.Series(
cudf.core.index.StringIndex(["1", "18", "9"]), dtype="int"
)
assert_eq(pds, gds)
def test_one_row_head():
gdf = cudf.DataFrame({"name": ["carl"], "score": [100]}, index=[123])
pdf = gdf.to_pandas()
head_gdf = gdf.head()
head_pdf = pdf.head()
assert_eq(head_pdf, head_gdf)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric(dtype, as_dtype):
psr = pd.Series([1, 2, 4, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", NUMERIC_TYPES)
def test_series_astype_numeric_to_numeric_nulls(dtype, as_dtype):
data = [1, 2, None, 3]
sr = cudf.Series(data, dtype=dtype)
got = sr.astype(as_dtype)
expect = cudf.Series([1, 2, None, 3], dtype=as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_numeric_to_other(dtype, as_dtype):
psr = pd.Series([1, 2, 3], dtype=dtype)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"str",
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05"]
else:
data = ["1", "2", "3"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"as_dtype",
[
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_series_astype_datetime_to_other(as_dtype):
data = ["2001-01-01", "2002-02-02", "2001-01-05"]
psr = pd.Series(data)
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize(
"inp",
[
("datetime64[ns]", "2011-01-01 00:00:00.000000000"),
("datetime64[us]", "2011-01-01 00:00:00.000000"),
("datetime64[ms]", "2011-01-01 00:00:00.000"),
("datetime64[s]", "2011-01-01 00:00:00"),
],
)
def test_series_astype_datetime_to_string(inp):
dtype, expect = inp
base_date = "2011-01-01"
sr = cudf.Series([base_date], dtype=dtype)
got = sr.astype(str)[0]
assert expect == got
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"uint32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_series_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
gsr = cudf.from_pandas(psr)
assert_eq(psr.astype(as_dtype), gsr.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
gsr = cudf.from_pandas(psr)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
psr.astype("int32").astype(ordered_dtype_pd).astype("int32"),
gsr.astype("int32").astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize("ordered", [True, False])
def test_series_astype_cat_ordered_to_unordered(ordered):
pd_dtype = pd.CategoricalDtype(categories=[1, 2, 3], ordered=ordered)
pd_to_dtype = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=not ordered
)
gd_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
gd_to_dtype = cudf.CategoricalDtype.from_pandas(pd_to_dtype)
psr = pd.Series([1, 2, 3], dtype=pd_dtype)
gsr = cudf.Series([1, 2, 3], dtype=gd_dtype)
expect = psr.astype(pd_to_dtype)
got = gsr.astype(gd_to_dtype)
assert_eq(expect, got)
def test_series_astype_null_cases():
data = [1, 2, None, 3]
# numerical to other
assert_eq(cudf.Series(data, dtype="str"), cudf.Series(data).astype("str"))
assert_eq(
cudf.Series(data, dtype="category"),
cudf.Series(data).astype("category"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="int32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="uint32").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data).astype("datetime64[ms]"),
)
# categorical to other
assert_eq(
cudf.Series(data, dtype="str"),
cudf.Series(data, dtype="category").astype("str"),
)
assert_eq(
cudf.Series(data, dtype="float32"),
cudf.Series(data, dtype="category").astype("float32"),
)
assert_eq(
cudf.Series(data, dtype="datetime64[ms]"),
cudf.Series(data, dtype="category").astype("datetime64[ms]"),
)
# string to other
assert_eq(
cudf.Series([1, 2, None, 3], dtype="int32"),
cudf.Series(["1", "2", None, "3"]).astype("int32"),
)
assert_eq(
cudf.Series(
["2001-01-01", "2001-02-01", None, "2001-03-01"],
dtype="datetime64[ms]",
),
cudf.Series(["2001-01-01", "2001-02-01", None, "2001-03-01"]).astype(
"datetime64[ms]"
),
)
assert_eq(
cudf.Series(["a", "b", "c", None], dtype="category").to_pandas(),
cudf.Series(["a", "b", "c", None]).astype("category").to_pandas(),
)
# datetime to other
data = [
"2001-01-01 00:00:00.000000",
"2001-02-01 00:00:00.000000",
None,
"2001-03-01 00:00:00.000000",
]
assert_eq(
cudf.Series(data),
cudf.Series(data, dtype="datetime64[us]").astype("str"),
)
assert_eq(
pd.Series(data, dtype="datetime64[ns]").astype("category"),
cudf.from_pandas(pd.Series(data, dtype="datetime64[ns]")).astype(
"category"
),
)
def test_series_astype_null_categorical():
sr = cudf.Series([None, None, None], dtype="category")
expect = cudf.Series([None, None, None], dtype="int32")
got = sr.astype("int32")
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
(
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
),
[
pd.Series([3, 3.0]),
pd.Series([2.3, 3.9]),
pd.Series([1.5, 3.9]),
pd.Series([1.0, 2]),
],
],
)
def test_create_dataframe_from_list_like(data):
pdf = pd.DataFrame(data, index=["count", "mean", "std", "min"])
gdf = cudf.DataFrame(data, index=["count", "mean", "std", "min"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(data)
gdf = cudf.DataFrame(data)
assert_eq(pdf, gdf)
def test_create_dataframe_column():
pdf = pd.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
gdf = cudf.DataFrame(columns=["a", "b", "c"], index=["A", "Z", "X"])
assert_eq(pdf, gdf)
pdf = pd.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
gdf = cudf.DataFrame(
{"a": [1, 2, 3], "b": [2, 3, 5]},
columns=["a", "b", "c"],
index=["A", "Z", "X"],
)
assert_eq(pdf, gdf)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pd.Categorical(["a", "b", "c"]),
["m", "a", "d", "v"],
],
)
def test_series_values_host_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
np.testing.assert_array_equal(pds.values, gds.values_host)
@pytest.mark.parametrize(
"data",
[
[1, 2, 4],
[],
[5.0, 7.0, 8.0],
pytest.param(
pd.Categorical(["a", "b", "c"]),
marks=pytest.mark.xfail(raises=NotImplementedError),
),
pytest.param(
["m", "a", "d", "v"],
marks=pytest.mark.xfail(raises=NotImplementedError),
),
],
)
def test_series_values_property(data):
pds = cudf.utils.utils._create_pandas_series(data=data)
gds = cudf.Series(data)
gds_vals = gds.values
assert isinstance(gds_vals, cupy.ndarray)
np.testing.assert_array_equal(gds_vals.get(), pds.values)
@pytest.mark.parametrize(
"data",
[
{"A": [1, 2, 3], "B": [4, 5, 6]},
{"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]},
{"A": [1, 2, 3], "B": [1.0, 2.0, 3.0]},
{"A": np.float32(np.arange(3)), "B": np.float64(np.arange(3))},
pytest.param(
{"A": [1, None, 3], "B": [1, 2, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [None, None, None], "B": [None, None, None]},
marks=pytest.mark.xfail(
reason="Nulls not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": [], "B": []},
marks=pytest.mark.xfail(reason="Requires at least 1 row"),
),
pytest.param(
{"A": [1, 2, 3], "B": ["a", "b", "c"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
pytest.param(
{"A": pd.Categorical(["a", "b", "c"]), "B": ["d", "e", "f"]},
marks=pytest.mark.xfail(
reason="str or categorical not supported by as_gpu_matrix"
),
),
],
)
def test_df_values_property(data):
pdf = pd.DataFrame.from_dict(data)
gdf = cudf.DataFrame.from_pandas(pdf)
pmtr = pdf.values
gmtr = gdf.values.get()
np.testing.assert_array_equal(pmtr, gmtr)
def test_value_counts():
pdf = pd.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
gdf = cudf.DataFrame(
{
"numeric": [1, 2, 3, 4, 5, 6, 1, 2, 4] * 10,
"alpha": ["u", "h", "d", "a", "m", "u", "h", "d", "a"] * 10,
}
)
assert_eq(
pdf.numeric.value_counts().sort_index(),
gdf.numeric.value_counts().sort_index(),
check_dtype=False,
)
assert_eq(
pdf.alpha.value_counts().sort_index(),
gdf.alpha.value_counts().sort_index(),
check_dtype=False,
)
@pytest.mark.parametrize(
"data",
[
[],
[0, 12, 14],
[0, 14, 12, 12, 3, 10, 12, 14],
np.random.randint(-100, 100, 200),
pd.Series([0.0, 1.0, None, 10.0]),
[None, None, None, None],
[np.nan, None, -1, 2, 3],
],
)
@pytest.mark.parametrize(
"values",
[
np.random.randint(-100, 100, 10),
[],
[np.nan, None, -1, 2, 3],
[1.0, 12.0, None, None, 120],
[0, 14, 12, 12, 3, 10, 12, 14, None],
[None, None, None],
["0", "12", "14"],
["0", "12", "14", "a"],
],
)
def test_isin_numeric(data, values):
index = np.random.randint(0, 100, len(data))
psr = cudf.utils.utils._create_pandas_series(data=data, index=index)
gsr = cudf.Series.from_pandas(psr, nan_as_null=False)
expected = psr.isin(values)
got = gsr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["2018-01-01", "2019-04-03", None, "2019-12-30"],
dtype="datetime64[ns]",
),
pd.Series(
[
"2018-01-01",
"2019-04-03",
None,
"2019-12-30",
"2018-01-01",
"2018-01-01",
],
dtype="datetime64[ns]",
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
[1514764800000000000, 1577664000000000000],
[
1514764800000000000,
1577664000000000000,
1577664000000000000,
1577664000000000000,
1514764800000000000,
],
["2019-04-03", "2019-12-30", "2012-01-01"],
[
"2012-01-01",
"2012-01-01",
"2012-01-01",
"2019-04-03",
"2019-12-30",
"2012-01-01",
],
],
)
def test_isin_datetime(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["this", "is", None, "a", "test"]),
pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]),
pd.Series(["0", "12", "14"]),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[None, None, None],
["12", "14", "19"],
pytest.param(
[12, 14, 19],
marks=pytest.mark.xfail(
not PANDAS_GE_120,
reason="pandas's failure here seems like a bug(in < 1.2) "
"given the reverse succeeds",
),
),
["is", "this", "is", "this", "is"],
],
)
def test_isin_string(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"),
pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"),
pd.Series([0, 3, 10, 12], dtype="category"),
pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["a", "b", None, "f", "words"],
["0", "12", None, "14"],
[0, 10, 12, None, 39, 40, 1000],
[0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3],
],
)
def test_isin_categorical(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.isin(values)
expected = psr.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[],
pd.Series(
["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
),
pd.Series([0, 15, 10], index=[0, None, 9]),
pd.Series(
range(25),
index=pd.date_range(
start="2019-01-01", end="2019-01-02", freq="H"
),
),
],
)
@pytest.mark.parametrize(
"values",
[
[],
["this", "is"],
[0, 19, 13],
["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02"],
],
)
def test_isin_index(data, values):
psr = cudf.utils.utils._create_pandas_series(data=data)
gsr = cudf.Series.from_pandas(psr)
got = gsr.index.isin(values)
expected = psr.index.isin(values)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
),
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
pd.MultiIndex.from_arrays(
[[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
names=("number", "color"),
),
],
)
@pytest.mark.parametrize(
"values,level,err",
[
(["red", "orange", "yellow"], "color", None),
(["red", "white", "yellow"], "color", None),
([0, 1, 2, 10, 11, 15], "number", None),
([0, 1, 2, 10, 11, 15], None, TypeError),
(pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError),
(pd.Index([0, 1, 2, 8, 11, 15]), "number", None),
(pd.Index(["red", "white", "yellow"]), "color", None),
([(1, "red"), (3, "red")], None, None),
(((1, "red"), (3, "red")), None, None),
(
pd.MultiIndex.from_arrays(
[[1, 2, 3], ["red", "blue", "green"]],
names=("number", "color"),
),
None,
None,
),
(
pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
None,
None,
),
(
pd.MultiIndex.from_arrays(
[
[1, 2, 3, 10, 100],
["red", "blue", "green", "pink", "white"],
],
names=("number", "color"),
),
None,
None,
),
],
)
def test_isin_multiindex(data, values, level, err):
pmdx = data
gmdx = cudf.from_pandas(data)
if err is None:
expected = pmdx.isin(values, level=level)
if isinstance(values, pd.MultiIndex):
values = cudf.from_pandas(values)
got = gmdx.isin(values, level=level)
assert_eq(got, expected)
else:
assert_exceptions_equal(
lfunc=pmdx.isin,
rfunc=gmdx.isin,
lfunc_args_and_kwargs=([values], {"level": level}),
rfunc_args_and_kwargs=([values], {"level": level}),
check_exception_type=False,
expected_error_message=re.escape(
"values need to be a Multi-Index or set/list-like tuple "
"squences when `level=None`."
),
)
@pytest.mark.parametrize(
"data",
[
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [8, 2, 1, 0, 2, 4, 5],
"num_wings": [2, 0, 2, 1, 2, 4, -1],
}
),
],
)
@pytest.mark.parametrize(
"values",
[
[0, 2],
{"num_wings": [0, 3]},
pd.DataFrame(
{"num_legs": [8, 2], "num_wings": [0, 2]},
index=["spider", "falcon"],
),
pd.DataFrame(
{
"num_legs": [2, 4],
"num_wings": [2, 0],
"bird_cats": pd.Series(
["sparrow", "pigeon"],
dtype="category",
index=["falcon", "dog"],
),
},
index=["falcon", "dog"],
),
["sparrow", "pigeon"],
pd.Series(["sparrow", "pigeon"], dtype="category"),
pd.Series([1, 2, 3, 4, 5]),
"abc",
123,
],
)
def test_isin_dataframe(data, values):
pdf = data
gdf = cudf.from_pandas(pdf)
if cudf.utils.dtypes.is_scalar(values):
assert_exceptions_equal(
lfunc=pdf.isin,
rfunc=gdf.isin,
lfunc_args_and_kwargs=([values],),
rfunc_args_and_kwargs=([values],),
)
else:
try:
expected = pdf.isin(values)
        except ValueError as e:
            # older pandas (< 1.1.0) raises here; see pandas GH 34256
            if str(e) == "Lengths must match." and not PANDAS_GE_110:
                pytest.xfail(
                    reason="https://github.com/pandas-dev/pandas/issues/34256"
                )
if isinstance(values, (pd.DataFrame, pd.Series)):
values = cudf.from_pandas(values)
got = gdf.isin(values)
assert_eq(got, expected)
def test_constructor_properties():
df = cudf.DataFrame()
key1 = "a"
key2 = "b"
val1 = np.array([123], dtype=np.float64)
val2 = np.array([321], dtype=np.float64)
df[key1] = val1
df[key2] = val2
# Correct use of _constructor (for DataFrame)
assert_eq(df, df._constructor({key1: val1, key2: val2}))
# Correct use of _constructor (for cudf.Series)
assert_eq(df[key1], df[key2]._constructor(val1, name=key1))
# Correct use of _constructor_sliced (for DataFrame)
assert_eq(df[key1], df._constructor_sliced(val1, name=key1))
# Correct use of _constructor_expanddim (for cudf.Series)
assert_eq(df, df[key2]._constructor_expanddim({key1: val1, key2: val2}))
# Incorrect use of _constructor_sliced (Raises for cudf.Series)
with pytest.raises(NotImplementedError):
df[key1]._constructor_sliced
# Incorrect use of _constructor_expanddim (Raises for DataFrame)
with pytest.raises(NotImplementedError):
df._constructor_expanddim
@pytest.mark.parametrize("dtype", NUMERIC_TYPES)
@pytest.mark.parametrize("as_dtype", ALL_TYPES)
def test_df_astype_numeric_to_all(dtype, as_dtype):
if "uint" in dtype:
data = [1, 2, None, 4, 7]
elif "int" in dtype or "longlong" in dtype:
data = [1, 2, None, 4, -7]
elif "float" in dtype:
data = [1.0, 2.0, None, 4.0, np.nan, -7.0]
gdf = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype=dtype)
gdf["bar"] = cudf.Series(data, dtype=dtype)
insert_data = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = insert_data.astype(as_dtype)
expect["bar"] = insert_data.astype(as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
],
)
def test_df_astype_string_to_other(as_dtype):
if "datetime64" in as_dtype:
# change None to "NaT" after this issue is fixed:
# https://github.com/rapidsai/cudf/issues/5117
data = ["2001-01-01", "2002-02-02", "2000-01-05", None]
elif as_dtype == "int32":
data = [1, 2, 3]
elif as_dtype == "category":
data = ["1", "2", "3", None]
elif "float" in as_dtype:
data = [1.0, 2.0, 3.0, np.nan]
insert_data = cudf.Series.from_pandas(pd.Series(data, dtype="str"))
expect_data = cudf.Series(data, dtype=as_dtype)
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = insert_data
gdf["bar"] = insert_data
expect["foo"] = expect_data
expect["bar"] = expect_data
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int64",
"datetime64[s]",
"datetime64[us]",
"datetime64[ns]",
"str",
"category",
],
)
def test_df_astype_datetime_to_other(as_dtype):
data = [
"1991-11-20 00:00:00.000",
"2004-12-04 00:00:00.000",
"2016-09-13 00:00:00.000",
None,
]
gdf = cudf.DataFrame()
expect = cudf.DataFrame()
gdf["foo"] = cudf.Series(data, dtype="datetime64[ms]")
gdf["bar"] = cudf.Series(data, dtype="datetime64[ms]")
if as_dtype == "int64":
expect["foo"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
expect["bar"] = cudf.Series(
[690595200000, 1102118400000, 1473724800000, None], dtype="int64"
)
elif as_dtype == "str":
expect["foo"] = cudf.Series(data, dtype="str")
expect["bar"] = cudf.Series(data, dtype="str")
elif as_dtype == "category":
expect["foo"] = cudf.Series(gdf["foo"], dtype="category")
expect["bar"] = cudf.Series(gdf["bar"], dtype="category")
else:
expect["foo"] = cudf.Series(data, dtype=as_dtype)
expect["bar"] = cudf.Series(data, dtype=as_dtype)
got = gdf.astype(as_dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"as_dtype",
[
"int32",
"float32",
"category",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
"str",
],
)
def test_df_astype_categorical_to_other(as_dtype):
if "datetime64" in as_dtype:
data = ["2001-01-01", "2002-02-02", "2000-01-05", "2001-01-01"]
else:
data = [1, 2, 3, 1]
psr = pd.Series(data, dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
assert_eq(pdf.astype(as_dtype), gdf.astype(as_dtype))
@pytest.mark.parametrize("ordered", [True, False])
def test_df_astype_to_categorical_ordered(ordered):
psr = pd.Series([1, 2, 3, 1], dtype="category")
pdf = pd.DataFrame()
pdf["foo"] = psr
pdf["bar"] = psr
gdf = cudf.DataFrame.from_pandas(pdf)
ordered_dtype_pd = pd.CategoricalDtype(
categories=[1, 2, 3], ordered=ordered
)
ordered_dtype_gd = cudf.CategoricalDtype.from_pandas(ordered_dtype_pd)
assert_eq(
pdf.astype(ordered_dtype_pd).astype("int32"),
gdf.astype(ordered_dtype_gd).astype("int32"),
)
@pytest.mark.parametrize(
"dtype,args",
[(dtype, {}) for dtype in ALL_TYPES]
+ [("category", {"ordered": True}), ("category", {"ordered": False})],
)
def test_empty_df_astype(dtype, args):
df = cudf.DataFrame()
kwargs = {}
kwargs.update(args)
assert_eq(df, df.astype(dtype=dtype, **kwargs))
@pytest.mark.parametrize(
"errors",
[
pytest.param(
"raise", marks=pytest.mark.xfail(reason="should raise error here")
),
pytest.param("other", marks=pytest.mark.xfail(raises=ValueError)),
"ignore",
pytest.param(
"warn", marks=pytest.mark.filterwarnings("ignore:Traceback")
),
],
)
def test_series_astype_error_handling(errors):
sr = cudf.Series(["random", "words"])
got = sr.astype("datetime64", errors=errors)
assert_eq(sr, got)
@pytest.mark.parametrize("dtype", ALL_TYPES)
def test_df_constructor_dtype(dtype):
if "datetime" in dtype:
data = ["1991-11-20", "2004-12-04", "2016-09-13", None]
elif dtype == "str":
data = ["a", "b", "c", None]
elif "float" in dtype:
data = [1.0, 0.5, -1.1, np.nan, None]
elif "bool" in dtype:
data = [True, False, None]
else:
data = [1, 2, 3, None]
sr = cudf.Series(data, dtype=dtype)
expect = cudf.DataFrame()
expect["foo"] = sr
expect["bar"] = sr
got = cudf.DataFrame({"foo": data, "bar": data}, dtype=dtype)
assert_eq(expect, got)
@pytest.mark.parametrize(
"data",
[
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": int}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": "category", "b": int, "c": float, "d": str}
),
cudf.datasets.randomdata(
nrows=10, dtypes={"a": bool, "b": int, "c": float, "d": str}
),
cudf.DataFrame(),
cudf.DataFrame({"a": [0, 1, 2], "b": [1, None, 3]}),
cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
),
cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False
),
}
),
],
)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops(data, op, skipna):
gdf = data
pdf = gdf.to_pandas()
if op in ("var", "std"):
expected = getattr(pdf, op)(axis=1, ddof=0, skipna=skipna)
got = getattr(gdf, op)(axis=1, ddof=0, skipna=skipna)
else:
expected = getattr(pdf, op)(axis=1, skipna=skipna)
got = getattr(gdf, op)(axis=1, skipna=skipna)
assert_eq(expected, got, check_exact=False)
@pytest.mark.parametrize(
"op", ["max", "min", "sum", "product", "mean", "var", "std"]
)
def test_rowwise_ops_nullable_dtypes_all_null(op):
gdf = cudf.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [7, np.NaN, 9, 10],
"c": [np.NaN, np.NaN, np.NaN, np.NaN],
"d": cudf.Series([None, None, None, None], dtype="int64"),
"e": [100, None, 200, None],
"f": cudf.Series([10, None, np.NaN, 11], nan_as_null=False),
}
)
expected = cudf.Series([None, None, None, None], dtype="float64")
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series(
[10.0, None, np.NaN, 2234.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"min",
cudf.Series(
[10.0, None, np.NaN, 13.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"sum",
cudf.Series(
[20.0, None, np.NaN, 2247.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"product",
cudf.Series(
[100.0, None, np.NaN, 29042.0, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"mean",
cudf.Series(
[10.0, None, np.NaN, 1123.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"var",
cudf.Series(
[0.0, None, np.NaN, 1233210.25, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
(
"std",
cudf.Series(
[0.0, None, np.NaN, 1110.5, None, np.NaN],
dtype="float64",
nan_as_null=False,
),
),
],
)
def test_rowwise_ops_nullable_dtypes_partial_null(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, 12, 13, 14, 15],
"b": cudf.Series(
[10, None, np.NaN, 2234, None, np.NaN], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"op,expected",
[
(
"max",
cudf.Series([10, None, None, 2234, None, 453], dtype="int64",),
),
("min", cudf.Series([10, None, None, 13, None, 15], dtype="int64",),),
(
"sum",
cudf.Series([20, None, None, 2247, None, 468], dtype="int64",),
),
(
"product",
cudf.Series([100, None, None, 29042, None, 6795], dtype="int64",),
),
(
"mean",
cudf.Series(
[10.0, None, None, 1123.5, None, 234.0], dtype="float32",
),
),
(
"var",
cudf.Series(
[0.0, None, None, 1233210.25, None, 47961.0], dtype="float32",
),
),
(
"std",
cudf.Series(
[0.0, None, None, 1110.5, None, 219.0], dtype="float32",
),
),
],
)
def test_rowwise_ops_nullable_int_dtypes(op, expected):
gdf = cudf.DataFrame(
{
"a": [10, 11, None, 13, None, 15],
"b": cudf.Series(
[10, None, 323, 2234, None, 453], nan_as_null=False,
),
}
)
if op in ("var", "std"):
got = getattr(gdf, op)(axis=1, ddof=0, skipna=False)
else:
got = getattr(gdf, op)(axis=1, skipna=False)
assert_eq(got.null_count, expected.null_count)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ns]"
),
"t3": cudf.Series(
["1960-08-31 06:00:00", "2030-08-02 10:00:00"], dtype="<M8[s]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[us]"
),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(
["1940-08-31 06:00:00", "2020-08-02 10:00:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"t2": cudf.Series(["1940-08-31 06:00:00", None], dtype="<M8[ms]"),
"i1": cudf.Series([1001, 2002], dtype="int64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
},
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"], dtype="<M8[ms]"
),
"i1": cudf.Series([1001, 2002], dtype="int64"),
"f1": cudf.Series([-100.001, 123.456], dtype="float64"),
"b1": cudf.Series([True, False], dtype="bool"),
},
],
)
@pytest.mark.parametrize("op", ["max", "min"])
@pytest.mark.parametrize("skipna", [True, False])
def test_rowwise_ops_datetime_dtypes(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data,op,skipna",
[
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"max",
True,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
False,
),
(
{
"t1": cudf.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ms]",
),
"t2": cudf.Series(
["1940-08-31 06:00:00", None], dtype="<M8[ms]"
),
},
"min",
True,
),
],
)
def test_rowwise_ops_datetime_dtypes_2(data, op, skipna):
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas()
got = getattr(gdf, op)(axis=1, skipna=skipna)
expected = getattr(pdf, op)(axis=1, skipna=skipna)
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
(
{
"t1": pd.Series(
["2020-08-01 09:00:00", "1920-05-01 10:30:00"],
dtype="<M8[ns]",
),
"t2": pd.Series(
["1940-08-31 06:00:00", pd.NaT], dtype="<M8[ns]"
),
}
)
],
)
def test_rowwise_ops_datetime_dtypes_pdbug(data):
pdf = pd.DataFrame(data)
gdf = cudf.from_pandas(pdf)
expected = pdf.max(axis=1, skipna=False)
got = gdf.max(axis=1, skipna=False)
if PANDAS_GE_120:
assert_eq(got, expected)
else:
# PANDAS BUG: https://github.com/pandas-dev/pandas/issues/36907
with pytest.raises(AssertionError, match="numpy array are different"):
assert_eq(got, expected)
@pytest.mark.parametrize(
"data",
[
[5.0, 6.0, 7.0],
"single value",
np.array(1, dtype="int64"),
np.array(0.6273643, dtype="float64"),
],
)
def test_insert(data):
pdf = pd.DataFrame.from_dict({"A": [1, 2, 3], "B": ["a", "b", "c"]})
gdf = cudf.DataFrame.from_pandas(pdf)
# insertion by index
pdf.insert(0, "foo", data)
gdf.insert(0, "foo", data)
assert_eq(pdf, gdf)
pdf.insert(3, "bar", data)
gdf.insert(3, "bar", data)
assert_eq(pdf, gdf)
pdf.insert(1, "baz", data)
gdf.insert(1, "baz", data)
assert_eq(pdf, gdf)
# pandas insert doesn't support negative indexing
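    # (cudf does accept a negative position here; the assertion below checks
    # that gdf.insert(-1, ...) matches appending at the end in pandas)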
pdf.insert(len(pdf.columns), "qux", data)
gdf.insert(-1, "qux", data)
assert_eq(pdf, gdf)
def test_cov():
gdf = cudf.datasets.randomdata(10)
pdf = gdf.to_pandas()
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.xfail(reason="cupy-based cov does not support nulls")
def test_cov_nans():
pdf = pd.DataFrame()
pdf["a"] = [None, None, None, 2.00758632, None]
pdf["b"] = [0.36403686, None, None, None, None]
pdf["c"] = [None, None, None, 0.64882227, None]
pdf["d"] = [None, -1.46863125, None, 1.22477948, -0.06031689]
gdf = cudf.from_pandas(pdf)
assert_eq(pdf.cov(), gdf.cov())
@pytest.mark.parametrize(
"gsr",
[
cudf.Series([4, 2, 3]),
cudf.Series([4, 2, 3], index=["a", "b", "c"]),
cudf.Series([4, 2, 3], index=["a", "b", "d"]),
cudf.Series([4, 2], index=["a", "b"]),
cudf.Series([4, 2, 3], index=cudf.core.index.RangeIndex(0, 3)),
pytest.param(
cudf.Series([4, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"]),
marks=pytest.mark.xfail,
),
],
)
@pytest.mark.parametrize("colnames", [["a", "b", "c"], [0, 1, 2]])
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
operator.eq,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.ne,
],
)
def test_df_sr_binop(gsr, colnames, op):
data = [[3.0, 2.0, 5.0], [3.0, None, 5.0], [6.0, 7.0, np.nan]]
data = dict(zip(colnames, data))
gsr = gsr.astype("float64")
gdf = cudf.DataFrame(data)
pdf = gdf.to_pandas(nullable=True)
psr = gsr.to_pandas(nullable=True)
expect = op(pdf, psr)
got = op(gdf, gsr).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
expect = op(psr, pdf)
got = op(gsr, gdf).to_pandas(nullable=True)
assert_eq(expect, got, check_dtype=False)
@pytest.mark.parametrize(
"op",
[
operator.add,
operator.mul,
operator.floordiv,
operator.truediv,
operator.mod,
operator.pow,
# comparison ops will temporarily XFAIL
# see PR https://github.com/rapidsai/cudf/pull/7491
pytest.param(operator.eq, marks=pytest.mark.xfail()),
pytest.param(operator.lt, marks=pytest.mark.xfail()),
pytest.param(operator.le, marks=pytest.mark.xfail()),
pytest.param(operator.gt, marks=pytest.mark.xfail()),
pytest.param(operator.ge, marks=pytest.mark.xfail()),
pytest.param(operator.ne, marks=pytest.mark.xfail()),
],
)
@pytest.mark.parametrize(
"gsr", [cudf.Series([1, 2, 3, 4, 5], index=["a", "b", "d", "0", "12"])]
)
def test_df_sr_binop_col_order(gsr, op):
colnames = [0, 1, 2]
data = [[0, 2, 5], [3, None, 5], [6, 7, np.nan]]
data = dict(zip(colnames, data))
gdf = cudf.DataFrame(data)
pdf = pd.DataFrame.from_dict(data)
psr = gsr.to_pandas()
expect = op(pdf, psr).astype("float")
out = op(gdf, gsr).astype("float")
got = out[expect.columns]
assert_eq(expect, got)
@pytest.mark.parametrize("set_index", [None, "A", "C", "D"])
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(deep, index, set_index):
# Testing numerical/datetime by comparing with pandas
# (string and categorical columns will be different)
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int64"),
"B": np.arange(rows, dtype="int32"),
"C": np.arange(rows, dtype="float64"),
}
)
df["D"] = pd.to_datetime(df.A)
if set_index:
df = df.set_index(set_index)
gdf = cudf.from_pandas(df)
if index and set_index is None:
# Special Case: Assume RangeIndex size == 0
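        # (assumption about cudf internals: a default RangeIndex is described
        # by start/stop/step only and materializes no device buffer, so its
        # reported memory usage is 0 regardless of `deep`)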
assert gdf.index.memory_usage(deep=deep) == 0
else:
# Check for Series only
assert df["B"].memory_usage(index=index, deep=deep) == gdf[
"B"
].memory_usage(index=index, deep=deep)
# Check for entire DataFrame
assert_eq(
df.memory_usage(index=index, deep=deep).sort_index(),
gdf.memory_usage(index=index, deep=deep).sort_index(),
)
@pytest.mark.xfail
def test_memory_usage_string():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
gdf = cudf.from_pandas(df)
# Check deep=False (should match pandas)
assert gdf.B.memory_usage(deep=False, index=False) == df.B.memory_usage(
deep=False, index=False
)
# Check string column
assert gdf.B.memory_usage(deep=True, index=False) == df.B.memory_usage(
deep=True, index=False
)
# Check string index
assert gdf.set_index("B").index.memory_usage(
deep=True
) == df.B.memory_usage(deep=True, index=False)
def test_memory_usage_cat():
rows = int(100)
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(["apple", "banana", "orange"], rows),
}
)
df["B"] = df.B.astype("category")
gdf = cudf.from_pandas(df)
expected = (
gdf.B._column.cat().categories.__sizeof__()
+ gdf.B._column.cat().codes.__sizeof__()
)
# Check cat column
assert gdf.B.memory_usage(deep=True, index=False) == expected
# Check cat index
assert gdf.set_index("B").index.memory_usage(deep=True) == expected
def test_memory_usage_list():
df = cudf.DataFrame({"A": [[0, 1, 2, 3], [4, 5, 6], [7, 8], [9]]})
expected = (
df.A._column.offsets._memory_usage()
+ df.A._column.elements._memory_usage()
)
assert expected == df.A.memory_usage()
@pytest.mark.xfail
def test_memory_usage_multi():
rows = int(100)
deep = True
df = pd.DataFrame(
{
"A": np.arange(rows, dtype="int32"),
"B": np.random.choice(np.arange(3, dtype="int64"), rows),
"C": np.random.choice(np.arange(3, dtype="float64"), rows),
}
).set_index(["B", "C"])
gdf = cudf.from_pandas(df)
# Assume MultiIndex memory footprint is just that
# of the underlying columns, levels, and codes
expect = rows * 16 # Source Columns
expect += rows * 16 # Codes
expect += 3 * 8 # Level 0
expect += 3 * 8 # Level 1
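    # (hypothetical accounting behind the numbers above: two 8-byte source
    # columns -> 16 B/row, one assumed 8-byte code per level per row ->
    # 16 B/row, and three distinct 8-byte values per level -> 24 B each for
    # levels 0 and 1)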
assert expect == gdf.index.memory_usage(deep=deep)
@pytest.mark.parametrize(
"list_input",
[
pytest.param([1, 2, 3, 4], id="smaller"),
pytest.param([1, 2, 3, 4, 5, 6], id="larger"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_list(list_input, key):
gdf = cudf.datasets.randomdata(5)
with pytest.raises(
ValueError, match=("All columns must be of equal length")
):
gdf[key] = list_input
@pytest.mark.parametrize(
"series_input",
[
pytest.param(cudf.Series([1, 2, 3, 4]), id="smaller_cudf"),
pytest.param(cudf.Series([1, 2, 3, 4, 5, 6]), id="larger_cudf"),
pytest.param(cudf.Series([1, 2, 3], index=[4, 5, 6]), id="index_cudf"),
pytest.param(pd.Series([1, 2, 3, 4]), id="smaller_pandas"),
pytest.param(pd.Series([1, 2, 3, 4, 5, 6]), id="larger_pandas"),
pytest.param(pd.Series([1, 2, 3], index=[4, 5, 6]), id="index_pandas"),
],
)
@pytest.mark.parametrize(
"key",
[
pytest.param("list_test", id="new_column"),
pytest.param("id", id="existing_column"),
],
)
def test_setitem_diff_size_series(series_input, key):
gdf = cudf.datasets.randomdata(5)
pdf = gdf.to_pandas()
pandas_input = series_input
if isinstance(pandas_input, cudf.Series):
pandas_input = pandas_input.to_pandas()
expect = pdf
expect[key] = pandas_input
got = gdf
got[key] = series_input
    # Pandas fills with NaN and typecasts to float64 when there are missing
    # values after alignment, so typecast both to float64 before comparing.
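    # Hedged illustration of the pandas behavior referenced above (not part
    # of the test itself):
    #   pd.DataFrame({"id": range(3)}).assign(x=pd.Series([1, 2]))["x"]
    #   -> [1.0, 2.0, NaN] with dtype float64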
expect = expect.astype("float64")
got = got.astype("float64")
assert_eq(expect, got)
def test_tupleize_cols_False_set():
pdf = pd.DataFrame()
gdf = cudf.DataFrame()
pdf[("a", "b")] = [1]
gdf[("a", "b")] = [1]
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_init_multiindex_from_dict():
pdf = pd.DataFrame({("a", "b"): [1]})
gdf = cudf.DataFrame({("a", "b"): [1]})
assert_eq(pdf, gdf)
assert_eq(pdf.columns, gdf.columns)
def test_change_column_dtype_in_empty():
pdf = pd.DataFrame({"a": [], "b": []})
gdf = cudf.from_pandas(pdf)
assert_eq(pdf, gdf)
pdf["b"] = pdf["b"].astype("int64")
gdf["b"] = gdf["b"].astype("int64")
assert_eq(pdf, gdf)
def test_dataframe_from_table_empty_index():
df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
odict = df._data
tbl = cudf._lib.table.Table(odict)
result = cudf.DataFrame._from_table(tbl) # noqa: F841
@pytest.mark.parametrize("dtype", ["int64", "str"])
def test_dataframe_from_dictionary_series_same_name_index(dtype):
pd_idx1 = pd.Index([1, 2, 0], name="test_index").astype(dtype)
pd_idx2 = pd.Index([2, 0, 1], name="test_index").astype(dtype)
pd_series1 = pd.Series([1, 2, 3], index=pd_idx1)
pd_series2 = pd.Series([1, 2, 3], index=pd_idx2)
gd_idx1 = cudf.from_pandas(pd_idx1)
gd_idx2 = cudf.from_pandas(pd_idx2)
gd_series1 = cudf.Series([1, 2, 3], index=gd_idx1)
gd_series2 = cudf.Series([1, 2, 3], index=gd_idx2)
expect = pd.DataFrame({"a": pd_series1, "b": pd_series2})
got = cudf.DataFrame({"a": gd_series1, "b": gd_series2})
if dtype == "str":
# Pandas actually loses its index name erroneously here...
expect.index.name = "test_index"
assert_eq(expect, got)
assert expect.index.names == got.index.names
@pytest.mark.parametrize(
"arg", [slice(2, 8, 3), slice(1, 20, 4), slice(-2, -6, -2)]
)
def test_dataframe_strided_slice(arg):
mul = pd.DataFrame(
{
"Index": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"AlphaIndex": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
}
)
pdf = pd.DataFrame(
{"Val": [10, 9, 8, 7, 6, 5, 4, 3, 2]},
index=pd.MultiIndex.from_frame(mul),
)
gdf = cudf.DataFrame.from_pandas(pdf)
expect = pdf[arg]
got = gdf[arg]
assert_eq(expect, got)
@pytest.mark.parametrize(
"data,condition,other,error",
[
(pd.Series(range(5)), pd.Series(range(5)) > 0, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, None, None),
(pd.Series(range(5)), pd.Series(range(5)) > 1, 10, None),
(
pd.Series(range(5)),
pd.Series(range(5)) > 1,
pd.Series(range(5, 10)),
None,
),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
(
pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
% 3
)
== 0,
-pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"]),
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) == 4,
None,
None,
),
(
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}),
pd.DataFrame({"a": [1, 2, np.nan], "b": [4, np.nan, 6]}) != 4,
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[True, True, True, False],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True, True, False], [True, True, True, False]],
None,
ValueError,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cuda.to_device(
np.array(
[[True, True], [False, True], [True, False], [False, True]]
)
),
None,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
cupy.array(
[[True, True], [False, True], [True, False], [False, True]]
),
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[[True, True], [False, True], [True, False], [False, True]],
17,
None,
),
(
pd.DataFrame({"p": [-2, 3, -4, -79], "k": [9, 10, 11, 12]}),
[
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
[True, True, False, True],
],
None,
ValueError,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) == 4,
None,
None,
),
(
pd.Series([1, 2, np.nan]),
pd.Series([1, 2, np.nan]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) == 4,
None,
None,
),
(
pd.Series([4, np.nan, 6]),
pd.Series([4, np.nan, 6]) != 4,
None,
None,
),
(
pd.Series([4, np.nan, 6], dtype="category"),
pd.Series([4, np.nan, 6], dtype="category") != 4,
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
None,
None,
),
(
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category"),
pd.Series(["a", "b", "b", "d", "c", "s"], dtype="category") == "b",
"s",
None,
),
(
pd.Series([1, 2, 3, 2, 5]),
pd.Series([1, 2, 3, 2, 5]) == 2,
pd.DataFrame(
{
"a": pd.Series([1, 2, 3, 2, 5]),
"b": pd.Series([1, 2, 3, 2, 5]),
}
),
NotImplementedError,
),
],
)
@pytest.mark.parametrize("inplace", [True, False])
def test_df_sr_mask_where(data, condition, other, error, inplace):
ps_where = data
gs_where = cudf.from_pandas(data)
ps_mask = ps_where.copy(deep=True)
gs_mask = gs_where.copy(deep=True)
if hasattr(condition, "__cuda_array_interface__"):
if type(condition).__module__.split(".")[0] == "cupy":
ps_condition = cupy.asnumpy(condition)
else:
ps_condition = np.array(condition).astype("bool")
else:
ps_condition = condition
if type(condition).__module__.split(".")[0] == "pandas":
gs_condition = cudf.from_pandas(condition)
else:
gs_condition = condition
ps_other = other
if type(other).__module__.split(".")[0] == "pandas":
gs_other = cudf.from_pandas(other)
else:
gs_other = other
if error is None:
expect_where = ps_where.where(
ps_condition, other=ps_other, inplace=inplace
)
got_where = gs_where.where(
gs_condition, other=gs_other, inplace=inplace
)
expect_mask = ps_mask.mask(
ps_condition, other=ps_other, inplace=inplace
)
got_mask = gs_mask.mask(gs_condition, other=gs_other, inplace=inplace)
if inplace:
expect_where = ps_where
got_where = gs_where
expect_mask = ps_mask
got_mask = gs_mask
        if pd.api.types.is_categorical_dtype(expect_where):
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
    Decorator to ignore the FutureWarning raised when the test panel is a
    SparsePanel; can be removed once SparsePanel is fully removed.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
    def test_not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
        # just test that panel.iteritems() works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
with tm.assertRaisesRegexp(ValueError,
"The number of dimensions required is 3"):
Panel(np.random.randn(10, 2))
def test_consolidate(self):
self.assertTrue(self.panel._data.is_consolidated())
self.panel['foo'] = 1.
self.assertFalse(self.panel._data.is_consolidated())
panel = self.panel.consolidate()
self.assertTrue(panel._data.is_consolidated())
def test_ctor_dict(self):
itema = self.panel['ItemA']
itemb = self.panel['ItemB']
d = {'A': itema, 'B': itemb[5:]}
d2 = {'A': itema._series, 'B': itemb[5:]._series}
d3 = {'A': None,
'B': DataFrame(itemb[5:]._series),
'C': DataFrame(itema._series)}
wp = Panel.from_dict(d)
wp2 = Panel.from_dict(d2) # nested Dict
# TODO: unused?
wp3 = Panel.from_dict(d3) # noqa
self.assertTrue(wp.major_axis.equals(self.panel.major_axis))
assert_panel_equal(wp, wp2)
# intersect
wp = Panel.from_dict(d, intersect=True)
self.assertTrue(wp.major_axis.equals(itemb.index[5:]))
# use constructor
assert_panel_equal(Panel(d), Panel.from_dict(d))
assert_panel_equal(Panel(d2), Panel.from_dict(d2))
assert_panel_equal(Panel(d3), Panel.from_dict(d3))
# a pathological case
d4 = {'A': None, 'B': None}
# TODO: unused?
wp4 = Panel.from_dict(d4) # noqa
assert_panel_equal(Panel(d4), Panel(items=['A', 'B']))
# cast
dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))
for k, v in compat.iteritems(d))
result = Panel(dcasted, dtype=int)
expected = Panel(dict((k, v.astype(int))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
result = Panel(dcasted, dtype=np.int32)
expected = Panel(dict((k, v.astype(np.int32))
for k, v in compat.iteritems(dcasted)))
assert_panel_equal(result, expected)
def test_constructor_dict_mixed(self):
data = dict((k, v.values) for k, v in self.panel.iteritems())
result = Panel(data)
exp_major = Index(np.arange(len(self.panel.major_axis)))
self.assertTrue(result.major_axis.equals(exp_major))
result = Panel(data, items=self.panel.items,
major_axis=self.panel.major_axis,
minor_axis=self.panel.minor_axis)
assert_panel_equal(result, self.panel)
data['ItemC'] = self.panel['ItemC']
result = Panel(data)
assert_panel_equal(result, self.panel)
# corner case: should blow up
data['ItemB'] = data['ItemB'][:-1]
self.assertRaises(Exception, Panel, data)
data['ItemB'] = self.panel['ItemB'].values[:, :-1]
self.assertRaises(Exception, Panel, data)
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
self.assertTrue(list(p.items) == keys)
p = Panel.from_dict(d)
self.assertTrue(list(p.items) == keys)
def test_constructor_resize(self):
data = self.panel._data
items = self.panel.items[:-1]
major = self.panel.major_axis[:-1]
minor = self.panel.minor_axis[:-1]
result = Panel(data, items=items, major_axis=major, minor_axis=minor)
expected = self.panel.reindex(items=items, major=major, minor=minor)
assert_panel_equal(result, expected)
result = Panel(data, items=items, major_axis=major)
expected = self.panel.reindex(items=items, major=major)
assert_panel_equal(result, expected)
result = Panel(data, items=items)
expected = self.panel.reindex(items=items)
assert_panel_equal(result, expected)
result = Panel(data, minor_axis=minor)
expected = self.panel.reindex(minor=minor)
assert_panel_equal(result, expected)
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
self.assertEqual(panel['foo'].values.dtype, np.object_)
self.assertEqual(panel['A'].values.dtype, np.float64)
def test_constructor_error_msgs(self):
def testit():
Panel(np.random.randn(3, 4, 5), lrange(4), lrange(5), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(4, 5, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(4), lrange(5))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 4, 5\)",
testit)
def testit():
Panel(np.random.randn(3, 4, 5), lrange(5), lrange(5), lrange(4))
assertRaisesRegexp(ValueError,
"Shape of passed values is \(3, 4, 5\), "
"indices imply \(5, 5, 4\)",
testit)
def test_conform(self):
df = self.panel['ItemA'][:-5].filter(items=['A', 'B'])
conformed = self.panel.conform(df)
assert (conformed.index.equals(self.panel.major_axis))
assert (conformed.columns.equals(self.panel.minor_axis))
def test_convert_objects(self):
# GH 4937
p = Panel(dict(A=dict(a=['1', '1.0'])))
expected = Panel(dict(A=dict(a=[1, 1.0])))
result = p._convert(numeric=True, coerce=True)
assert_panel_equal(result, expected)
def test_dtypes(self):
result = self.panel.dtypes
expected = Series(np.dtype('float64'), index=self.panel.items)
assert_series_equal(result, expected)
def test_apply(self):
# GH1148
# ufunc
applied = self.panel.apply(np.sqrt)
self.assertTrue(assert_almost_equal(applied.values, np.sqrt(
self.panel.values)))
# ufunc same shape
result = self.panel.apply(lambda x: x * 2, axis='items')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='major_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2, axis='minor_axis')
expected = self.panel * 2
assert_panel_equal(result, expected)
# reduction to DataFrame
result = self.panel.apply(lambda x: x.dtype, axis='items')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.minor_axis)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='major_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.minor_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.dtype, axis='minor_axis')
expected = DataFrame(np.dtype('float64'), index=self.panel.major_axis,
columns=self.panel.items)
assert_frame_equal(result, expected)
# reductions via other dims
expected = self.panel.sum(0)
result = self.panel.apply(lambda x: x.sum(), axis='items')
assert_frame_equal(result, expected)
expected = self.panel.sum(1)
result = self.panel.apply(lambda x: x.sum(), axis='major_axis')
assert_frame_equal(result, expected)
expected = self.panel.sum(2)
result = self.panel.apply(lambda x: x.sum(), axis='minor_axis')
assert_frame_equal(result, expected)
# pass kwargs
result = self.panel.apply(lambda x, y: x.sum() + y, axis='items', y=5)
expected = self.panel.sum(0) + 5
assert_frame_equal(result, expected)
def test_apply_slabs(self):
# same shape as original
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'major_axis'])
expected = (self.panel * 2).transpose('minor_axis', 'major_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['items', 'minor_axis'])
expected = (self.panel * 2).transpose('major_axis', 'minor_axis',
'items')
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'items'])
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['major_axis', 'minor_axis'])
expected = self.panel * 2
assert_panel_equal(result, expected)
result = self.panel.apply(lambda x: x * 2,
axis=['minor_axis', 'major_axis'])
assert_panel_equal(result, expected)
# reductions
result = self.panel.apply(lambda x: x.sum(0), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(1).T
assert_frame_equal(result, expected)
result = self.panel.apply(lambda x: x.sum(1), axis=[
'items', 'major_axis'
])
expected = self.panel.sum(0)
assert_frame_equal(result, expected)
# transforms
f = lambda x: ((x.T - x.mean(1)) / x.std(1)).T
# make sure that we don't trigger any warnings
with tm.assert_produces_warning(False):
result = self.panel.apply(f, axis=['items', 'major_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[:, :, ax]))
for ax in self.panel.minor_axis]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['major_axis', 'minor_axis'])
expected = Panel(dict([(ax, f(self.panel.loc[ax]))
for ax in self.panel.items]))
assert_panel_equal(result, expected)
result = self.panel.apply(f, axis=['minor_axis', 'items'])
expected = Panel(dict([(ax, f(self.panel.loc[:, ax]))
for ax in self.panel.major_axis]))
assert_panel_equal(result, expected)
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
# on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_reindex(self):
ref = self.panel['ItemB']
# items
result = self.panel.reindex(items=['ItemA', 'ItemB'])
assert_frame_equal(result['ItemB'], ref)
# major
new_major = list(self.panel.major_axis[:10])
result = self.panel.reindex(major=new_major)
assert_frame_equal(result['ItemB'], ref.reindex(index=new_major))
# raise exception when passing both major and major_axis
self.assertRaises(Exception, self.panel.reindex, major_axis=new_major,
major=new_major)
# minor
new_minor = list(self.panel.minor_axis[:2])
result = self.panel.reindex(minor=new_minor)
assert_frame_equal(result['ItemB'], ref.reindex(columns=new_minor))
# this ok
result = self.panel.reindex()
assert_panel_equal(result, self.panel)
self.assertFalse(result is self.panel)
# with filling
smaller_major = self.panel.major_axis[::5]
smaller = self.panel.reindex(major=smaller_major)
larger = smaller.reindex(major=self.panel.major_axis, method='pad')
assert_frame_equal(larger.major_xs(self.panel.major_axis[1]),
smaller.major_xs(smaller_major[0]))
# don't necessarily copy
result = self.panel.reindex(major=self.panel.major_axis, copy=False)
assert_panel_equal(result, self.panel)
self.assertTrue(result is self.panel)
def test_reindex_multi(self):
# with and without copy full reindexing
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
self.assertIs(result.items, self.panel.items)
self.assertIs(result.major_axis, self.panel.major_axis)
self.assertIs(result.minor_axis, self.panel.minor_axis)
result = self.panel.reindex(items=self.panel.items,
major=self.panel.major_axis,
minor=self.panel.minor_axis, copy=False)
assert_panel_equal(result, self.panel)
# multi-axis indexing consistency
# GH 5900
df = DataFrame(np.random.randn(4, 3))
p = Panel({'Item1': df})
expected = Panel({'Item1': df})
expected['Item2'] = np.nan
items = ['Item1', 'Item2']
major_axis = np.arange(4)
minor_axis = np.arange(3)
results = []
results.append(p.reindex(items=items, major_axis=major_axis,
copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
copy=False))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=True))
results.append(p.reindex(items=items, minor_axis=minor_axis,
copy=False))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=True))
results.append(p.reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=False))
for i, r in enumerate(results):
assert_panel_equal(expected, r)
def test_reindex_like(self):
# reindex_like
smaller = self.panel.reindex(items=self.panel.items[:-1],
major=self.panel.major_axis[:-1],
minor=self.panel.minor_axis[:-1])
smaller_like = self.panel.reindex_like(smaller)
assert_panel_equal(smaller, smaller_like)
def test_take(self):
# axis == 0
result = self.panel.take([2, 0, 1], axis=0)
expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])
assert_panel_equal(result, expected)
# axis >= 1
result = self.panel.take([3, 0, 1, 2], axis=2)
expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])
assert_panel_equal(result, expected)
# negative indices ok
expected = self.panel.reindex(minor=['D', 'D', 'B', 'C'])
result = self.panel.take([3, -1, 1, 2], axis=2)
assert_panel_equal(result, expected)
self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)
def test_sort_index(self):
import random
ritems = list(self.panel.items)
rmajor = list(self.panel.major_axis)
rminor = list(self.panel.minor_axis)
random.shuffle(ritems)
random.shuffle(rmajor)
random.shuffle(rminor)
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0)
assert_panel_equal(sorted_panel, self.panel)
# descending
random_order = self.panel.reindex(items=ritems)
sorted_panel = random_order.sort_index(axis=0, ascending=False)
assert_panel_equal(sorted_panel,
self.panel.reindex(items=self.panel.items[::-1]))
random_order = self.panel.reindex(major=rmajor)
sorted_panel = random_order.sort_index(axis=1)
assert_panel_equal(sorted_panel, self.panel)
random_order = self.panel.reindex(minor=rminor)
sorted_panel = random_order.sort_index(axis=2)
assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
filled = self.panel.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
filled = self.panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
self.panel['ItemA'].fillna(method='backfill'))
panel = self.panel.copy()
panel['str'] = 'foo'
filled = panel.fillna(method='backfill')
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
self.assertRaises(ValueError, self.panel.fillna)
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
self.assertRaises(NotImplementedError, lambda: p.fillna(999, limit=1))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
self.panel.fillna(method='ffill'))
assert_panel_equal(self.panel.bfill(),
self.panel.fillna(method='bfill'))
def test_truncate_fillna_bug(self):
# #1823
result = self.panel.truncate(before=None, after=None, axis='items')
# it works!
result.fillna(value=0.0)
def test_swapaxes(self):
result = self.panel.swapaxes('items', 'minor')
self.assertIs(result.items, self.panel.minor_axis)
result = self.panel.swapaxes('items', 'major')
self.assertIs(result.items, self.panel.major_axis)
result = self.panel.swapaxes('major', 'minor')
self.assertIs(result.major_axis, self.panel.minor_axis)
panel = self.panel.copy()
result = panel.swapaxes('major', 'minor')
panel.values[0, 0, 1] = np.nan
expected = panel.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
# this should also work
result = self.panel.swapaxes(0, 1)
self.assertIs(result.items, self.panel.major_axis)
# this works, but returns a copy
result = self.panel.swapaxes('items', 'items')
assert_panel_equal(self.panel, result)
self.assertNotEqual(id(self.panel), id(result))
def test_transpose(self):
result = self.panel.transpose('minor', 'major', 'items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test kwargs
result = self.panel.transpose(items='minor', major='major',
minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# test mixture of args
result = self.panel.transpose('minor', major='major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'major', minor='items')
expected = self.panel.swapaxes('items', 'minor')
assert_panel_equal(result, expected)
# duplicate axes
with tm.assertRaisesRegexp(TypeError,
'not enough/duplicate arguments'):
self.panel.transpose('minor', maj='major', minor='items')
with tm.assertRaisesRegexp(ValueError, 'repeated axis in transpose'):
self.panel.transpose('minor', 'major', major='minor',
minor='items')
result = self.panel.transpose(2, 1, 0)
assert_panel_equal(result, expected)
result = self.panel.transpose('minor', 'items', 'major')
expected = self.panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
result = self.panel.transpose(2, 0, 1)
assert_panel_equal(result, expected)
self.assertRaises(ValueError, self.panel.transpose, 0, 0, 1)
def test_transpose_copy(self):
panel = self.panel.copy()
result = panel.transpose(2, 0, 1, copy=True)
expected = panel.swapaxes('items', 'minor')
expected = expected.swapaxes('major', 'minor')
assert_panel_equal(result, expected)
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
@ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
expected = self.panel.to_frame().dropna(how='any')
assert_frame_equal(filtered, expected)
# unfiltered
unfiltered = self.panel.to_frame(filter_observations=False)
assert_panel_equal(unfiltered.to_panel(), self.panel)
# names
self.assertEqual(unfiltered.index.names, ('major', 'minor'))
# unsorted, round trip
df = self.panel.to_frame(filter_observations=False)
unsorted = df.take(np.random.permutation(len(df)))
pan = unsorted.to_panel()
assert_panel_equal(pan, self.panel)
# preserve original index names
df = DataFrame(np.random.randn(6, 2),
index=[['a', 'a', 'b', 'b', 'c', 'c'],
[0, 1, 0, 1, 0, 1]],
columns=['one', 'two'])
df.index.names = ['foo', 'bar']
df.columns.name = 'baz'
rdf = df.to_panel().to_frame()
self.assertEqual(rdf.index.names, df.index.names)
self.assertEqual(rdf.columns.names, df.columns.names)
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'), (
np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1], [3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12], [3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'], ['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], ['y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4], [-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples([(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_to_panel_na_handling(self):
df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
panel = df.to_panel()
self.assertTrue(isnull(panel[0].ix[1, [0, 1]]).all())
def test_to_panel_duplicates(self):
# #2441
df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
idf = df.set_index(['a', 'b'])
assertRaisesRegexp(ValueError, 'non-uniquely indexed', idf.to_panel)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[['A', 'B']]
expected.items = ['A', 'A']
result = panel.loc['A']
assert_panel_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, ['A', 'B']]
expected.major_axis = ['A', 'A']
result = panel.loc[:, 'A']
assert_panel_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, ['A', 'B']]
expected.minor_axis = ['A', 'A']
result = panel.loc[:, :, 'A']
assert_panel_equal(result, expected)
def test_filter(self):
pass
def test_compound(self):
compounded = self.panel.compound()
assert_series_equal(compounded['ItemA'],
(1 + self.panel['ItemA']).product(0) - 1,
check_names=False)
def test_shift(self):
# major
idx = self.panel.major_axis[0]
idx_lag = self.panel.major_axis[1]
shifted = self.panel.shift(1)
assert_frame_equal(self.panel.major_xs(idx), shifted.major_xs(idx_lag))
# minor
idx = self.panel.minor_axis[0]
idx_lag = self.panel.minor_axis[1]
shifted = self.panel.shift(1, axis='minor')
assert_frame_equal(self.panel.minor_xs(idx), shifted.minor_xs(idx_lag))
# items
idx = self.panel.items[0]
idx_lag = self.panel.items[1]
shifted = self.panel.shift(1, axis='items')
assert_frame_equal(self.panel[idx], shifted[idx_lag])
# negative numbers, #2164
result = self.panel.shift(-1)
expected = Panel(dict((i, f.shift(-1)[:-1])
for i, f in self.panel.iteritems()))
assert_panel_equal(result, expected)
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame()) for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodPanel()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_panel_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=bday)
assert_panel_equal(shifted, shifted3)
assertRaisesRegexp(ValueError, 'does not match', ps.tshift, freq='M')
# DatetimeIndex
panel = _panel
shifted = panel.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(panel, unshifted)
shifted2 = panel.tshift(freq=panel.major_axis.freq)
assert_panel_equal(shifted, shifted2)
inferred_ts = Panel(panel.values, items=panel.items,
major_axis=Index(np.asarray(panel.major_axis)),
minor_axis=panel.minor_axis)
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_panel_equal(shifted, panel.tshift(1))
assert_panel_equal(unshifted, inferred_ts)
no_freq = panel.ix[:, [0, 5, 7], :]
self.assertRaises(ValueError, no_freq.tshift)
def test_pct_change(self):
df1 = DataFrame({'c1': [1, 2, 5], 'c2': [3, 4, 6]})
df2 = df1 + 1
df3 = DataFrame({'c1': [3, 4, 7], 'c2': [5, 6, 8]})
wp = Panel({'i1': df1, 'i2': df2, 'i3': df3})
# major, 1
result = wp.pct_change() # axis='major'
expected = Panel({'i1': df1.pct_change(),
'i2': df2.pct_change(),
'i3': df3.pct_change()})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=1)
assert_panel_equal(result, expected)
# major, 2
result = wp.pct_change(periods=2)
expected = Panel({'i1': df1.pct_change(2),
'i2': df2.pct_change(2),
'i3': df3.pct_change(2)})
assert_panel_equal(result, expected)
# minor, 1
result = wp.pct_change(axis='minor')
expected = Panel({'i1': df1.pct_change(axis=1),
'i2': df2.pct_change(axis=1),
'i3': df3.pct_change(axis=1)})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=2)
assert_panel_equal(result, expected)
# minor, 2
result = wp.pct_change(periods=2, axis='minor')
expected = Panel({'i1': df1.pct_change(periods=2, axis=1),
'i2': df2.pct_change(periods=2, axis=1),
'i3': df3.pct_change(periods=2, axis=1)})
assert_panel_equal(result, expected)
# items, 1
result = wp.pct_change(axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [1, 0.5, .2],
'c2': [1. / 3, 0.25, 1. / 6]}),
'i3': DataFrame({'c1': [.5, 1. / 3, 1. / 6],
'c2': [.25, .2, 1. / 7]})})
assert_panel_equal(result, expected)
result = wp.pct_change(axis=0)
assert_panel_equal(result, expected)
# items, 2
result = wp.pct_change(periods=2, axis='items')
expected = Panel({'i1': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i2': DataFrame({'c1': [np.nan, np.nan, np.nan],
'c2': [np.nan, np.nan, np.nan]}),
'i3': DataFrame({'c1': [2, 1, .4],
'c2': [2. / 3, .5, 1. / 3]})})
assert_panel_equal(result, expected)
def test_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
evalues = [[[float(np.around(i)) for i in j] for j in k]
for k in values]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
expected = Panel(evalues, items=['Item1', 'Item2'],
major_axis=pd.date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
result = p.round()
self.assert_panel_equal(expected, result)
def test_multiindex_get(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.ix['a']
assert_panel_equal(f1, f2)
self.assertTrue((f1.items == [1, 2]).all())
self.assertTrue((f2.items == [1, 2]).all())
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_multiindex_blocks(self):
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
wp = Panel(self.panel._data)
wp.items = ind
f1 = wp['a']
self.assertTrue((f1.items == [1, 2]).all())
f1 = wp[('b', 1)]
self.assertTrue((f1.columns == ['A', 'B', 'C', 'D']).all())
def test_repr_empty(self):
empty = Panel()
repr(empty)
def test_rename(self):
mapper = {'ItemA': 'foo', 'ItemB': 'bar', 'ItemC': 'baz'}
renamed = self.panel.rename_axis(mapper, axis=0)
exp = Index(['foo', 'bar', 'baz'])
self.assertTrue(renamed.items.equals(exp))
renamed = self.panel.rename_axis(str.lower, axis=2)
exp = Index(['a', 'b', 'c', 'd'])
self.assertTrue(renamed.minor_axis.equals(exp))
# don't copy
renamed_nocopy = self.panel.rename_axis(mapper, axis=0, copy=False)
renamed_nocopy['foo'] = 3.
self.assertTrue((self.panel['ItemA'].values == 3).all())
def test_get_attr(self):
assert_frame_equal(self.panel['ItemA'], self.panel.ItemA)
# specific cases from #3440
self.panel['a'] = self.panel['ItemA']
assert_frame_equal(self.panel['a'], self.panel.a)
self.panel['i'] = self.panel['ItemA']
assert_frame_equal(self.panel['i'], self.panel.i)
def test_from_frame_level1_unsorted(self):
tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
('MSFT', 1)]
midx = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.rand(5, 4), index=midx)
p = df.to_panel()
assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
def test_to_excel(self):
try:
import xlwt # noqa
import xlrd # noqa
import openpyxl # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for ext in ['xls', 'xlsx']:
path = '__tmp__.' + ext
with ensure_clean(path) as path:
self.panel.to_excel(path)
try:
reader = ExcelFile(path)
except ImportError:
raise nose.SkipTest("need xlwt xlrd openpyxl")
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
assert_frame_equal(df, recdf)
def test_to_excel_xlsxwriter(self):
try:
import xlrd # noqa
import xlsxwriter # noqa
from pandas.io.excel import ExcelFile
except ImportError:
raise nose.SkipTest("Requires xlrd and xlsxwriter. Skipping test.")
path = '__tmp__.xlsx'
with ensure_clean(path) as path:
self.panel.to_excel(path, engine='xlsxwriter')
try:
reader = ExcelFile(path)
except ImportError as e:
raise nose.SkipTest("cannot write excel file: %s" % e)
for item, df in self.panel.iteritems():
recdf = reader.parse(str(item), index_col=0)
|
assert_frame_equal(df, recdf)
|
pandas.util.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import zipfile
DROPCOLS = ['rougher.input.floatbank11_copper_sulfate',
'rougher.input.floatbank10_xanthate',
'rougher.state.floatbank10_c_air',
'rougher.state.floatbank10_e_air',
'rougher.state.floatbank10_f_air',
'rougher.state.floatbank10_d_level',
'rougher.state.floatbank10_f_level',
'primary_cleaner.state.floatbank8_b_air',
'primary_cleaner.state.floatbank8_c_air',
"secondary_cleaner.state.floatbank4_b_air",
'secondary_cleaner.state.floatbank2_b_air',
"secondary_cleaner.state.floatbank5_b_air",
"secondary_cleaner.state.floatbank3_a_air"
]
def to_integer(dt_time):
y = 365 * (dt_time.year - 2016) + 30.25 * dt_time.month + dt_time.day
return y.to_numpy().reshape(-1, 1)
def get_ransac(y=None, threshold=30):
from sklearn import linear_model
rs = linear_model.RANSACRegressor()
y = y[y > threshold]
y_rs = y.rolling(window=24 * 7).min().dropna()
t = to_integer(y_rs.index)
rs.fit(t, y_rs)
return rs
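# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Shows one plausible way `to_integer` and `get_ransac` could be combined to fit a
# robust linear baseline to a noisy hourly series. The synthetic series, the helper
# name `_example_get_ransac_usage`, and the availability of scikit-learn are assumptions.
def _example_get_ransac_usage():
    import numpy as np
    import pandas as pd
    idx = pd.date_range('2017-01-01', periods=24 * 60, freq='H')
    y = pd.Series(60 + 10 * np.random.rand(len(idx)), index=idx)  # stays above threshold=30
    model = get_ransac(y, threshold=30)            # RANSAC fit on the rolling weekly minimum
    baseline = model.predict(to_integer(y.index))  # baseline value for every timestamp
    return pd.Series(np.ravel(baseline), index=idx)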
def encode(data, col, max_val):
import numpy as np
data[col + '_sin'] = np.sin(2 * np.pi * data[col] / max_val)
data[col + '_cos'] = np.cos(2 * np.pi * data[col] / max_val)
return data
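# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# Demonstrates how `encode` maps a cyclic feature (hour of day) onto sin/cos columns,
# so that hour 23 and hour 0 land close together in feature space. The demo frame and
# the helper name `_example_encode_usage` are assumptions.
def _example_encode_usage():
    import pandas as pd
    demo = pd.DataFrame({'hour': [0, 6, 12, 18, 23]})
    demo = encode(demo, 'hour', max_val=24)  # adds 'hour_sin' and 'hour_cos'
    return demo[['hour', 'hour_sin', 'hour_cos']]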
def main(root=None):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
import pandas as pd
from sklearn.decomposition import PCA
import numpy as np
pca_threshold = 0.996
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
zip_ref = zipfile.ZipFile(f'{root}/data/raw/for_competition_release.zip', 'r')
zip_ref.extractall(f'{root}/data/interim/')
df_train = pd.read_csv(f'{root}/data/interim/train_data/all_train.csv', parse_dates=['date'])
# df_train['date'] =df_train['date'].dt.round('H')
data_dict =
|
pd.read_json(f'{root}/data/interim/data_dictionary_v1.json')
|
pandas.read_json
|
import numpy as np
import pandas as pd
def interpolate_monthly(df, date_col=None, resample_col=None):
if date_col is None:
date_col = df.columns[0]
if resample_col is None:
resample_col = df.columns[1]
data = df[[date_col, resample_col]].copy()
data[date_col] = pd.to_datetime(data[date_col], format="%Y-%m")
data['start_of_month'] = (data[date_col].dt.floor('d') + pd.offsets.MonthEnd(0) - pd.offsets.MonthBegin(1))
data['end_of_month'] = pd.to_datetime(data['start_of_month']) + pd.offsets.MonthEnd(1)
data['days_in_month'] = (data['end_of_month'] - data['start_of_month']).dt.days + 1
data[resample_col] = data[resample_col] / data['days_in_month']
data['date'] = data['start_of_month']
dr = pd.date_range(start=data.start_of_month.min(),
end=data.end_of_month.max(),
freq='1D')
date_df =
|
pd.DataFrame({'date': dr})
|
pandas.DataFrame
|
"""
Wrappers for IUCN API calls.
API documentation can be found at: https://apiv3.iucnredlist.org/api/v3/docs
"""
from collections import defaultdict
from typing import Union
from urllib.parse import urljoin
import numpy as np
import pandas as pd
import requests
from .._helpers import expand_result
API_URL = "https://apiv3.iucnredlist.org/api/v3/"
def _request(url: str, token: str) -> requests.Response:
"""
Creates a request for the IUCN API and handles HTTP exceptions.
Parameters
----------
url : str
IUCN API endpoint.
token : str
IUCN API authentication token.
Returns
-------
Response
Request response.
"""
try:
response = requests.get(url, params={"token": token})
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise Exception(f"Error calling IUCN API. {err}")
if "message" in response.json():
msg = response.json()["message"]
raise Exception(f"Error calling IUCN API. {msg}")
return response
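# --- Hedged usage sketch (added for illustration; the species name and token are placeholders) ---
# Shows how `_request` is meant to be combined with `urljoin` and `API_URL` to call an
# IUCN endpoint; it is only defined, not executed, because a real token is required.
def _example_request_usage(token: str = "YOUR_TOKEN") -> dict:
    url = urljoin(API_URL, "species/common_names/Panthera onca")
    response = _request(url, token)  # raises a descriptive Exception on HTTP or API errors
    return response.json()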
def get_common_names(
names: Union[list, np.ndarray, pd.Series, str],
token: str,
add_supplied_names: bool = False,
expand: bool = True,
):
"""
Gets common names for multiple species using the IUCN API.
Parameters
----------
names : list, array, Series or str
Scientific name(s) to get results for.
token : str
IUCN API authentication token.
add_supplied_names : bool
Add supplied scientific names to the resulting DataFrame.
expand : bool
Whether to expand result rows to match `names` size. If False,
the number of rows will correspond to the number of unique names
in `names`.
Returns
-------
DataFrame
DataFrame with common names.
"""
if isinstance(names, (list, str, np.ndarray)):
names = pd.Series(names)
endpoint = urljoin(API_URL, "species/common_names/")
df = pd.DataFrame()
unique_names = names.dropna().unique()
for name in unique_names:
response = _request(urljoin(endpoint, name), token)
if response.json().get("result"):
result = defaultdict(list)
for item in response.json().get("result"):
result[item["language"]].append(item["taxonname"])
result = pd.Series(result).str.join("|")
else:
result = pd.Series([], dtype="object")
df = df.append(result, ignore_index=True)
if add_supplied_names:
df["supplied_name"] = unique_names
if expand:
df = expand_result(df, names)
return df
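# --- Hedged usage sketch (added for illustration; the names list and token are placeholders) ---
# Mirrors the documented call pattern: scientific names (duplicates allowed) go in, and
# with expand=True one row of pipe-joined common names comes back per supplied name.
def _example_get_common_names(token: str = "YOUR_TOKEN") -> pd.DataFrame:
    names = ["Panthera onca", "Tremarctos ornatus", "Panthera onca"]
    return get_common_names(names, token, add_supplied_names=True, expand=True)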
def get_country_occurrence(
names: Union[list, np.ndarray, pd.Series, str],
token: str,
add_supplied_names: bool = False,
expand: bool = True,
) -> pd.DataFrame:
"""
Gets country occurrence and related information for multiple species
using the IUCN API.
Parameters
----------
names : list, array, Series or str
Scientific name(s) to get results for.
token : str
IUCN API authentication token.
add_supplied_names : bool
Add supplied scientific names to the resulting DataFrame.
expand : bool
Whether to expand result rows to match `names` size. If False,
the number of rows will correspond to the number of unique names
in `names`.
Returns
-------
DataFrame
DataFrame with information about country occurrence for each name.
"""
if isinstance(names, (list, str, np.ndarray)):
names = pd.Series(names)
endpoint = urljoin(API_URL, "species/countries/name/")
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import logging, os, sys, pickle, json, time, yaml, glob
from datetime import datetime as dt
import warnings
warnings.filterwarnings('ignore')
import subprocess
from itertools import chain
from tqdm import tqdm
import networkx as nx
import pandas as pd
from math import pi
import numpy as np
from kedro.io import DataCatalog
import h5py
import gc
from ffsc.flow.simplex import network_simplex
from ffsc.interdiction.gp import *
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
N_WORKERS=2
# total cost overflowing max int: 2147483647
import multiprocessing as mp
def dijkstra_pypy_pickle(community_nodes, df_edges, params):
"""
Pickle out supply nodes, target nodes, and edges
"""
if 'COALMINE' in community_nodes['NODETYPE'].unique():
carrier = 'coal'
source_types = ['COALMINE']
elif 'LNGTERMINAL' in community_nodes['NODETYPE'].unique():
carrier= 'gas'
source_types = ['OILFIELD','OILWELL']
else:
carrier='oil'
source_types = ['OILFIELD','OILWELL']
intermediate_types = ['REFINERY']
logger = logging.getLogger(f'{carrier} pypy pickle')
if carrier == 'gas':
keep_supply = json.load(open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_flow','keep_oilfields_gas.json'),'r')) \
+ json.load(open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_flow','keep_oilwells_gas.json'),'r'))
logger.info(f'Got Keepsupply len {len(keep_supply)}')
elif carrier=='oil':
keep_supply = json.load(open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_flow','keep_oil.json'),'r'))
keep_refineries = json.load(open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_flow','keep_refineries.json'),'r'))
else:
# carrier=='coal':
drop_CA = json.load(open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_flow','drop_CA_coalmines.json'),'r'))
keep_supply = community_nodes.loc[(community_nodes['NODETYPE'].isin(source_types)) & (~community_nodes['NODE'].isin(drop_CA)),'NODE'].values.tolist()
logger.info(f'Got Keepsupply len {len(keep_supply)}')
logger.info('prepping dfs')
df_edges = df_edges[df_edges['source']!='supersource']
community_nodes = community_nodes.reset_index().rename(columns={'index':'idx'})
community_nodes['idx'] = community_nodes['idx'].astype(int)
df_edges = pd.merge(df_edges, community_nodes[['idx','NODE','NODETYPE']], how='left',left_on='source',right_on='NODE').drop(columns=['NODE']).rename(columns={'idx':'source_idx','NODETYPE':'source_type'})
df_edges = pd.merge(df_edges, community_nodes[['idx','NODE','NODETYPE']], how='left',left_on='target',right_on='NODE').drop(columns=['NODE']).rename(columns={'idx':'target_idx','NODETYPE':'target_type'})
#print (df_edges)
target_types = ['CITY','POWERSTATION']
if carrier in ['gas','coal']:
logger.info('prepping graph')
G = nx.DiGraph()
G.add_edges_from([(r[0],r[1], {'z':r[2]}) for r in df_edges[['source_idx','target_idx','z']].astype(int).values.tolist()])
supply_nodes = community_nodes.loc[(community_nodes['NODETYPE'].isin(source_types)) & (community_nodes['NODE'].isin(keep_supply)),'idx'].astype(int).values.tolist()
target_nodes = community_nodes.loc[community_nodes['NODETYPE'].isin(target_types),'idx'].astype(int).values.tolist()
logger.info('pickling')
pickle.dump(supply_nodes, open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'supply_nodes.pkl'),'wb'),protocol=2)
pickle.dump(target_nodes, open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'target_nodes.pkl'),'wb'),protocol=2)
pickle.dump(G._succ,open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'edges.pkl'),'wb'),protocol=2)
else: # is oil and is tricky.
logger.info('Prepping crude/products distinction')
# make sure last-mile city-port connections end up on the 'B' (products) side
df_edges.loc[(df_edges['target_type']=='CITY')&(df_edges['source_type']=='PORT'),'source'] = df_edges.loc[(df_edges['target_type']=='CITY')&(df_edges['source_type']=='PORT'),'source'].apply(lambda el: el+'_B')
df_edges.loc[(df_edges['source_type']=='CITY')&(df_edges['target_type']=='PORT'),'target'] = df_edges.loc[(df_edges['source_type']=='CITY')&(df_edges['target_type']=='PORT'),'target'].apply(lambda el: el+'_B')
df_edges['source_B'] = df_edges['source'].apply(lambda el: el[-2:]=='_B')
df_edges['target_B'] = df_edges['target'].apply(lambda el: el[-2:]=='_B')
df_edges['side']='A'
cond_B = df_edges['source_type'].isin(['CITY','POWERSTATION']) | df_edges['target_type'].isin(['CITY','POWERSTATION']) | (df_edges['source_B']==True) | (df_edges['target_B']==True)
df_edges.loc[cond_B,'side']='B'
logger.info('Prepping crude graph')
G = nx.DiGraph()
G.add_edges_from([(r[0],r[1], {'z':r[2]}) for r in df_edges.loc[df_edges['side']=='A',['source_idx','target_idx','z']].astype(int).values.tolist()])
supply_nodes = community_nodes.loc[(community_nodes['NODETYPE'].isin(source_types)) & (community_nodes['NODE'].isin(keep_supply)),'idx'].astype(int).values.tolist()
target_nodes = community_nodes.loc[community_nodes['NODETYPE'].isin(intermediate_types),'idx'].astype(int).values.tolist()
logger.info('pickling crude')
pickle.dump(supply_nodes, open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'supply_A_nodes.pkl'),'wb'),protocol=2)
pickle.dump(target_nodes, open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'target_A_nodes.pkl'),'wb'),protocol=2)
pickle.dump(G._succ,open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'edges_A.pkl'),'wb'),protocol=2)
logger.info('Prepping products graph')
G = nx.DiGraph()
G.add_edges_from([(r[0],r[1], {'z':r[2]}) for r in df_edges.loc[df_edges['side']=='B',['source_idx','target_idx','z']].astype(int).values.tolist()])
supply_nodes = community_nodes.loc[(community_nodes['NODE'].isin(keep_refineries)),'idx'].astype(int).values.tolist()
target_nodes = community_nodes.loc[community_nodes['NODETYPE'].isin(target_types),'idx'].astype(int).values.tolist()
logger.info('pickling products')
pickle.dump(supply_nodes, open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'supply_B_nodes.pkl'),'wb'),protocol=2)
pickle.dump(target_nodes, open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'target_B_nodes.pkl'),'wb'),protocol=2)
pickle.dump(G._succ,open(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'edges_B.pkl'),'wb'),protocol=2)
return []
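# --- Hedged usage sketch (relies on the module-level imports already used
# above: os, pickle, networkx as nx) -----------------------------------------
# The pickles written above hold plain Python structures: two lists of node
# indices and the DiGraph successor dict (G._succ, i.e. {u: {v: {'z': w}}}).
# A downstream consumer *might* reload them like this, assuming the same
# results/interdiction/dijkstra_paths/<carrier>/ layout used for gas or coal:
def _example_reload_pickled_graph(carrier='coal'):
    base = os.path.join(os.getcwd(), 'results', 'interdiction', 'dijkstra_paths', carrier)
    supply_nodes = pickle.load(open(os.path.join(base, 'supply_nodes.pkl'), 'rb'))
    target_nodes = pickle.load(open(os.path.join(base, 'target_nodes.pkl'), 'rb'))
    succ = pickle.load(open(os.path.join(base, 'edges.pkl'), 'rb'))
    # a dict-of-dicts with edge-attribute dicts round-trips straight into networkx
    G = nx.DiGraph(succ)
    return G, supply_nodes, target_nodes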
def call_pypy_dijkstra(call_params):
call_params = json.loads(call_params)
print (call_params)
process = subprocess.Popen([str(r) for r in call_params],shell=True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
for line in process.stdout:
print(line.decode(), end='')
process.stdout.close()
return_code = process.wait()
return True
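# Hedged note on call_pypy_dijkstra: `call_params` is expected to arrive as a
# JSON-encoded list of command-line tokens (interpreter + script + arguments),
# e.g. something like json.dumps(['pypy3', 'dijkstra_script.py', 'coal']) --
# the script name and arguments shown here are illustrative assumptions, not
# taken from this file. Be aware that Popen(list, shell=True) on POSIX treats
# only the first list element as the command and hands the rest to the shell
# itself, so a single joined string (or shell=False) is usually the safer call.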
def dijkstra_post_oil(df_nodes):
#### first: generate an adjacent costs matrix for oil.
logger = logging.getLogger('Post_oil')
# load all the cost pickles
logger.info('Load all the cost pickles')
picklefiles = glob.glob(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths','oil','costs','*.pkl'))
pkl_data = {int(os.path.splitext(f.split('/')[-1])[0]):pickle.load(open(f,'rb')) for f in picklefiles}
print (pkl_data.keys())
df_nodes = df_nodes.reset_index().rename(columns={'index':'idx'})
target_nodes = df_nodes.loc[df_nodes['NODETYPE'].isin(['CITY','POWERSTATION']),'idx'].values.tolist()
inter_nodes = df_nodes.loc[df_nodes['NODETYPE'].isin(['REFINERY']),'idx'].values.tolist()
source_nodes = df_nodes.loc[df_nodes['NODETYPE'].isin(['OILWELL','OILFIELD']) & df_nodes['idx'].isin(pkl_data.keys()),'idx'].values.tolist()
pkl_data = {(kk1,kk2):cost for kk1,vv in pkl_data.items() for kk2, cost in vv.items() }
#print (pkl_data.keys())
start_nodes = list(set([kk[0] for kk in list(pkl_data.keys()) if kk[0] in source_nodes]))
# make them into an nx digraph
G = nx.DiGraph()
G.add_edges_from([(kk_tup[0],kk_tup[1], {'z':cost}) for kk_tup, cost in pkl_data.items()])
#print('nodes')
#print(G.nodes)
# get supply and target nodes on graph, and call dijkstra for all supply nodes
dijkstra_results = {}
for sn in tqdm(start_nodes, desc='shortest dijkstra'):
costs,paths = nx.single_source_dijkstra(G, sn, target=None, cutoff=None, weight='z')
#print (sn)
#print (costs, paths)
dijkstra_results[sn] = {'costs':costs,'paths':paths}
# parse results into adjacency matrix
df_adj = pd.DataFrame(
{sn:{tn:dijkstra_results[sn]['costs'][tn] for tn in target_nodes if tn in dijkstra_results[sn]['costs'].keys()}
for sn in start_nodes})
"""
path_files = glob.glob('/paths/oil/*.pkl')
path_files = [f for f in path_files if int(os.path.splitext(f.split('/')[-1])[0]) in inter_nodes]
for ii_f, f in enumerate(path_files):
if ii_f>139:
idx = int(os.path.splitext(f.split('/')[-1])[0])
data = pickle.load(open(f,'rb'))
for kk,vv in tqdm(data.items(), desc=f'{ii_f}/{len(path_files)}'):
if not os.path.exists(f'/paths/oil_json/{idx}-{kk}.json'):
json.dump(vv, open(f'/paths/oil_json/{idx}-{kk}.json','w'))
gc.collect()
"""
# if idx in inter_nodes:
# inter_paths[idx] = pickle.load(open(f,'rb'))
### second: combine paths
logger.info('combining paths')
for ii_s, sn in enumerate(start_nodes):
crude_paths = pickle.load(open(f'/paths/oil/{sn}.pkl','rb'))
full_paths = {}
for tn in tqdm(target_nodes, desc=f'{ii_s}/{len(start_nodes)}'):
if tn in dijkstra_results[sn]['paths'].keys():
#print (dijkstra_results[sn]['paths'])
master_path = dijkstra_results[sn]['paths'][tn]
inter_idx = master_path[1]
#full_paths[tn] = crude_paths[inter_idx] + h5_objs[inter_idx][str(tn)][1:].values.tolist()
full_paths[tn] = crude_paths[inter_idx] + json.load(open(f'/paths/oil_json/{inter_idx}-{tn}.json','r'))
pickle.dump(full_paths, open(f'/paths/oil_join/{sn}.pkl','wb'))
idx_mapper = {r[0]:r[1] for r in df_nodes[['idx','NODE']].values.tolist()}
df_adj.index = df_adj.index.astype(int).map(idx_mapper)
df_adj.columns = df_adj.columns.astype(int).map(idx_mapper)
print (df_adj)
# for each source:
## for each target:
### combine paths into new dict
return df_adj
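# --- Hedged usage sketch ----------------------------------------------------
# After the idx -> NODE remapping above, df_adj is a (target x source) cost
# matrix: rows are CITY/POWERSTATION names, columns are supply node names,
# with NaN meaning "no path found". A hypothetical consumer might do:
def _example_cheapest_supplier(df_adj, target_name):
    row = df_adj.loc[target_name].dropna()  # drop unreachable suppliers
    return row.idxmin(), row.min()          # cheapest source and its cost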
def dijkstra_post_parse(community_nodes):
if 'COALMINE' in community_nodes['NODETYPE'].unique():
carrier = 'coal'
source_types = ['COALMINE']
elif 'LNGTERMINAL' in community_nodes['NODETYPE'].unique():
carrier= 'gas'
source_types = ['OILFIELD','OILWELL']
else:
carrier='oil'
source_types = ['OILFIELD','OILWELL']
logger=logging.getLogger(f'{carrier} parse dijkstra')
community_nodes = community_nodes.reset_index().rename(columns={'index':'idx'})
cost_pkl_fs = glob.glob(os.path.join(os.getcwd(),'results','interdiction','dijkstra_paths',carrier,'costs','*'))
logger.info(f'found {len(cost_pkl_fs)} pickle files')
logger.info('Loading pickles...')
cost_pkls = {os.path.splitext(os.path.split(el)[1])[0]:pickle.load(open(el,'rb')) for el in cost_pkl_fs}
logger.info('Parsing to df')
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from typing import List
import pytest
import numpy as np
import pandas as pd
from obp.dataset import (
linear_reward_function,
logistic_reward_function,
linear_behavior_policy_logit,
SyntheticSlateBanditDataset,
)
from obp.types import BanditFeedback
# n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description
invalid_input_of_init = [
(
"4",
3,
2,
"binary",
"independent",
"pbm",
1,
"n_unique_action must be an integer larger than 1",
),
(
1,
3,
2,
"binary",
"independent",
"pbm",
1,
"n_unique_action must be an integer larger than 1",
),
(
5,
"4",
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
-1,
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
10,
2,
"binary",
"independent",
"pbm",
1,
"len_list must be an integer such that",
),
(
5,
3,
0,
"binary",
"independent",
"pbm",
1,
"dim_context must be a positive integer",
),
(
5,
3,
"2",
"binary",
"independent",
"pbm",
1,
"dim_context must be a positive integer",
),
(5, 3, 2, "aaa", "independent", "pbm", 1, "reward_type must be either"),
(5, 3, 2, "binary", "aaa", "pbm", 1, "reward_structure must be one of"),
(5, 3, 2, "binary", "independent", "aaa", 1, "click_model must be one of"),
(5, 3, 2, "binary", "independent", "pbm", "x", "random_state must be an integer"),
(5, 3, 2, "binary", "independent", "pbm", None, "random_state must be an integer"),
]
@pytest.mark.parametrize(
"n_unique_action, len_list, dim_context, reward_type, reward_structure, click_model, random_state, description",
invalid_input_of_init,
)
def test_synthetic_slate_init_using_invalid_inputs(
n_unique_action,
len_list,
dim_context,
reward_type,
reward_structure,
click_model,
random_state,
description,
):
with pytest.raises(ValueError, match=f"{description}*"):
_ = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
reward_structure=reward_structure,
click_model=click_model,
random_state=random_state,
)
def check_slate_bandit_feedback(bandit_feedback: BanditFeedback):
# check pscore columns
pscore_columns: List[str] = []
pscore_candidate_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
    for column in pscore_candidate_columns:
        if column in bandit_feedback and bandit_feedback[column] is not None:
            pscore_columns.append(column)
assert (
len(pscore_columns) > 0
), f"bandit feedback must contain at least one of the following pscore columns: {pscore_candidate_columns}"
bandit_feedback_df = pd.DataFrame()
for column in ["slate_id", "position", "action"] + pscore_columns:
bandit_feedback_df[column] = bandit_feedback[column]
# sort dataframe
bandit_feedback_df = (
bandit_feedback_df.sort_values(["slate_id", "position"])
.reset_index(drop=True)
.copy()
)
# check uniqueness
assert (
bandit_feedback_df.duplicated(["slate_id", "position"]).sum() == 0
), "position must not be duplicated in each slate"
assert (
bandit_feedback_df.duplicated(["slate_id", "action"]).sum() == 0
), "action must not be duplicated in each slate"
# check pscores
for column in pscore_columns:
invalid_pscore_flgs = (bandit_feedback_df[column] < 0) | (
bandit_feedback_df[column] > 1
)
assert invalid_pscore_flgs.sum() == 0, "the range of pscores must be [0, 1]"
if "pscore_cascade" in pscore_columns and "pscore" in pscore_columns:
assert (
bandit_feedback_df["pscore_cascade"] < bandit_feedback_df["pscore"]
).sum() == 0, "pscore must be smaller than or equal to pscore_cascade"
if "pscore_item_position" in pscore_columns and "pscore" in pscore_columns:
assert (
bandit_feedback_df["pscore_item_position"] < bandit_feedback_df["pscore"]
).sum() == 0, "pscore must be smaller than or equal to pscore_item_position"
if "pscore_item_position" in pscore_columns and "pscore_cascade" in pscore_columns:
assert (
bandit_feedback_df["pscore_item_position"]
< bandit_feedback_df["pscore_cascade"]
).sum() == 0, (
"pscore_cascade must be smaller than or equal to pscore_item_position"
)
if "pscore_cascade" in pscore_columns:
previous_minimum_pscore_cascade = (
bandit_feedback_df.groupby("slate_id")["pscore_cascade"]
.expanding()
.min()
.values
)
assert (
previous_minimum_pscore_cascade < bandit_feedback_df["pscore_cascade"]
).sum() == 0, "pscore_cascade must be non-decresing sequence in each slate"
if "pscore" in pscore_columns:
count_pscore_in_expression = bandit_feedback_df.groupby("slate_id").apply(
lambda x: x["pscore"].unique().shape[0]
)
assert (
count_pscore_in_expression != 1
).sum() == 0, "pscore must be unique in each slate"
if "pscore" in pscore_columns and "pscore_cascade" in pscore_columns:
last_slot_feedback_df = bandit_feedback_df.drop_duplicates(
"slate_id", keep="last"
)
assert (
last_slot_feedback_df["pscore"] != last_slot_feedback_df["pscore_cascade"]
).sum() == 0, "pscore must be the same as pscore_cascade in the last slot"
def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy():
# set parameters
n_unique_action = 10
len_list = 3
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 100
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
)
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
pscore_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
bandit_feedback_df = pd.DataFrame()
for column in ["slate_id", "position", "action"] + pscore_columns:
bandit_feedback_df[column] = bandit_feedback[column]
# check pscore marginal
pscore_item_position = 1 / n_unique_action
assert np.allclose(
bandit_feedback_df["pscore_item_position"].unique(), pscore_item_position
), f"pscore_item_position must be [{pscore_item_position}], but {bandit_feedback_df['pscore_item_position'].unique()}"
# check pscore joint
pscore_cascade = []
pscore_above = 1.0
for position_ in np.arange(len_list):
pscore_above = pscore_above * 1.0 / (n_unique_action - position_)
pscore_cascade.append(pscore_above)
assert np.allclose(
bandit_feedback_df["pscore_cascade"], np.tile(pscore_cascade, n_rounds)
), f"pscore_cascade must be {pscore_cascade} for all impresessions"
assert np.allclose(
bandit_feedback_df["pscore"].unique(), [pscore_above]
), f"pscore must be {pscore_above} for all slates"
def test_synthetic_slate_obtain_batch_bandit_feedback_using_uniform_random_behavior_policy_largescale():
# set parameters
n_unique_action = 100
len_list = 10
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 10000
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
)
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
# check pscore marginal
pscore_item_position = 1 / n_unique_action
assert np.allclose(
np.unique(bandit_feedback["pscore_item_position"]), pscore_item_position
), f"pscore_item_position must be [{pscore_item_position}], but {np.unique(bandit_feedback['pscore_item_position'])}"
def test_synthetic_slate_obtain_batch_bandit_feedback_using_linear_behavior_policy():
# set parameters
n_unique_action = 10
len_list = 3
dim_context = 2
reward_type = "binary"
random_state = 12345
n_rounds = 100
dataset = SyntheticSlateBanditDataset(
n_unique_action=n_unique_action,
len_list=len_list,
dim_context=dim_context,
reward_type=reward_type,
random_state=random_state,
behavior_policy_function=linear_behavior_policy_logit,
)
with pytest.raises(ValueError):
_ = dataset.obtain_batch_bandit_feedback(n_rounds=-1)
with pytest.raises(ValueError):
_ = dataset.obtain_batch_bandit_feedback(n_rounds="a")
# obtain feedback
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
# check slate bandit feedback (common test)
check_slate_bandit_feedback(bandit_feedback=bandit_feedback)
# print reward
pscore_columns = [
"pscore_cascade",
"pscore",
"pscore_item_position",
]
bandit_feedback_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
script to carry out flare finding in Kepler LC's
"""
import pandas as pd
import numpy as np
import time as clock
import datetime
import glob
import os.path
import json
import helper as help
from version import __version__
from aflare import aflare1
import detrend
from fake import ed6890, FlareStats, FakeFlaresDist, FakeCompleteness
from get import Get
from gatspy.periodic import LombScargleFast
import warnings
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter, correlate
from matplotlib import rcParams as rcp
rcp.update({'font.size':12})
rcp.update({'font.family': 'sans-serif'})
def FINDflare(flux, error, N1=3, N2=1, N3=3,
avg_std=False, std_window=7,
returnbinary=False, debug=False):
'''
The algorithm for local changes due to flares defined by
<NAME> et al. (2015), Eqn. 3a-d
http://arxiv.org/abs/1510.01005
Note: these equations originally in magnitude units, i.e. smaller
values are increases in brightness. The signs have been changed, but
coefficients have not been adjusted to change from log(flux) to flux.
Note: this algorithm originally ran over sections without "changes" as
defined by Change Point Analysis. May have serious problems for data
with dramatic starspot activity. If possible, remove starspot first!
Parameters
----------
flux : numpy array
data to search over
error : numpy array
errors corresponding to data.
N1 : int, optional
Coefficient from original paper (Default is 3)
How many times above the stddev is required.
N2 : int, optional
Coefficient from original paper (Default is 1)
How many times above the stddev and uncertainty is required
N3 : int, optional
Coefficient from original paper (Default is 3)
The number of consecutive points required to flag as a flare
avg_std : bool, optional
Should the "sigma" in this data be computed by the median of
the rolling().std()? (Default is False)
(Not part of original algorithm)
    std_window : int, optional
        If avg_std=True, how big of a window should it use?
        (Default is 7 data points)
(Not part of original algorithm)
returnbinary : bool, optional
        Should code return the start and stop indices of flares (default,
set to False) or a binary array where 1=flares (set to True)
(Not part of original algorithm)
'''
med_i = np.nanmedian(flux)
if debug is True:
print("DEBUG: med_i = {}".format(med_i))
if avg_std is False:
sig_i = np.nanstd(flux) # just the stddev of the window
else:
# take the average of the rolling stddev in the window.
# better for windows w/ significant starspots being removed
sig_i = np.nanmedian(pd.Series(flux).rolling(std_window, center=True).std())
if debug is True:
print("DEBUG: sig_i = ".format(sig_i))
ca = flux - med_i
cb = np.abs(flux - med_i) / sig_i
cc = np.abs(flux - med_i - error) / sig_i
if debug is True:
print("DEBUG: N0={}, N1={}, N2={}".format(sum(ca>0),sum(cb>N1),sum(cc>N2)))
# pass cuts from Eqns 3a,b,c
ctmp = np.where((ca > 0) & (cb > N1) & (cc > N2))
cindx = np.zeros_like(flux)
cindx[ctmp] = 1
# Need to find cumulative number of points that pass "ctmp"
# Count in reverse!
ConM = np.zeros_like(flux)
# this requires a full pass thru the data -> bottleneck
for k in range(2, len(flux)):
ConM[-k] = cindx[-k] * (ConM[-(k-1)] + cindx[-k])
# these only defined between dl[i] and dr[i]
# find flare start where values in ConM switch from 0 to >=N3
istart_i = np.where((ConM[1:] >= N3) &
(ConM[0:-1] - ConM[1:] < 0))[0] + 1
# use the value of ConM to determine how many points away stop is
istop_i = istart_i + (ConM[istart_i] - 1)
istart_i = np.array(istart_i, dtype='int')
istop_i = np.array(istop_i, dtype='int')
if returnbinary is False:
return istart_i, istop_i
else:
bin_out = np.zeros_like(flux, dtype='int')
for k in range(len(istart_i)):
bin_out[istart_i[k]:istop_i[k]+1] = 1
return bin_out
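# --- Hedged usage sketch (not from the original paper or pipeline) ----------
# A tiny synthetic check of FINDflare: flat flux with small Gaussian scatter
# plus a 3-point bump far above the N1/N2 thresholds. With the defaults
# (N1=3, N2=1, N3=3) the bump should come back as a single candidate spanning
# indices 100..102.
def _example_findflare():
    rng = np.random.RandomState(42)
    flux = 1.0 + rng.normal(0.0, 1e-3, 200)
    error = np.full_like(flux, 1e-3)
    flux[100:103] += 0.05  # injected "flare", well above the local scatter
    istart, istop = FINDflare(flux, error)
    # expected: istart == array([100]), istop == array([102])
    return istart, istop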
def ModelLC(time, flux, error, mode='davenport', **kwargs):
'''
Construct a model light curve.
Parameters:
------------
time : numpy array
flux : numpy array
errors : numpy array
mode : 'davenport' or str
Defines the method used to construct model light curve
'''
if (mode == 'median'):
# only for fully preprocessed LCs like K2SC
flux_model_i = np.nanmedian(flux) * np.ones_like(flux)
flux_diff = flux - flux_model_i
if (mode == 'boxcar'):
# just use the multi-pass boxcar and average. Simple. Too simple...
flux_model1 = detrend.MultiBoxcar(time, flux, error, kernel=0.1)
flux_model2 = detrend.MultiBoxcar(time, flux, error, kernel=1.0)
flux_model3 = detrend.MultiBoxcar(time, flux, error, kernel=10.0)
flux_model_i = (flux_model1 + flux_model2 + flux_model3) / 3.
flux_diff = flux - flux_model_i
if (mode == 'fitsin'):
# first do a pass thru w/ largebox to get obvious flares
box1 = detrend.MultiBoxcar(time, flux, error,
kernel=2.0, numpass=2)
sin1 = detrend.FitSin(time, box1, error, maxnum=2,
maxper=(max(time)-min(time)))
box2 = detrend.MultiBoxcar(time, flux - sin1, error, kernel=0.25)
flux_model_i = (box2 + sin1)
flux_diff = flux - flux_model_i
if (mode == 'davenport'):
# do iterative rejection and spline fit - like FBEYE did
# also like DFM & Hogg suggest w/ BART
box1 = detrend.MultiBoxcar(time, flux, error,
kernel=2.0, numpass=2)
sin1 = detrend.FitSin(time, box1, error, maxnum=5,
maxper=(max(time)-min(time)),
per2=False, debug=kwargs['debug'])
box3 = detrend.MultiBoxcar(time, flux - sin1, error, kernel=0.3)
t = np.array(time)
dt = np.nanmedian(t[1:] - t[0:-1])
exptime_m = (np.nanmax(time) - np.nanmin(time)) / len(time)
# ksep used to = 0.07...
flux_model_i = detrend.IRLSSpline(time, box3, error, numpass=20,
ksep=exptime_m*10.,
debug=kwargs['debug'])
flux_model_i += sin1
signalfwhm = dt * 2
ftime = np.arange(0, 2, dt)
modelfilter = aflare1(ftime, 1, signalfwhm, 1)
#Cross-correlate model filter to enhance flare signals
flux_diff = correlate(flux - flux_model_i,
modelfilter, mode='same')
if (mode == 'savgol'):
# fit data with a SAVGOL filter
dt = np.nanmedian(time[1:] - time[0:-1])
        Nsmo = int(np.floor(0.2 / dt))
if Nsmo % 2 == 0:
Nsmo = Nsmo + 1
flux_model_i = savgol_filter(flux, Nsmo, 2, mode='nearest')
flux_diff = flux - flux_model_i
return flux_model_i, flux_diff
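# --- Hedged usage sketch ----------------------------------------------------
# Of the modes above, only 'median' avoids the detrend/aflare helpers, so it
# makes a convenient smoke test: the model is a flat line at the median flux
# and flux_diff is the residual around it.
def _example_modellc_median():
    t = np.linspace(0, 10, 500)
    f = 1.0 + 1e-3 * np.sin(t)
    e = np.full_like(t, 1e-3)
    model, resid = ModelLC(t, f, e, mode='median')
    # model is constant at np.nanmedian(f); resid = f - model
    return model, resid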
def MultiFind(lc, dlr,mode='davenport',
gapwindow=0.1, minsep=3, debug=False):
'''
NOTE:
This needs to be either
1. made in to simple multi-pass cleaner,
2. made in to "run till no signif change" cleaner, or
3. folded back in to main code
Search for flares in the continuous observation periods
of light curves.
Parameters:
------------
lc : pandas DataFrame
light curve
dlr : list of tuples
contains boundaries of continuous observation periods
mode : 'davenport' or str
method for model light curve construction
gapwindow : 0.1 or float
minsep : 3 or int
debug : False or bool
Return:
------------
istart : numpy array
start indices of flares
istop : numpy array
stop indices of flares
flux_model : numpy array
model light curve
'''
lc['flux_model'] = 0.
istart = np.array([], dtype='int')
istop = np.array([], dtype='int')
flux_model = lc.flux_model.copy().values
for (le,ri) in dlr:
lct = lc.iloc[le:ri].copy()
time, flux = lct.time.values, lct.flux.values,
error, flags = lct.error.values, lct.flags.values
# the bad data points (search where bad < 1)
bad = help.FlagCuts(flags, returngood=False)
flux_model_i, flux_diff = ModelLC(time, flux, error,
gapwindow=gapwindow, minsep=minsep,
mode=mode, debug=debug)
# run final flare-find on DATA - MODEL
isflare = FINDflare(flux_diff, error, N1=3, N2=4, N3=3,
returnbinary=True, avg_std=True)
# now pick out final flare candidate points from above
cand1 = np.where((bad < 1) & (isflare > 0))[0]
x1 = np.where((np.abs(time[cand1]-time[-1]) < gapwindow))
x2 = np.where((np.abs(time[cand1]-time[0]) < gapwindow))
cand1 = np.delete(cand1, x1)
cand1 = np.delete(cand1, x2)
if (len(cand1) < 1):
istart_i = np.array([])
istop_i = np.array([])
else:
# find start and stop index, combine neighboring candidates in to same events
istart_i = cand1[np.append([0], np.where((cand1[1:]-cand1[:-1] > minsep))[0]+1)]
istop_i = cand1[np.append(np.where((cand1[1:]-cand1[:-1] > minsep))[0], [len(cand1)-1])]
# if start & stop times are the same, add 1 more datum on the end
to1 = np.where((istart_i-istop_i == 0))
if len(to1[0])>0:
istop_i[to1] += 1
if debug is True:
plt.figure()
plt.title('debugging plot')
            plt.scatter(time, flux, alpha=0.5, label='flux')
            plt.plot(time, flux_model_i, c='black', label='flux model')
            plt.scatter(time[cand1], flux[cand1], c='red', label='flare candidates')
plt.legend()
plt.show()
#chi2 = chisq(lc.flux.values[le:ri], flux_model_i, error[le:ri])
istart = np.array(np.append(istart, istart_i + le), dtype='int')
istop = np.array(np.append(istop, istop_i + le), dtype='int')
flux_model[le:ri] = flux_model_i
return istart, istop, flux_model
def FakeFlares(df1, lc, dlr, mode='davenport', gapwindow=0.1, fakefreq=.25, debug=False,
savefile=False, outfile='', display=False, verboseout = False):
'''
Create a number of events, inject them in to data
Use grid of amplitudes and durations, keep ampl in relative flux units
Keep track of energy in Equiv Dur.
Duration defined in minutes
Amplitude defined multiples of the median error
Parameters:
-------------
df1 - contains info about flare start and stop
lc - lightcurve
dlr - list of tuples with periods in light curve to analyse
mode - de-trending mode
    gapwindow - default 0.1
    fakefreq - flares per day, default 0.25
    debug - default False
    savefile - default False
    outfile - default ''
    display - default False
    verboseout - default False
Returns:
------------
fakeres - DataFrame with ed_fake (injected EDs), rec_fake (bool, recovered
or not), ed_rec (recovered ED), ed_rec_err (uncertainty of recovered ED),
istart_rec, istop_rec (locations of recovered fake flares)
'''
def EquivDur(time, flux):
'''
Compute the Equivalent Duration of a fake flare.
This is the area under the flare, in relative flux units.
Parameters:
-------------
time : numpy array
units of DAYS
flux : numpy array
relative flux units
Return:
------------
p : float
equivalent duration of a single event in units of seconds
'''
p = np.trapz(flux, x=(time * 60.0 * 60.0 * 24.0))
return p
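    # Hedged worked example of EquivDur: a rectangular flare of relative
    # amplitude 0.01 lasting 0.001 d (86.4 s) has ED ~ 0.01 * 86.4 = 0.864 s,
    # i.e. the time the quiescent star would need to radiate the flare's energy.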
if debug is True:
print(str(datetime.datetime.now()) + ' FakeFlares started')
fakeres = pd.DataFrame()
new_flux = np.copy(lc.flux)/lc.flux_model.median()-1.
nfakesum = int(np.rint(fakefreq * (lc.time.max() - lc.time.min())))
t0_fake = np.zeros(nfakesum, dtype='float')
ed_fake = np.zeros(nfakesum, dtype='float')
dur_fake = np.zeros(nfakesum, dtype='float')
ampl_fake = np.zeros(nfakesum, dtype='float')
checksum = 0
for (le,ri) in dlr:
df2t= lc.iloc[le:ri]
nfake = int(np.rint(fakefreq * (df2t.time.max() - df2t.time.min())))
if debug == True:
print('Inject {} fake flares into a {} datapoint long array.'.format(nfake,ri-le))
df1t = df1[(df1.istart >= le) & (df1.istop <= ri)]
medflux = df2t.flux_model.median()# flux needs to be normalized
rft = pd.DataFrame({'tstart':df2t.time[df2t.index.isin(df1t.istart)],
'tstop':df2t.time[df2t.index.isin(df1t.istop)]})
flags = df2t.flags.values
error = df2t.error.values / medflux
flux = df2t.flux.values / medflux - 1.
time = df2t.time.values
std = np.nanmedian(error)
dur_fake[checksum:checksum+nfake], ampl_fake[checksum:checksum+nfake] = FakeFlaresDist(std, nfake, mode='hawley2014', debug=debug)
for k in range(checksum, checksum+nfake):
# generate random peak time, avoid known flares
isok = False
while isok is False:
# choose a random peak time
t0 = np.random.choice(time)
# Are there any real flares to deal with?
if rft.tstart.shape[0]>0:
# Are there any real flares happening at peak time?
# Fake flares should not overlap with real ones.
b = rft[(t0 >= rft.tstart) & (t0 <= rft.tstop)].shape[0]
if b == 0:
isok = True
else: isok = True
t0_fake[k] = t0
# generate the fake flare
fl_flux = aflare1(time, t0, dur_fake[k], ampl_fake[k])
ed_fake[k] = EquivDur(time, fl_flux)
# inject flare in to light curve
new_flux[le:ri] = new_flux[le:ri] + fl_flux
checksum +=nfake
'''
Re-run flare finding for data + fake flares
Figure out: which flares were recovered?
'''
    # all the hard decision-making should go here
#error minimum is a safety net for the spline function if mode=3
new_lc = pd.DataFrame({'flux':new_flux,'time':lc.time,
'error':max(1e-10,np.nanmedian(
|
pd.Series(new_flux)
|
pandas.Series
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
|
pd.Period("2011-01", freq="M")
|
pandas.Period
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
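    # Hedged bookkeeping note: with partition_on=["P", "L"] the store keys use
    # a hive-style layout dataset_uuid/<table>/P=<p>/L=<l>/<label>.parquet, so
    # 2 tables x 3 P-values x 2 L-values x 2 labels = 24 parquet files plus one
    # _common_metadata per table and the dataset metadata JSON, while the
    # dataset reports 3 x 2 x 2 = 12 partitions (see the assertion earlier in
    # this test).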
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
    Pandas seems to stop evaluating the groupby expression if the dataframe after the first
    column split is of length 1. This seems to be an optimization which should, however,
    still raise a KeyError.
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
# First partition is empty, test this edgecase
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.uint64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P":
|
pd.Series([1], dtype=np.int64)
|
pandas.Series
|
from __future__ import division
from builtins import str
import numpy as np
import pandas as pd
from ..base.transform_primitive_base import (
TransformPrimitive,
make_trans_primitive
)
from featuretools.variable_types import (
Boolean,
Datetime,
DatetimeTimeIndex,
Discrete,
Id,
LatLong,
Numeric,
Ordinal,
Text,
Timedelta,
Variable
)
class IsNull(TransformPrimitive):
"""For each value of base feature, return 'True' if value is null."""
name = "is_null"
input_types = [Variable]
return_type = Boolean
def get_function(self):
return lambda array: pd.isnull(pd.Series(array))
class Absolute(TransformPrimitive):
"""Absolute value of base feature."""
name = "absolute"
input_types = [Numeric]
return_type = Numeric
def get_function(self):
return lambda array: np.absolute(array)
class TimeSincePrevious(TransformPrimitive):
"""Compute the time since the previous instance."""
name = "time_since_previous"
input_types = [DatetimeTimeIndex, Id]
return_type = Numeric
def __init__(self, time_index, group_feature):
"""Summary
Args:
base_feature (PrimitiveBase): Base feature.
group_feature (None, optional): Variable or feature to group
rows by before calculating diff.
"""
group_feature = self._check_feature(group_feature)
assert issubclass(group_feature.variable_type, Discrete), \
"group_feature must have a discrete variable_type"
self.group_feature = group_feature
super(TimeSincePrevious, self).__init__(time_index, group_feature)
def generate_name(self):
return u"time_since_previous_by_%s" % self.group_feature.get_name()
def get_function(self):
def pd_diff(base_array, group_array):
bf_name = 'base_feature'
groupby = 'groupby'
grouped_df = pd.DataFrame.from_dict({bf_name: base_array,
groupby: group_array})
grouped_df = grouped_df.groupby(groupby).diff()
return grouped_df[bf_name].apply(lambda x: x.total_seconds())
return pd_diff
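    # Hedged worked example of pd_diff above: for timestamps
    #   ['2020-01-01 00:00:00', '2020-01-01 00:01:00', '2020-01-01 00:03:00']
    # all sharing one group id, the grouped diff in seconds is
    #   [NaN, 60.0, 120.0]
    # (the first row of every group has no previous instance, hence NaN).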
class DatetimeUnitBasePrimitive(TransformPrimitive):
"""Transform Datetime feature into time or calendar units
(second/day/week/etc)"""
name = None
input_types = [Datetime]
return_type = Ordinal
def get_function(self):
return lambda array: pd_time_unit(self.name)(pd.DatetimeIndex(array))
class TimedeltaUnitBasePrimitive(TransformPrimitive):
"""Transform Timedelta features into number of time units
(seconds/days/etc) they encompass."""
name = None
input_types = [Timedelta]
return_type = Numeric
def get_function(self):
return lambda array: pd_time_unit(self.name)(pd.TimedeltaIndex(array))
class Day(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the day."""
name = "day"
class Days(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of days."""
name = "days"
class Hour(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the hour."""
name = "hour"
class Hours(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of hours."""
name = "hours"
def get_function(self):
def pd_hours(array):
return pd_time_unit("seconds")(pd.TimedeltaIndex(array)) / 3600.
return pd_hours
class Second(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the second."""
name = "second"
class Seconds(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of seconds."""
name = "seconds"
class Minute(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the minute."""
name = "minute"
class Minutes(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of minutes."""
name = "minutes"
def get_function(self):
def pd_minutes(array):
return pd_time_unit("seconds")(pd.TimedeltaIndex(array)) / 60
return pd_minutes
class Week(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the week."""
name = "week"
class Weeks(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of weeks."""
name = "weeks"
def get_function(self):
def pd_weeks(array):
return pd_time_unit("days")(pd.TimedeltaIndex(array)) / 7
return pd_weeks
class Month(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the month."""
name = "month"
class Months(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of months."""
name = "months"
def get_function(self):
def pd_months(array):
return pd_time_unit("days")(pd.TimedeltaIndex(array)) * (12 / 365)
return pd_months
class Year(DatetimeUnitBasePrimitive):
"""Transform a Datetime feature into the year."""
name = "year"
class Years(TimedeltaUnitBasePrimitive):
"""Transform a Timedelta feature into the number of years."""
name = "years"
def get_function(self):
def pd_years(array):
return pd_time_unit("days")(pd.TimedeltaIndex(array)) / 365
return pd_years
class Weekend(TransformPrimitive):
"""Transform Datetime feature into the boolean of Weekend."""
name = "weekend"
input_types = [Datetime]
return_type = Boolean
def get_function(self):
return lambda df: pd_time_unit("weekday")(pd.DatetimeIndex(df)) > 4
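    # Hedged note: pandas weekday numbering runs Mon=0 ... Sun=6, so the
    # "> 4" comparison above flags Saturday (5) and Sunday (6) as weekend.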
class Weekday(DatetimeUnitBasePrimitive):
"""Transform Datetime feature into the boolean of Weekday."""
name = "weekday"
class NumCharacters(TransformPrimitive):
"""Return the characters in a given string.
"""
name = 'characters'
input_types = [Text]
return_type = Numeric
def get_function(self):
return lambda array:
|
pd.Series(array)
|
pandas.Series
|
# Benchmark classifiers
import argparse
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import random
import tablib
from sklearn import metrics
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier, RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier, Perceptron, PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid
from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.extmath import density
from time import time
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format="[%(asctime)s] [%(levelname)s] %(message)s",
datefmt="%Y-%m-%d %I:%M:%S %p")
SEED = 0
MAX_ITER = 1000
TOL = 1e-3
random.seed(SEED)
def main(train_files, output_dir, print_report, select_chi2, print_cm, print_top10, all_categories, use_hashing,
n_features, filtered, **kwargs):
try:
os.mkdir(output_dir)
except FileExistsError:
pass
for train_file in train_files:
run_benchmark(print_report, select_chi2, print_cm, print_top10, all_categories, use_hashing, n_features,
filtered, train_file, output_dir)
def run_benchmark(print_report, select_chi2, print_cm, print_top10, all_categories, use_hashing,
n_features, filtered, train_file, output_dir):
print("Loading data set")
data = tablib.Dataset().load(open(train_file).read(), "csv")
random.shuffle(data)
texts, labels = zip(*data)
split_index = int(len(data) * 0.8)
train_data = texts[:split_index]
train_target = labels[:split_index]
test_data = texts[split_index:]
test_target = labels[split_index:]
print('data loaded')
# split a training set and a test set
y_train, y_test = train_target, test_target
print("Extracting features from the training data using a sparse vectorizer")
if use_hashing:
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False,
n_features=n_features)
x_train = vectorizer.transform(train_data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
x_train = vectorizer.fit_transform(train_data)
print("Extracting features from the test data using the same vectorizer")
x_test = vectorizer.transform(test_data)
# mapping from integer feature name to original token string
if use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if select_chi2:
print("Extracting %d best features by a chi-squared test" %
select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=select_chi2)
x_train = ch2.fit_transform(x_train, y_train)
x_test = ch2.transform(x_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
# #############################################################################
# Benchmark classifiers
def benchmark(clf, clf_descr=None):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(x_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(x_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
precision, recall, fscore, _ = metrics.precision_recall_fscore_support(y_test, pred, average="weighted")
print("f1 score: %0.3f" % fscore)
print("precision: %0.3f" % precision)
print("recall: %0.3f" % recall)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, label in enumerate(clf.classes_):
top10 = np.argsort(clf.coef_[i])[-10:]
print("%s: %s" % (label, " ".join(feature_names[top10])))
print()
# if parser_result.print_report:
# print("classification report:")
# print(metrics.classification_report(y_test, pred,
# target_names=target_names))
if print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
if clf_descr is None:
clf_descr = str(clf).split('(')[0]
return clf_descr, fscore, precision, recall, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(random_state=SEED), "Ridge Classifier"),
(Perceptron(random_state=SEED, max_iter=MAX_ITER, tol=TOL), "Perceptron"),
(PassiveAggressiveClassifier(random_state=SEED, max_iter=MAX_ITER, tol=TOL), "Passive-Aggressive"),
(RandomForestClassifier(random_state=SEED), "Random forest"),
(AdaBoostClassifier(random_state=SEED), "Ada Boost"),
(BaggingClassifier(random_state=SEED), "Bagging"),
(GradientBoostingClassifier(random_state=SEED), "Gradient Boosting"),
(LinearSVC(random_state=SEED, max_iter=MAX_ITER, tol=TOL), "Linear SVC"),
(SGDClassifier(random_state=SEED, max_iter=MAX_ITER, tol=TOL), "SGD Classifier"),
(DecisionTreeClassifier(random_state=SEED), "Decision Tree"),
(MLPClassifier(random_state=SEED), "Neural Networks"),
(KNeighborsClassifier(), "kNN"),
(NearestCentroid(), "Nearest Centroid"),
(MultinomialNB(), "Multinomial NB"),
(BernoulliNB(), "Bernoulli NB")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
# print('=' * 80)
# print("LinearSVC with L1-based feature selection")
# # The smaller C, the stronger the regularization.
# # The more regularization, the more sparsity.
# results.append(benchmark(Pipeline([
# ('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
# tol=1e-3))),
# ('classification', LinearSVC(penalty="l2"))])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(len(results[0]))]
clf_names, f1_scores, precisions, recalls, training_times, test_times = results
clf_names = [x for _, x in sorted(zip(f1_scores, clf_names))]
precisions = [x for _, x in sorted(zip(f1_scores, precisions))]
recalls = [x for _, x in sorted(zip(f1_scores, recalls))]
training_times = [x for _, x in sorted(zip(f1_scores, training_times))]
test_times = [x for _, x in sorted(zip(f1_scores, test_times))]
f1_scores = sorted(f1_scores)
plt.figure(figsize=(12, 8))
plt.title("Score")
width = 0.15
plt.barh(indices + width * 2, f1_scores, width, label="f1 score")
plt.barh(indices + width * 1, recalls, width, label="recall")
plt.barh(indices + width * 0, precisions, width, label="precision")
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.savefig(output_dir + "/" + os.path.splitext(os.path.basename(train_file))[0] + "_clf_score.pdf", format="pdf",
dpi=600, bbox_inches='tight')
plt.close()
df =
|
pd.DataFrame({"training": training_times, "testing": test_times}, index=clf_names)
|
pandas.DataFrame
|
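A minimal sketch of the pandas.DataFrame constructor used in the completion above; the classifier names and timing values below are made-up placeholders.
import pandas as pd
clf_names = ["Ridge Classifier", "Random forest", "Neural Networks"]  # hypothetical
training_times = [0.12, 0.85, 2.40]                                   # hypothetical seconds
test_times = [0.01, 0.06, 0.31]
# One row per classifier, one column per timing kind.
df = pd.DataFrame({"training": training_times, "testing": test_times}, index=clf_names)
print(df)   # the benchmark script could plot or save these timings alongside the scores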
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
|
pd.testing.assert_index_equal(data, res)
|
pandas.testing.assert_index_equal
|
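A minimal sketch of pandas.testing.assert_index_equal, the API labeled above, using small integer indexes as assumed inputs.
import pandas as pd
left = pd.Index([1, 2, 3])
right = pd.Index([1, 2, 3])
pd.testing.assert_index_equal(left, right)   # matching indexes: returns None, raises nothing
try:
    pd.testing.assert_index_equal(left, pd.Index([1.0, 2.0, 3.0]))
except AssertionError as err:
    print("dtype mismatch is reported:", err)  # int64 vs float64 fails by default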
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
notna,
)
import pandas._testing as tm
def test_expanding_corr(series):
A = series.dropna()
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(series):
result = series.expanding(min_periods=0).count()
tm.assert_almost_equal(
result, series.rolling(window=len(series), min_periods=0).count()
)
def test_expanding_quantile(series):
result = series.expanding().quantile(0.5)
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(series):
A = series
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(frame):
result = frame.expanding().cov()
rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(frame):
result = frame.expanding().corr()
rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_func(func, static_comp, frame_or_series):
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = getattr(data.expanding(min_periods=1, axis=0), func)()
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[10], static_comp(data[:11]))
else:
tm.assert_series_equal(
result.iloc[10], static_comp(data[:11]), check_names=False
)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_min_periods(func, static_comp):
ser = Series(np.random.randn(50))
result = getattr(ser.expanding(min_periods=30, axis=0), func)()
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = getattr(ser.expanding(min_periods=15, axis=0), func)()
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result0, result1)
result = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_expanding_apply(engine_and_raw, frame_or_series):
engine, raw = engine_and_raw
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = data.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[9], np.mean(data[:11]))
else:
tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
def test_expanding_min_periods_apply(engine_and_raw):
engine, raw = engine_and_raw
ser = Series(np.random.randn(50))
result = ser.expanding(min_periods=30).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
# min_periods is working correctly
result = ser.expanding(min_periods=15).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = ser2.expanding(min_periods=5).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = ser.expanding(min_periods=0).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
result1 = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result0, result1)
result = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
mean_x = x.expanding(min_periods=min_periods).mean()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_std(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if isinstance(x, Series):
var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof)
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
var_y = x.expanding(min_periods=min_periods).var(ddof=ddof)
cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.expanding(min_periods=min_periods).corr(x)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
std_y = x.expanding(min_periods=min_periods).std(ddof=ddof)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if ddof == 0:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.expanding(min_periods=min_periods).mean()
mean_y = x.expanding(min_periods=min_periods).mean()
mean_x_times_y = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_mean(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
result = x.expanding(min_periods=min_periods).mean()
expected = (
x.expanding(min_periods=min_periods).sum()
/ x.expanding(min_periods=min_periods).count()
)
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_constant(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding().count()
mean_x = x.expanding(min_periods=min_periods).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.expanding(min_periods=min_periods).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
# check variance debiasing factors
var_unbiased_x = x.expanding(min_periods=min_periods).var()
var_biased_x = x.expanding(min_periods=min_periods).var(ddof=0)
var_debiasing_factors_x = x.expanding().count() / (
x.expanding().count() - 1.0
).replace(0.0, np.nan)
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
],
)
def test_moment_functions_zero_length_pairwise(f):
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
columns=Index(["a"], name="foo"),
dtype="float64",
)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_apply_empty_series(engine_and_raw):
engine, raw = engine_and_raw
ser = Series([], dtype=np.float64)
tm.assert_series_equal(
ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
)
def test_expanding_apply_min_periods_0(engine_and_raw):
# GH 8080
engine, raw = engine_and_raw
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)
expected = Series([1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_pairwise_diff_length():
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo"))
df1a = DataFrame(
[[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo")
)
df2 = DataFrame(
[[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo")
)
df2a = DataFrame(
[[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo")
)
# TODO: xref gh-15826
# .loc is not preserving the names
result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
expected = DataFrame(
[[-3.0, -6.0], [-5.0, -10.0]],
columns=Index(["A", "B"], name="foo"),
index=Index(["X", "Y"], name="foo"),
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length():
# GH 7512
df1 = DataFrame(
[[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar")
)
df1a = DataFrame(
[[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"]
)
df2 = DataFrame(
[[5, 6], [None, None], [2, 1]],
columns=["X", "Y"],
index=Index(range(3), name="bar"),
)
df2a = DataFrame(
[[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"]
)
result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
expected = DataFrame(
[[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"])
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_apply_args_kwargs(engine_and_raw):
def mean_w_arg(x, const):
return np.mean(x) + const
engine, raw = engine_and_raw
df = DataFrame(np.random.rand(20, 3))
expected = df.expanding().apply(np.mean, engine=engine, raw=raw) + 20.0
result = df.expanding().apply(mean_w_arg, engine=engine, raw=raw, args=(20,))
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
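The tests above alias the private module pandas._testing as tm; a minimal sketch using the public counterpart pandas.testing.assert_frame_equal, with made-up frames.
import numpy as np
import pandas as pd
import pandas.testing as tm   # public counterpart of the pandas._testing alias used above
result = pd.DataFrame({"a": [1.0, 2.0, np.nan]})
expected = result.copy()
tm.assert_frame_equal(result, expected)   # identical frames: no exception raised
try:
    tm.assert_frame_equal(result, expected.astype("float32"))
except AssertionError:
    print("dtype differences are flagged unless check_dtype=False")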
# generate test_data
# Last date : 13.04.2021
# By : <NAME> (<EMAIL>)
# This script is intended to generate sample data and save them into the
# test_data file. The saved objects will then be used to test the
# normalization module using unit testing.
import pickle
import pandas as pd
import pathlib
import BFAIR.normalization as normalization
current_dir = str(pathlib.Path(__file__).parent.absolute())
|
pd.set_option("mode.chained_assignment", None)
|
pandas.set_option
|
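A minimal sketch of pandas.set_option (with get_option and reset_option) around the same chained-assignment option used in the completion above.
import pandas as pd
pd.set_option("mode.chained_assignment", None)    # silence SettingWithCopyWarning
print(pd.get_option("mode.chained_assignment"))   # None
pd.reset_option("mode.chained_assignment")        # restore the default ("warn")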
# ActivitySim
# See full license in LICENSE.txt.
import logging
import numpy as np
import orca
import pandas as pd
from activitysim.core import simulate as asim
from activitysim.core import tracing
from activitysim.core import pipeline
logger = logging.getLogger(__name__)
@orca.table()
def households(store, households_sample_size, trace_hh_id):
df_full = store["households"]
# if we are tracing hh exclusively
if trace_hh_id and households_sample_size == 1:
# df contains only trace_hh (or empty if not in full store)
df = tracing.slice_ids(df_full, trace_hh_id)
# if we need sample a subset of full store
elif households_sample_size > 0 and len(df_full.index) > households_sample_size:
# take the requested random sample
df = asim.random_rows(df_full, households_sample_size)
# if tracing and we missed trace_hh in sample, but it is in full store
if trace_hh_id and trace_hh_id not in df.index and trace_hh_id in df_full.index:
# replace first hh in sample with trace_hh
logger.debug("replacing household %s with %s in household sample" %
(df.index[0], trace_hh_id))
df_hh = tracing.slice_ids(df_full, trace_hh_id)
df =
|
pd.concat([df_hh, df[1:]])
|
pandas.concat
|
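A minimal sketch of pandas.concat for the "prepend the traced household to the sample" pattern above; the index values and the income column are made-up assumptions.
import pandas as pd
df_hh = pd.DataFrame({"income": [50000]}, index=[42])           # traced household
df = pd.DataFrame({"income": [30000, 70000]}, index=[7, 13])    # random sample
combined = pd.concat([df_hh, df[1:]])   # drop the first sampled row, keep the traced one
print(combined.index.tolist())          # [42, 13]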
# -*- coding: utf-8 -*-
import sys
import requests
from bs4 import BeautifulSoup,BeautifulStoneSoup
from sqlalchemy import *
from flask import g
import pandas as pd
import numpy as np
from flask import current_app as app
from webapp.services import db,db_service as dbs
from webapp.models import Stock,Comment,FinanceBasic
import json,random,time
import http.cookiejar
from pandas.tseries.offsets import *
from datetime import datetime
import urllib2,re,html5lib
group_stockholder_rate = None
def getLatestStockHolder():
global group_stockholder_rate
if group_stockholder_rate is None:
hdf = pd.read_sql_query("select code,report_date,holder_type,holder_name,rate\
from stock_holder order by report_date desc", db.engine)
group_stockholder_rate = hdf.groupby(['code']).head(10)
#group_stockholder_rate = gdf[gdf['holder_type'] != '自然人股']
return group_stockholder_rate
def getRefreshStocks():
start_date = datetime.now().strftime('%Y-%m-%d')
# Get the list of all stock codes
stocks = db.session.query(Stock).\
filter(or_(Stock.holder_updated_time == None,Stock.holder_updated_time < start_date)).\
filter_by(flag=0).all()
return map(lambda x:x.code, stocks)
def refreshStockHolderSum(gdf,code):
#gdf = getLatestStockHolder()
agdf = gdf[gdf['holder_type'] != '自然人股']
a1gdf = agdf.groupby(['report_date'])
t1_gdf = a1gdf['rate'].agg({'size': np.size})
t1_gdf = t1_gdf.reset_index()
t2_gdf = a1gdf['rate'].agg({'sum': np.sum})
t2_gdf = t2_gdf.reset_index()
t3_gdf = pd.merge(t1_gdf, t2_gdf, on='report_date')
t3_gdf = t3_gdf.sort_values(by='report_date', ascending=False).head(1)
t3_gdf['code'] = code
bdf = pd.read_sql_query("select * from stock_basic where code =%(code)s and flag=0", db.engine, params={'code': code})
t3_df = pd.merge(t3_gdf, bdf, on='code')
m2_df = pd.DataFrame({
'code': t3_df.code,
'name': t3_df['name'],
'report_date': t3_df.report_date,
'count': t3_df['size'],
'sum': t3_df['sum']
})
if not m2_df.empty:
for row_index, row in m2_df.iterrows():
sql = text("delete from stock_holder_sum where code =:code")
result = db.session.execute(sql,{'code': row.code})
m2_df.to_sql('stock_holder_sum', db.engine, if_exists='append', index=False, chunksize=1000)
global group_stockholder_rate
group_stockholder_rate = None
# Get the institutional shareholding ratio
def getGroupStockHolderRate():
df = pd.read_sql_query(
"select code,count,sum,report_date from stock_holder_sum",
db.engine)
return df
def queryHolderName(hcode):
sql = "select holder_name from stock_holder where holder_code=:code limit 1";
resultProxy = db.session.execute(text(sql), {'code': hcode})
return resultProxy.scalar()
# Get the ranking of natural-person (individual) shareholders
def getStockHolderRank():
gdf = getLatestStockHolder()
agdf = gdf[gdf['holder_type'] == '自然人股']
a1gdf = agdf.groupby(['code'])
t2_gdf = a1gdf['rate'].agg({'sum': np.sum}) # ratio
t3_gdf = a1gdf['rate'].agg({'size': np.size}) # count
t4_df = pd.concat([t2_gdf, t3_gdf], axis=1, join='inner')
t41_df = pd.DataFrame({
'sum': t4_df['sum'],
'count': t4_df['size'],
'avg': t4_df['sum'] / t4_df['size']
})
t5_df = t41_df.sort_values(by='avg', ascending=True).head(100)
t6_df = t5_df.reset_index()
bdf = dbs.get_global_basic_data().reset_index()
t7_df = pd.merge(t6_df, bdf, on='code')
return t7_df
def getStockHolderFromNet(code):
import re
url = "http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CirculateStockHolder/stockid/" + code + ".phtml"
headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
session = requests.session()
feeddata = session.get(url, headers=headers)
soup = BeautifulSoup(feeddata.content, "html5lib")
paper_name = soup.html.body.find(id="CirculateShareholderTable").tbody.find_all('tr')
report_date = []
holder_name = []
holder_code = []
amount = []
rate = []
holder_type = []
holder_parent = []
rdate = ''
i = 0
for e in paper_name:
t = e.find_all('td')
s = e.find_all('strong')
if len(s) > 0:
if s[0].string == '截止日期':
rdate = t[1].string
i += 1
if i ==1:
latest_val = rdate
if t[0].div:
if t[0].div.string:
if t[0].div.string.isdigit():
hname = t[1].div.text
report_date.append(rdate)
holder_name.append(hname)
amount.append(t[2].div.string)
rateStr = t[3].div.string
# special handling for Sina's data format
if rateStr:
rateArray = re.findall("^[0-9]*\.?[0-9]{0,2}", rateStr)
rate.append(rateArray[0])
else:
rate.append('1')
holder_type.append(t[4].div.string)
hcode = re.sub(u"[\–\-\-\:\s+\.\!\/_,$%^*(+\"\')]+|[+——()?【】“”!,。?、~@#¥%……&*()]+", "", hname)
holder_code.append(hcode)
hname_array = re.compile(u'-|-').split(hname)
holder_parent.append(hname_array[0])
df1 = pd.DataFrame({
'code': code,
'report_date': report_date,
'holder_name': holder_name,
'holder_code': holder_code,
'amount': amount,
'rate': rate,
'holder_type': holder_type,
'holder_parent': holder_parent
})
return {'code': code, 'data': df1}
def updateStockHolder(data):
code = data['code']
df1 = data['data']
st = dbs.getStock(code)
sql = "select max(report_date) from stock_holder where code=:code";
resultProxy = db.session.execute(text(sql), {'code': code})
s_date = resultProxy.scalar()
if (s_date == None):
s_date = st.launch_date # fall back to the listing (IPO) date
def convertDate(x):
return pd.to_datetime(x).date()
df2 = df1[df1['report_date'].apply(convertDate) > s_date]
if not df2.empty:
df2.to_sql('stock_holder', db.engine, if_exists='append', index=False, chunksize=1000)
# update the summary information
#refreshStockHolderSum(df2,code)
latest_report = df1['report_date'].max()
# update the stock record
st.latest_report = latest_report
st.holder_updated_time = datetime.now()
db.session.flush()
#app.logger.info(code +' update done. the latest report date is:' + latest_report)
def getLatestStockHolder(code):
today = datetime.now()
dt_2 = QuarterEnd().rollback(today - DateOffset(years=3))
submit_date = dt_2.date()
hdf = pd.read_sql_query("select id, code,report_date,holder_type,holder_name,holder_code,rate,amount \
from stock_holder where code=%(name)s and report_date>=%(submit_date)s \
order by report_date asc,rate desc", db.engine, \
params={'name': code, 'submit_date': submit_date.strftime('%Y-%m-%d')})
grouped = hdf.groupby('report_date')
pre_group = pd.DataFrame()
def getValue(x, attri):
d1 = m1_df[m1_df['holder_code'] == x]
v1 = d1.get(attri + '_x')
v2 = d1.get(attri + '_y')
if v1.item() != v1.item(): # NaN check (NaN != NaN)
return v2.item()
else:
return v1.item()
def countVar(x):
d1 = m1_df[m1_df['holder_code'] == x]
v1 = d1.get('rate_x')
v2 = d1.get('rate_y')
# also compare amounts to work around rate precision issues
v3 = d1.get('amount_x')
v4 = d1.get('amount_y')
if v1.item() != v1.item(): # NaN check (NaN != NaN)
return '-'
elif v2.item() != v2.item():
return '+'
elif (v1.item() == v2.item()) or (v3.item() == v4.item()):
return '0'
else:
return format(v1.item() - v2.item())
def countAmtVar(x):
d1 = m1_df[m1_df['holder_code'] == x]
v1 = d1.get('amount_x')
v2 = d1.get('amount_y')
if v1.item() != v1.item(): # NaN check (NaN != NaN)
return '-'
elif v2.item() != v2.item():
return '+'
else:
return format(int(v1.item() - v2.item()), ',')
result = []
for name, group in grouped:
if not pre_group.empty:
m1_df = pd.merge(group, pre_group, on='holder_code', how='outer')
m2_df = pd.DataFrame({
'name': m1_df['holder_code'].apply(getValue, args=('holder_name',)),
'code': m1_df['holder_code'],
'report_date': name,
'amount': m1_df['holder_code'].apply(getValue, args=('amount',)),
'rate': m1_df['holder_code'].apply(getValue, args=('rate',)),
'var': m1_df['holder_code'].apply(countVar),
'var_amt': m1_df['holder_code'].apply(countAmtVar)
})
result.append({'report_date': name, 'data': m2_df})
pre_group = group # reset the comparison group for the next period
return result
# compare shareholding changes across recent reporting periods
def getStockHolder(code,report_date,direction):
sql = "select max(report_date) from stock_holder where code=:code";
resultProxy = db.session.execute(text(sql), {'code': code})
_max_date = resultProxy.scalar()
if report_date == '':
if (_max_date == None):
_max_date = dbs.getStock(code).launch_date # fall back to the listing (IPO) date
_next_date =
|
pd.to_datetime(_max_date)
|
pandas.to_datetime
|
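A minimal sketch of pandas.to_datetime as used in the completion above; the date string is a placeholder.
import pandas as pd
_max_date = "2020-03-31"            # hypothetical latest report date
ts = pd.to_datetime(_max_date)      # Timestamp('2020-03-31 00:00:00')
print(ts.year, ts.quarter)          # 2020 1
# Lists (and Series) are converted element-wise to a DatetimeIndex / datetime64 column.
print(pd.to_datetime(["2020-03-31", "2020-06-30"]))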
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 97.38610996),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(
|
Timestamp('2014-11-01 00:00:00')
|
pandas.Timestamp
|
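A minimal sketch of pandas.Timestamp, the constructor used throughout the fixtures above; the offset arithmetic shown is standard Timestamp behavior.
import pandas as pd
ts = pd.Timestamp("2014-11-01 00:00:00")
print(ts.year, ts.month, ts.day_name())     # 2014 11 Saturday
next_month = ts + pd.offsets.MonthBegin(1)  # roll forward to the next month start
print(next_month)                           # 2014-12-01 00:00:00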
from typing import (
Any,
Callable,
cast,
Dict,
IO,
Iterable,
Iterator,
List,
Optional,
overload,
Set,
TextIO,
Tuple,
TYPE_CHECKING,
Union,
)
import io
import os
import pickle
import sys
import json
import time
import weakref
import inspect
import textwrap
import threading
import contextlib
from io import BytesIO, StringIO
from urllib.parse import urlparse, urlunparse
from pathlib import PosixPath, PurePath
from graphviz.backend import ExecutableNotFound
import pandas as pd
import requests
from requests import Response
from requests.exceptions import HTTPError, RequestException
from typing_extensions import Literal
import quick_server
from .util import (
async_compute,
ByteResponse,
content_to_csv_bytes,
df_to_csv_bytes,
get_age,
get_file_hash,
get_file_upload_chunk_size,
get_max_retry,
get_progress_bar,
get_retry_sleep,
has_graph_easy,
interpret_ctype,
is_jupyter,
maybe_json_loads,
merge_ctype,
safe_opt_num,
ServerSideError,
to_bool,
)
from .types import (
AllowedCustomImports,
BlobFilesResponse,
BlobInit,
BlobOwner,
BlobTypeResponse,
BlobURIResponse,
CacheStats,
CopyBlob,
DagCreate,
DagDef,
DagDupResponse,
DagInfo,
DagInit,
DagList,
DagPrettyNode,
DagReload,
DagStatus,
DeleteBlobResponse,
DynamicResults,
DynamicStatusResponse,
ESQueryResponse,
FlushAllQueuesResponse,
InCursors,
InstanceStatus,
JSONBlobAppendResponse,
KafkaGroup,
KafkaMessage,
KafkaOffsets,
KafkaThroughput,
KafkaTopicNames,
KafkaTopics,
KnownBlobs,
MinimalQueueStatsResponse,
ModelInfo,
ModelParamsResponse,
ModelReleaseResponse,
ModelVersionResponse,
NamespaceList,
NamespaceUpdateSettings,
NodeChunk,
NodeCustomCode,
NodeCustomImports,
NodeDef,
NodeDefInfo,
NodeInfo,
NodeState,
NodeStatus,
NodeTiming,
NodeTypeResponse,
NodeTypes,
NodeUserColumnsResponse,
PrettyResponse,
PutNodeBlob,
QueueMode,
QueueStatsResponse,
QueueStatus,
ReadNode,
S3Config,
SetNamedSecret,
SettingsObj,
TaskStatus,
Timing,
TimingResult,
Timings,
TritonModelsResponse,
UploadFilesResponse,
UUIDResponse,
VersionResponse,
WorkerScale,
)
if TYPE_CHECKING:
WVD = weakref.WeakValueDictionary[str, 'DagHandle']
else:
WVD = weakref.WeakValueDictionary
API_VERSION = 4
DEFAULT_URL = "http://localhost:8080"
DEFAULT_NAMESPACE = "default"
METHOD_DELETE = "DELETE"
METHOD_FILE = "FILE"
METHOD_GET = "GET"
METHOD_LONGPOST = "LONGPOST"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
PREFIX = "/xyme"
INPUT_CSV_EXT = ".csv"
INPUT_TSV_EXT = ".tsv"
INPUT_ZIP_EXT = ".zip"
INPUT_EXT = [INPUT_ZIP_EXT, INPUT_CSV_EXT, INPUT_TSV_EXT]
FUNC = Callable[..., Any]
CUSTOM_NODE_TYPES = {
"custom_data",
"custom_json",
"custom_json_to_data",
"custom_json_join_data",
}
NO_RETRY = [METHOD_POST, METHOD_FILE]
class AccessDenied(Exception):
pass
# *** AccessDenied ***
class LegacyVersion(Exception):
def __init__(self, api_version: int) -> None:
super().__init__(f"expected {API_VERSION} got {api_version}")
self._api_version = api_version
def get_api_version(self) -> int:
return self._api_version
# *** LegacyVersion ***
class XYMEClient:
def __init__(
self,
url: str,
token: Optional[str],
namespace: str) -> None:
self._url = url.rstrip("/")
if token is None:
token = os.environ.get("XYME_SERVER_TOKEN")
self._token = token
self._namespace = namespace
self._last_action = time.monotonic()
self._auto_refresh = True
self._dag_cache: WVD = weakref.WeakValueDictionary()
self._node_defs: Optional[Dict[str, NodeDefInfo]] = None
def get_version() -> int:
server_version = self.get_server_version()
try:
return int(server_version["api_version"])
except (ValueError, KeyError) as e:
raise LegacyVersion(1) from e
api_version = get_version()
if api_version < API_VERSION:
raise LegacyVersion(api_version)
self._api_version = api_version
def get_api_version(self) -> int:
return self._api_version
def set_auto_refresh(self, is_auto_refresh: bool) -> None:
self._auto_refresh = is_auto_refresh
def is_auto_refresh(self) -> bool:
return self._auto_refresh
def refresh(self) -> None:
self._node_defs = None
def _maybe_refresh(self) -> None:
if self.is_auto_refresh():
self.refresh()
# FIXME: Do we still need this?
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
old_refresh = self.is_auto_refresh()
try:
self.set_auto_refresh(False)
yield old_refresh
finally:
self.set_auto_refresh(old_refresh)
def _raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]] = None,
add_prefix: bool = True,
add_namespace: bool = True,
api_version: Optional[int] = None) -> Tuple[BytesIO, str]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_bytes(
method,
path,
args,
files,
add_prefix,
add_namespace,
api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
if not reset_files():
raise
if method in NO_RETRY:
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_str(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
add_namespace: bool = True,
api_version: Optional[int] = None) -> TextIO:
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_str(
method,
path,
args,
add_prefix,
add_namespace,
api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if method in NO_RETRY:
raise
if retry >= max_retry:
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
add_namespace: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_json(
method,
path,
args,
add_prefix,
add_namespace,
files,
api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
if not reset_files():
raise
if method in NO_RETRY:
raise
time.sleep(get_retry_sleep())
retry += 1
def _fallible_raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]],
add_prefix: bool,
add_namespace: bool,
api_version: Optional[int]) -> Tuple[BytesIO, str]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if add_namespace:
args["namespace"] = self._namespace
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
# NOTE: no content type check -- will be handled by interpret_ctype
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(
url,
data=args,
files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
},
headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_str(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
add_namespace: bool,
api_version: Optional[int]) -> TextIO:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if add_namespace:
args["namespace"] = self._namespace
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
if req.headers["content-type"] == "application/problem+json":
raise ServerSideError(json.loads(req.text)["errMessage"])
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return StringIO(req.text)
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return StringIO(req.text)
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
add_namespace: bool,
files: Optional[Dict[str, IO[bytes]]],
api_version: Optional[int]) -> Dict[str, Any]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if add_namespace:
args["namespace"] = self._namespace
if method != METHOD_FILE and files is not None:
raise ValueError(
f"files are only allow for post (got {method}): {files}")
req = None
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
if req.headers["content-type"] == "application/problem+json":
raise ServerSideError(json.loads(req.text)["errMessage"])
try:
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(
url,
data=args,
files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
},
headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_PUT:
req = requests.put(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_DELETE:
req = requests.delete(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_LONGPOST:
args["token"] = self._token
try:
res = quick_server.worker_request(url, args)
if "errMessage" in res:
raise ServerSideError(res["errMessage"])
return res
except quick_server.WorkerError as e:
if e.get_status_code() == 403:
raise AccessDenied(e.args) from e
raise e
raise ValueError(f"unknown method {method}")
except json.decoder.JSONDecodeError as e:
if req is None:
raise
raise ValueError(req.text) from e
def request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]] = None,
add_prefix: bool = True,
add_namespace: bool = True,
api_version: Optional[int] = None) -> Tuple[BytesIO, str]:
return self._raw_request_bytes(
method, path, args, files, add_prefix, add_namespace, api_version)
def request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
add_namespace: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
return self._raw_request_json(
method, path, args, add_prefix, add_namespace, files, api_version)
def get_server_version(self) -> VersionResponse:
return cast(VersionResponse, self.request_json(
METHOD_GET,
f"{PREFIX}/v{API_VERSION}/version",
{},
add_prefix=False,
add_namespace=False))
def get_namespaces(self) -> List[str]:
return cast(NamespaceList, self.request_json(
METHOD_GET, "/namespaces", {}))["namespaces"]
def get_dags(self) -> List[str]:
return [
res["dag"]
for res in self.get_dag_times(retrieve_times=False)[1]
]
def get_dag_ages(self) -> List[Dict[str, Optional[str]]]:
cur_time, dags = self.get_dag_times(retrieve_times=True)
return [
{
"config_error": dag_status["config_error"],
"created": get_age(cur_time, dag_status["created"]),
"dag": dag_status["dag"],
"deleted": get_age(cur_time, dag_status["deleted"]),
"latest": get_age(cur_time, dag_status["latest"]),
"oldest": get_age(cur_time, dag_status["oldest"]),
}
for dag_status in sorted(dags, key=lambda el: (
el["config_error"] is None,
safe_opt_num(el["oldest"]),
safe_opt_num(el["latest"]),
el["dag"]))
]
def get_dag_times(self, retrieve_times: bool = True) -> Tuple[
float, List[DagStatus]]:
res = cast(DagList, self.request_json(
METHOD_GET, "/dags", {
"retrieve_times": int(retrieve_times),
}))
return res["cur_time"], res["dags"]
def get_dag(self, dag_uri: str) -> 'DagHandle':
res = self._dag_cache.get(dag_uri)
if res is not None:
return res
res = DagHandle(self, dag_uri)
self._dag_cache[dag_uri] = res
return res
def get_blob_handle(self, uri: str, is_full: bool = False) -> 'BlobHandle':
return BlobHandle(self, uri, is_full=is_full)
def get_node_defs(self) -> Dict[str, NodeDefInfo]:
self._maybe_refresh()
if self._node_defs is not None:
return self._node_defs
res = cast(NodeTypes, self.request_json(
METHOD_GET, "/node_types", {}, add_namespace=False))["info"]
self._node_defs = res
return res
def create_new_blob(self, blob_type: str) -> str:
return cast(BlobInit, self.request_json(
METHOD_POST, "/blob_init", {
"type": blob_type,
}, add_namespace=False))["blob"]
def get_blob_owner(self, blob_uri: str) -> BlobOwner:
return cast(BlobOwner, self.request_json(
METHOD_GET, "/blob_owner", {
"blob": blob_uri,
}))
def set_blob_owner(
self,
blob_uri: str,
dag_id: Optional[str] = None,
node_id: Optional[str] = None,
external_owner: bool = False) -> BlobOwner:
return cast(BlobOwner, self.request_json(
METHOD_PUT, "/blob_owner", {
"blob": blob_uri,
"owner_dag": dag_id,
"owner_node": node_id,
"external_owner": external_owner,
}))
def create_new_dag(
self,
username: Optional[str] = None,
dagname: Optional[str] = None,
index: Optional[int] = None) -> str:
return cast(DagInit, self.request_json(
METHOD_POST, "/dag_init", {
"user": username,
"name": dagname,
"index": index,
}))["dag"]
def get_blob_type(self, blob_uri: str) -> BlobTypeResponse:
return cast(BlobTypeResponse, self.request_json(
METHOD_GET, "/blob_type", {
"blob_uri": blob_uri,
},
))
def get_csv_blob(self, blob_uri: str) -> 'CSVBlobHandle':
blob_type = self.get_blob_type(blob_uri)
if not blob_type["is_csv"]:
raise ValueError(f"blob: {blob_uri} is not csv type")
return CSVBlobHandle(self, blob_uri, is_full=False)
def get_custom_code_blob(self, blob_uri: str) -> 'CustomCodeBlobHandle':
blob_type = self.get_blob_type(blob_uri)
if not blob_type["is_custom_code"]:
raise ValueError(f"blob: {blob_uri} is not custom code type")
return CustomCodeBlobHandle(self, blob_uri, is_full=False)
def get_json_blob(self, blob_uri: str) -> 'JSONBlobHandle':
blob_type = self.get_blob_type(blob_uri)
if not blob_type["is_json"]:
raise ValueError(f"blob: {blob_uri} is not json type")
return JSONBlobHandle(self, blob_uri, is_full=False)
def duplicate_dag(
self,
dag_uri: str,
dest_uri: Optional[str] = None,
copy_nonowned_blobs: Optional[bool] = None,
retain_nonowned_blobs: bool = False,
warnings_io: Optional[IO[Any]] = sys.stderr) -> str:
if copy_nonowned_blobs is None:
copy_nonowned_blobs = not retain_nonowned_blobs
elif warnings_io is not None:
warnings_io.write(
"copy_nonowned_blobs is deprecated; "
"use retain_nonowned_blobs instead\n")
warnings_io.flush()
# FIXME: !!!xyme-backend bug!!!
copy_nonowned_blobs = not copy_nonowned_blobs
args = {
"dag": dag_uri,
"copy_nonowned_blobs": copy_nonowned_blobs,
}
if dest_uri is not None:
args["dest"] = dest_uri
return cast(DagDupResponse, self.request_json(
METHOD_POST, "/dag_dup", args))["dag"]
def set_dag(
self,
dag_uri: str,
defs: DagDef,
warnings_io: Optional[IO[Any]] = sys.stderr) -> 'DagHandle':
dag_create = cast(DagCreate, self.request_json(
METHOD_POST, "/dag_create", {
"dag": dag_uri,
"defs": defs,
}))
dag_uri = dag_create["dag"]
if warnings_io is not None:
warnings = dag_create["warnings"]
if len(warnings) > 1:
warnings_io.write(
f"{len(warnings)} warnings while "
f"setting dag {dag_uri}:\n")
elif len(warnings) == 1:
warnings_io.write(
f"Warning while setting dag {dag_uri}:\n")
for warn in warnings:
warnings_io.write(f"{warn}\n")
if warnings:
warnings_io.flush()
return self.get_dag(dag_uri)
def set_settings(
self, config_token: str, settings: SettingsObj) -> SettingsObj:
return cast(NamespaceUpdateSettings, self.request_json(
METHOD_POST, "/settings", {
"settings": settings,
"config_token": config_token,
}))["settings"]
def get_settings(self) -> SettingsObj:
return cast(NamespaceUpdateSettings, self.request_json(
METHOD_GET, "/settings", {}))["settings"]
def get_allowed_custom_imports(self) -> AllowedCustomImports:
return cast(AllowedCustomImports, self.request_json(
METHOD_GET, "/allowed_custom_imports", {}, add_namespace=False))
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
dag: Optional[str],
minimal: Literal[True]) -> MinimalQueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
dag: Optional[str],
minimal: Literal[False]) -> QueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
dag: Optional[str],
minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
...
def check_queue_stats(
self,
dag: Optional[str] = None,
minimal: bool = False) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
if minimal:
return cast(MinimalQueueStatsResponse, self.request_json(
METHOD_GET, "/queue_stats", {
"dag": dag,
"minimal": True,
}))
return cast(QueueStatsResponse, self.request_json(
METHOD_GET, "/queue_stats", {
"dag": dag,
"minimal": False,
}))
def get_instance_status(
self,
dag_uri: Optional[str] = None,
node_id: Optional[str] = None) -> Dict[InstanceStatus, int]:
return cast(Dict[InstanceStatus, int], self.request_json(
METHOD_GET, "/instance_status", {
"dag": dag_uri,
"node": node_id,
}))
def get_queue_mode(self) -> str:
return cast(QueueMode, self.request_json(
METHOD_GET, "/queue_mode", {}, add_namespace=False))["mode"]
def set_queue_mode(self, mode: str) -> str:
return cast(QueueMode, self.request_json(
METHOD_PUT, "/queue_mode", {
"mode": mode,
}, add_namespace=False))["mode"]
def flush_all_queue_data(self) -> None:
def do_flush() -> bool:
res = cast(FlushAllQueuesResponse, self.request_json(
METHOD_POST, "/flush_all_queues", {}, add_namespace=False))
return bool(res["success"])
while do_flush(): # we flush until there is nothing to flush anymore
time.sleep(1.0)
def get_cache_stats(self) -> CacheStats:
return cast(CacheStats, self.request_json(
METHOD_GET, "/cache_stats", {}, add_namespace=False))
def reset_cache(self) -> CacheStats:
return cast(CacheStats, self.request_json(
METHOD_POST, "/cache_reset", {}, add_namespace=False))
def create_kafka_error_topic(self) -> KafkaTopics:
return cast(KafkaTopics, self.request_json(
METHOD_POST, "/kafka_topics", {
"num_partitions": 1,
}))
def get_kafka_error_topic(self) -> str:
res = cast(KafkaTopicNames, self.request_json(
METHOD_GET, "/kafka_topic_names", {}))["error"]
assert res is not None
return res
def delete_kafka_error_topic(self) -> KafkaTopics:
return cast(KafkaTopics, self.request_json(
METHOD_POST, "/kafka_topics", {
"num_partitions": 0,
}))
def read_kafka_errors(self, offset: str = "current") -> List[str]:
return cast(List[str], self.request_json(
METHOD_GET, "/kafka_msg", {
"offset": offset,
}))
def get_named_secrets(
self,
config_token: Optional[str] = None,
show_values: bool = False) -> Dict[str, Optional[str]]:
if show_values and config_token is None:
raise ValueError("config_token must be set to show_values")
return cast(Dict[str, Optional[str]], self.request_json(
METHOD_GET, "/named_secrets", {
"show": int(bool(show_values)),
"config_token": config_token,
}))
def set_named_secret(
self, config_token: str, key: str, value: str) -> bool:
return cast(SetNamedSecret, self.request_json(
METHOD_PUT, "/named_secrets", {
"key": key,
"value": value,
"config_token": config_token,
}))["replaced"]
def get_error_logs(self) -> str:
with self._raw_request_str(METHOD_GET, "/error_logs", {}) as fin:
return fin.read()
def get_known_blobs(
self,
blob_type: Optional[str] = None,
connector: Optional[str] = None) -> List[str]:
return [
res[0]
for res in self.get_known_blob_times(
retrieve_times=False,
blob_type=blob_type,
connector=connector)[1]
]
def get_known_blob_ages(
self,
blob_type: Optional[str] = None,
connector: Optional[str] = None) -> List[Tuple[str, str]]:
cur_time, blobs = self.get_known_blob_times(
retrieve_times=True, blob_type=blob_type, connector=connector)
return [
(blob_id, get_age(cur_time, blob_time))
for (blob_id, blob_time) in sorted(blobs, key=lambda el: (
safe_opt_num(el[1]), el[0]))
]
def get_known_blob_times(
self,
retrieve_times: bool,
blob_type: Optional[str] = None,
connector: Optional[str] = None,
) -> Tuple[float, List[Tuple[str, Optional[float]]]]:
obj: Dict[str, Union[int, str]] = {
"retrieve_times": int(retrieve_times),
}
if blob_type is not None:
obj["blob_type"] = blob_type
if connector is not None:
obj["connector"] = connector
res = cast(KnownBlobs, self.request_json(
METHOD_GET, "/known_blobs", obj))
return res["cur_time"], res["blobs"]
def get_triton_models(self) -> List[str]:
return cast(TritonModelsResponse, self.request_json(
METHOD_GET, "/inference_models", {}))["models"]
@staticmethod
def read_dvc(
path: str,
repo: str,
rev: Optional[str] = "HEAD",
warnings_io: Optional[IO[Any]] = sys.stderr) -> Any:
"""Reading dvc file content from git tracked DVC project.
Args:
path (str):
File path to read, relative to the root of the repo.
repo (str):
specifies the location of the DVC project. It can be a
github URL or a file system path.
rev (str):
Git commit (any revision such as a branch or tag name, or a
commit hash). If repo is not a Git repo, this option is
ignored. Default: HEAD.
warnings_io (optional IO):
IO stream where the warning will be printed to
Returns:
the content of the file.
"""
from .util import has_dvc
if not has_dvc():
if warnings_io is not None:
warnings_io.write(
"Please install dvc https://dvc.org/doc/install")
return None
import dvc.api
res = dvc.api.read(path, repo=repo, rev=rev, mode="r")
maybe_parse = maybe_json_loads(res)
if maybe_parse is not None:
return maybe_parse
return res
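    # A hedged usage sketch (path, repo URL, and variable name are placeholders,
    # not part of the original code):
    #     params = XYMEClient.read_dvc(
    #         "data/params.json",
    #         repo="https://github.com/example/project",
    #         rev="HEAD")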
@staticmethod
def get_env_str(key: str, default: Optional[str] = None) -> str:
res = os.getenv(key, default=default)
if res is None:
raise ValueError(f"environment variable {key} is not set")
return f"{res}"
@staticmethod
def get_env_int(key: str, default: Optional[int] = None) -> int:
res = os.getenv(key, default=default)
if res is None:
raise ValueError(f"environment variable {key} is not set")
return int(res)
@staticmethod
def get_env_bool(key: str, default: Optional[bool] = None) -> bool:
res = os.getenv(key, default=default)
if res is None:
raise ValueError(f"environment variable {key} is not set")
return to_bool(res)
@staticmethod
def load_json(json_path: str) -> Dict[str, Any]:
with open(json_path, "r") as fin:
return json.load(fin)
@classmethod
def load_s3_config(cls, config_path: str) -> S3Config:
return cast(S3Config, cls.load_json(config_path))
@classmethod
def download_s3_from_file(
cls, dest_path: List[str], config_path: str) -> None:
cls.download_s3(dest_path, cls.load_s3_config(config_path))
@staticmethod
def download_s3(dest_path: List[str], config: S3Config) -> None:
import boto3
s3 = boto3.client(
"s3",
aws_access_key_id=config["accern_aws_key"],
aws_secret_access_key=config["accern_aws_access_key"])
assert len(dest_path) == len(config["model_download_path"])
for (dest, path) in zip(dest_path, config["model_download_path"]):
s3.download_file(config["model_download_bucket"], path, dest)
def get_uuid(self) -> str:
return cast(UUIDResponse, self.request_json(
METHOD_GET, "/uuid", {}))["uuid"]
def delete_blobs(self, blob_uris: List[str]) -> DeleteBlobResponse:
return cast(DeleteBlobResponse, self.request_json(
METHOD_DELETE, "/blob", {
"blob_uris": blob_uris,
},
))
# *** XYMEClient ***
class DagHandle:
def __init__(
self,
client: XYMEClient,
dag_uri: str) -> None:
self._client = client
self._dag_uri = dag_uri
self._name: Optional[str] = None
self._company: Optional[str] = None
self._state: Optional[str] = None
self._is_high_priority: Optional[bool] = None
self._queue_mng: Optional[str] = None
self._nodes: Dict[str, NodeHandle] = {}
self._node_lookup: Dict[str, str] = {}
self._dynamic_error: Optional[str] = None
self._ins: Optional[List[str]] = None
self._outs: Optional[List[Tuple[str, str]]] = None
def refresh(self) -> None:
self._name = None
self._company = None
self._state = None
self._is_high_priority = None
self._queue_mng = None
self._ins = None
self._outs = None
# NOTE: we don't reset nodes
def _maybe_refresh(self) -> None:
if self._client.is_auto_refresh():
self.refresh()
def _maybe_fetch(self) -> None:
if self._name is None:
self._fetch_info()
def get_info(self) -> DagInfo:
return cast(DagInfo, self._client.request_json(
METHOD_GET, "/dag_info", {
"dag": self.get_uri(),
}))
def _fetch_info(self) -> None:
info = self.get_info()
self._name = info["name"]
self._company = info["company"]
self._state = info["state"]
self._is_high_priority = info["high_priority"]
self._queue_mng = info["queue_mng"]
self._ins = info["ins"]
self._outs = [(el[0], el[1]) for el in info["outs"]]
old_nodes = {} if self._nodes is None else self._nodes
self._nodes = {
node["id"]: NodeHandle.from_node_info(
self._client, self, node, old_nodes.get(node["id"]))
for node in info["nodes"]
}
self._node_lookup = {
node["name"]: node["id"]
for node in info["nodes"]
if node["name"] is not None
}
def get_nodes(self) -> List[str]:
self._maybe_refresh()
self._maybe_fetch()
return list(self._nodes.keys())
def get_node(self, node_name: str) -> 'NodeHandle':
self._maybe_refresh()
self._maybe_fetch()
node_id = self._node_lookup.get(node_name, node_name)
return self._nodes[node_id]
def get_uri(self) -> str:
return self._dag_uri
def get_name(self) -> str:
self._maybe_refresh()
self._maybe_fetch()
assert self._name is not None
return self._name
def get_company(self) -> str:
self._maybe_refresh()
self._maybe_fetch()
assert self._company is not None
return self._company
def get_state_type(self) -> str:
self._maybe_refresh()
self._maybe_fetch()
assert self._state is not None
return self._state
def get_timing(
self,
blacklist: Optional[List[str]] = None,
) -> TimingResult:
blist = [] if blacklist is None else blacklist
node_timing: Dict[str, NodeTiming] = {}
nodes = self.get_nodes()
        def get_filtered_times(
node_time: List[Timing]) -> Tuple[float, float, List[Timing]]:
fns = []
node_total = 0.0
for value in node_time:
if value["name"] not in blist:
fns.append(value)
node_total += value["total"]
if not fns:
return (0, 0, fns)
return (node_total, node_total / len(fns), fns)
dag_total = 0.0
for node in nodes:
node_get = self.get_node(node)
node_time = node_get.get_timing()
node_name = node_get.get_node_def()["name"]
node_id = node_get.get_id()
            node_total, avg_time, fns = get_filtered_times(node_time)
node_timing[node_id] = {
"node_name": node_name,
"node_total": node_total,
"node_avg": avg_time,
"fns": fns,
}
dag_total += node_total
node_timing_sorted = sorted(
node_timing.items(),
key=lambda x: x[1]["node_total"],
reverse=True)
return {
"dag_total": dag_total,
"nodes": node_timing_sorted,
}
def is_high_priority(self) -> bool:
self._maybe_refresh()
self._maybe_fetch()
assert self._is_high_priority is not None
return self._is_high_priority
def is_queue(self) -> bool:
self._maybe_refresh()
self._maybe_fetch()
return self._queue_mng is not None
def get_queue_mng(self) -> Optional[str]:
self._maybe_refresh()
self._maybe_fetch()
return self._queue_mng
def get_ins(self) -> List[str]:
self._maybe_refresh()
self._maybe_fetch()
assert self._ins is not None
return self._ins
def get_outs(self) -> List[Tuple[str, str]]:
self._maybe_refresh()
self._maybe_fetch()
assert self._outs is not None
return self._outs
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
with self._client.bulk_operation() as do_refresh:
if do_refresh:
self.refresh()
yield do_refresh
def set_dag(self, defs: DagDef) -> None:
self._client.set_dag(self.get_uri(), defs)
def dynamic_model(
self,
inputs: List[Any],
format_method: str = "simple",
no_cache: bool = False) -> List[Any]:
res = cast(DynamicResults, self._client.request_json(
METHOD_POST, "/dynamic_model", {
"format": format_method,
"inputs": inputs,
"no_cache": no_cache,
"dag": self.get_uri(),
}))
return res["results"]
def dynamic_list(
self,
inputs: List[Any],
input_key: Optional[str] = None,
output_key: Optional[str] = None,
split_th: Optional[int] = 1000,
max_threads: int = 50,
format_method: str = "simple",
force_keys: bool = False,
no_cache: bool = False) -> List[Any]:
if split_th is None or len(inputs) <= split_th:
res = cast(DynamicResults, self._client.request_json(
METHOD_POST, "/dynamic_list", {
"force_keys": force_keys,
"format": format_method,
"input_key": input_key,
"inputs": inputs,
"no_cache": no_cache,
"output_key": output_key,
"dag": self.get_uri(),
}))
return res["results"]
# FIXME: write generic spliterator implementation
split_num: int = split_th
assert split_num > 0
res_arr: List[Any] = [None] * len(inputs)
exc: List[Optional[BaseException]] = [None]
active_ths: Set[threading.Thread] = set()
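        # Recursively halve the input until a chunk holds at most split_num
        # items, then issue the request for that chunk with split_th=None.
        # One half of each split may run in a worker thread (bounded by
        # max_threads) while the other half runs in the current thread; the
        # first exception recorded in exc aborts further work.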
def compute_half(cur: List[Any], offset: int) -> None:
if exc[0] is not None:
return
if len(cur) <= split_num:
try:
cur_res = self.dynamic_list(
cur,
input_key=input_key,
output_key=output_key,
split_th=None,
max_threads=max_threads,
format_method=format_method,
force_keys=force_keys,
no_cache=no_cache)
res_arr[offset:offset + len(cur_res)] = cur_res
except BaseException as e: # pylint: disable=broad-except
exc[0] = e
return
half_ix: int = len(cur) // 2
args_first = (cur[:half_ix], offset)
args_second = (cur[half_ix:], offset + half_ix)
if len(active_ths) < max_threads:
comp_th = threading.Thread(
target=compute_half, args=args_first)
active_ths.add(comp_th)
comp_th.start()
compute_half(*args_second)
comp_th.join()
active_ths.remove(comp_th)
else:
compute_half(*args_first)
compute_half(*args_second)
compute_half(inputs, 0)
for remain_th in active_ths:
remain_th.join()
raise_e = exc[0]
try:
if isinstance(raise_e, BaseException):
raise raise_e # pylint: disable=raising-bad-type
except RequestException as e:
raise ValueError(
"request error while processing. processing time per batch "
"might be too large. try reducing split_th") from e
return res_arr
def dynamic(self, input_data: BytesIO) -> ByteResponse:
cur_res, ctype = self._client.request_bytes(
METHOD_FILE, "/dynamic", {
"dag": self.get_uri(),
}, files={
"file": input_data,
})
return interpret_ctype(cur_res, ctype)
def dynamic_obj(self, input_obj: Any) -> ByteResponse:
bio = BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
return self.dynamic(bio)
def dynamic_async(
self, input_data: List[BytesIO]) -> List['ComputationHandle']:
names = [f"file{pos}" for pos in range(len(input_data))]
res: Dict[str, str] = self._client.request_json(
METHOD_FILE, "/dynamic_async", {
"dag": self.get_uri(),
}, files=dict(zip(names, input_data)))
return [
ComputationHandle(
self,
res[name],
self.get_dynamic_error_message,
self.set_dynamic_error_message)
for name in names]
def set_dynamic_error_message(self, msg: Optional[str]) -> None:
self._dynamic_error = msg
def get_dynamic_error_message(self) -> Optional[str]:
return self._dynamic_error
def dynamic_async_obj(
self, input_data: List[Any]) -> List['ComputationHandle']:
return self.dynamic_async([
BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
for input_obj in input_data
])
def get_dynamic_result(self, value_id: str) -> ByteResponse:
try:
cur_res, ctype = self._client.request_bytes(
METHOD_GET, "/dynamic_result", {
"dag": self.get_uri(),
"id": value_id,
})
except HTTPError as e:
if e.response.status_code == 404:
raise KeyError(f"value_id {value_id} does not exist") from e
raise e
return interpret_ctype(cur_res, ctype)
def get_dynamic_status(
self,
value_ids: List['ComputationHandle']) -> Dict[
'ComputationHandle', QueueStatus]:
res = cast(DynamicStatusResponse, self._client.request_json(
METHOD_POST, "/dynamic_status", {
"value_ids": [value_id.get_id() for value_id in value_ids],
"dag": self.get_uri(),
}))
status = res["status"]
hnd_map = {value_id.get_id(): value_id for value_id in value_ids}
return {
hnd_map[key]: cast(QueueStatus, value)
for key, value in status.items()
}
def get_dynamic_bulk(
self,
input_data: List[BytesIO],
max_buff: int = 4000,
block_size: int = 5,
num_threads: int = 20) -> Iterable[ByteResponse]:
def get(hnd: 'ComputationHandle') -> ByteResponse:
return hnd.get()
success = False
try:
yield from async_compute(
input_data,
self.dynamic_async,
get,
lambda: self.check_queue_stats(minimal=True),
self.get_dynamic_status,
max_buff,
block_size,
num_threads)
success = True
finally:
if success:
self.set_dynamic_error_message(None)
def get_dynamic_bulk_obj(
self,
input_data: List[Any],
max_buff: int = 4000,
block_size: int = 5,
num_threads: int = 20) -> Iterable[ByteResponse]:
def get(hnd: 'ComputationHandle') -> ByteResponse:
return hnd.get()
success = False
try:
yield from async_compute(
input_data,
self.dynamic_async_obj,
get,
lambda: self.check_queue_stats(minimal=True),
self.get_dynamic_status,
max_buff,
block_size,
num_threads)
success = True
finally:
if success:
self.set_dynamic_error_message(None)
def _pretty(
self,
nodes_only: bool,
allow_unicode: bool,
method: Optional[str] = "accern",
fields: Optional[List[str]] = None) -> PrettyResponse:
args = {
"dag": self.get_uri(),
"nodes_only": nodes_only,
"allow_unicode": allow_unicode,
"method": method,
}
if fields is not None:
args["fields"] = ",".join(fields)
return cast(PrettyResponse, self._client.request_json(
METHOD_GET, "/pretty", args))
def pretty(
self,
nodes_only: bool = False,
allow_unicode: bool = True,
method: Optional[str] = "dot",
fields: Optional[List[str]] = None,
output_format: Optional[str] = "png",
display: Optional[IO[Any]] = sys.stdout) -> Optional[str]:
def render(value: str) -> Optional[str]:
if display is not None:
display.write(value)
display.flush()
return None
return value
graph_str = self._pretty(
nodes_only=nodes_only,
allow_unicode=allow_unicode,
method=method,
fields=fields)["pretty"]
if method == "accern":
return render(graph_str)
if method == "dot":
try:
from graphviz import Source
graph = Source(graph_str)
if output_format == "dot":
return render(graph_str)
if output_format == "svg":
svg_str = graph.pipe(format="svg")
if display is not None:
if not is_jupyter():
display.write(
"Warning: Ipython instance not found.\n")
display.write(svg_str)
display.flush()
else:
from IPython.display import display as idisplay
from IPython.display import SVG
idisplay(SVG(svg_str))
return None
return svg_str
if output_format == "png":
graph = Source(graph_str)
png_str = graph.pipe(format="png")
if display is not None:
if not is_jupyter():
display.write(
"Warning: Ipython instance not found.\n")
display.write(png_str)
display.flush()
else:
from IPython.display import display as idisplay
from IPython.display import Image
idisplay(Image(png_str))
return None
return png_str
if output_format == "ascii":
if not has_graph_easy():
return render(graph_str)
                    import subprocess
                    # pipe the dot source into graph-easy and capture its
                    # ASCII rendering
                    proc = subprocess.run(
                        ["graph-easy"],
                        input=graph_str.encode("utf-8"),
                        stdout=subprocess.PIPE,
                        check=True)
                    return render(proc.stdout.decode("utf-8"))
raise ValueError(
f"invalid format {output_format}, "
"use svg, png, ascii, or dot")
except ExecutableNotFound as e:
raise RuntimeError(
"use 'brew install graphviz' or use 'method=accern'",
) from e
raise ValueError(
f"invalid method {method}, use accern or dot")
def pretty_obj(
self,
nodes_only: bool = False,
allow_unicode: bool = True,
fields: Optional[List[str]] = None) -> List[DagPrettyNode]:
return self._pretty(
nodes_only=nodes_only,
allow_unicode=allow_unicode,
fields=fields)["nodes"]
def get_def(self, full: bool = True) -> DagDef:
return cast(DagDef, self._client.request_json(
METHOD_GET, "/dag_def", {
"dag": self.get_uri(),
"full": full,
}))
def set_attr(self, attr: str, value: Any) -> None:
dag_def = self.get_def()
dag_def[attr] = value # type: ignore
self._client.set_dag(self.get_uri(), dag_def)
def set_name(self, value: str) -> None:
self.set_attr("name", value)
def set_company(self, value: str) -> None:
self.set_attr("company", value)
def set_state(self, value: str) -> None:
self.set_attr("state", value)
def set_high_priority(self, value: bool) -> None:
self.set_attr("high_priority", value)
def set_queue_mng(self, value: Optional[str]) -> None:
self.set_attr("queue_mng", value)
@overload
def check_queue_stats( # pylint: disable=no-self-use
self, minimal: Literal[True]) -> MinimalQueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self, minimal: Literal[False]) -> QueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self, minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
...
def check_queue_stats(self, minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
return self._client.check_queue_stats(self.get_uri(), minimal=minimal)
def scale_worker(self, replicas: int) -> int:
return cast(WorkerScale, self._client.request_json(
METHOD_PUT, "/worker", {
"dag": self.get_uri(),
"replicas": replicas,
"task": None,
}))["num_replicas"]
def reload(self, timestamp: Optional[float] = None) -> float:
return cast(DagReload, self._client.request_json(
METHOD_PUT, "/dag_reload", {
"dag": self.get_uri(),
"when": timestamp,
}))["when"]
def get_kafka_input_topic(self, postfix: str = "") -> str:
res = cast(KafkaTopicNames, self._client.request_json(
METHOD_GET, "/kafka_topic_names", {
"dag": self.get_uri(),
"postfix": postfix,
"no_output": True,
}))["input"]
assert res is not None
return res
def get_kafka_output_topic(self) -> str:
res = cast(KafkaTopicNames, self._client.request_json(
METHOD_GET, "/kafka_topic_names", {
"dag": self.get_uri(),
}))["output"]
assert res is not None
return res
def set_kafka_topic_partitions(
self,
num_partitions: int,
postfix: str = "",
large_input_retention: bool = False,
no_output: bool = False) -> KafkaTopics:
return cast(KafkaTopics, self._client.request_json(
METHOD_POST, "/kafka_topics", {
"dag": self.get_uri(),
"num_partitions": num_partitions,
"postfix": postfix,
"large_input_retention": large_input_retention,
"no_output": no_output,
}))
def post_kafka_objs(self, input_objs: List[Any]) -> List[str]:
bios = [
BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
for input_obj in input_objs
]
return self.post_kafka_msgs(bios)
def post_kafka_msgs(
self,
input_data: List[BytesIO],
postfix: str = "") -> List[str]:
names = [f"file{pos}" for pos in range(len(input_data))]
res = cast(KafkaMessage, self._client.request_json(
METHOD_FILE, "/kafka_msg", {
"dag": self.get_uri(),
"postfix": postfix,
}, files=dict(zip(names, input_data))))
msgs = res["messages"]
return [msgs[key] for key in names]
def read_kafka_output(
self,
offset: str = "current",
max_rows: int = 100) -> Optional[ByteResponse]:
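        # Read messages one at a time until max_rows results are collected or
        # the topic yields nothing. The explicit offset is only used for the
        # first read; subsequent reads continue from "current". All reads must
        # return the same content type so the results can be merged.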
offset_str = [offset]
def read_single() -> Tuple[ByteResponse, str]:
cur, read_ctype = self._client.request_bytes(
METHOD_GET, "/kafka_msg", {
"dag": self.get_uri(),
"offset": offset_str[0],
})
offset_str[0] = "current"
return interpret_ctype(cur, read_ctype), read_ctype
if max_rows <= 1:
return read_single()[0]
res: List[ByteResponse] = []
ctype: Optional[str] = None
while True:
val, cur_ctype = read_single()
if val is None:
break
if ctype is None:
ctype = cur_ctype
elif ctype != cur_ctype:
raise ValueError(
f"inconsistent return types {ctype} != {cur_ctype}")
res.append(val)
if len(res) >= max_rows:
break
if not res or ctype is None:
return None
return merge_ctype(res, ctype)
def get_kafka_offsets(
self, alive: bool, postfix: Optional[str] = None) -> KafkaOffsets:
args = {
"dag": self.get_uri(),
"alive": int(alive),
}
if postfix is not None:
args["postfix"] = postfix
return cast(KafkaOffsets, self._client.request_json(
METHOD_GET, "/kafka_offsets", args))
def get_kafka_throughput(
self,
postfix: Optional[str] = None,
segment_interval: float = 120.0,
segments: int = 5) -> KafkaThroughput:
assert segments > 0
assert segment_interval > 0.0
offsets = self.get_kafka_offsets(postfix=postfix, alive=False)
now = time.monotonic()
measurements: List[Tuple[int, int, int, float]] = [(
offsets["input"],
offsets["output"],
offsets["error"],
now,
)]
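        # Take one additional offset snapshot per segment, spaced roughly
        # segment_interval seconds apart, then derive overall and per-segment
        # throughput from the differences between consecutive snapshots.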
for _ in range(segments):
prev = now
while now - prev < segment_interval:
time.sleep(max(0.0, segment_interval - (now - prev)))
now = time.monotonic()
offsets = self.get_kafka_offsets(postfix=postfix, alive=False)
measurements.append((
offsets["input"],
offsets["output"],
offsets["error"],
now,
))
first = measurements[0]
last = measurements[-1]
total_input = last[0] - first[0]
total_output = last[1] - first[1]
errors = last[2] - first[2]
total = last[3] - first[3]
input_segments: List[float] = []
output_segments: List[float] = []
cur_input = first[0]
cur_output = first[1]
cur_time = first[3]
for (next_input, next_output, _, next_time) in measurements[1:]:
seg_time = next_time - cur_time
input_segments.append((next_input - cur_input) / seg_time)
output_segments.append((next_output - cur_output) / seg_time)
cur_input = next_input
cur_output = next_output
cur_time = next_time
inputs = pd.Series(input_segments)
outputs = pd.Series(output_segments)
return {
"dag": self.get_uri(),
"input": {
"throughput": total_input / total,
"max": inputs.max(),
"min": inputs.min(),
"stddev": inputs.std(),
"segments": segments,
"count": total_input,
"total": total,
},
"output": {
"throughput": total_output / total,
"max": outputs.max(),
"min": outputs.min(),
"stddev": outputs.std(),
"segments": segments,
"count": total_output,
"total": total,
},
"faster": "both" if total_input == total_output else (
"input" if total_input > total_output else "output"),
"errors": errors,
}
def get_kafka_group(self) -> KafkaGroup:
return cast(KafkaGroup, self._client.request_json(
METHOD_GET, "/kafka_group", {
"dag": self.get_uri(),
}))
def set_kafka_group(
self,
group_id: Optional[str] = None,
reset: Optional[str] = None,
**kwargs: Any) -> KafkaGroup:
return cast(KafkaGroup, self._client.request_json(
METHOD_PUT, "/kafka_group", {
"dag": self.get_uri(),
"group_id": group_id,
"reset": reset,
**kwargs,
}))
def delete(self) -> DeleteBlobResponse:
return cast(DeleteBlobResponse, self._client.request_json(
METHOD_DELETE, "/blob", {
"blob_uris": [self.get_uri()],
},
))
def __hash__(self) -> int:
return hash(self.get_uri())
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
return self.get_uri() == other.get_uri()
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __str__(self) -> str:
return self.get_uri()
def __repr__(self) -> str:
return f"{self.__class__.__name__}[{self.get_uri()}]"
# *** DagHandle ***
class NodeHandle:
def __init__(
self,
client: XYMEClient,
dag: DagHandle,
node_id: str,
node_name: str,
kind: str) -> None:
self._client = client
self._dag = dag
self._node_id = node_id
self._node_name = node_name
self._type = kind
self._blobs: Dict[str, BlobHandle] = {}
self._inputs: Dict[str, Tuple[str, str]] = {}
self._state: Optional[int] = None
self._config_error: Optional[str] = None
self._is_model: Optional[bool] = None
def as_owner(self) -> BlobOwner:
return {
"owner_dag": self.get_dag().get_uri(),
"owner_node": self.get_id(),
}
@staticmethod
def from_node_info(
client: XYMEClient,
dag: DagHandle,
node_info: NodeInfo,
prev: Optional['NodeHandle']) -> 'NodeHandle':
if prev is None:
res = NodeHandle(
client,
dag,
node_info["id"],
node_info["name"],
node_info["type"])
else:
if prev.get_dag() != dag:
raise ValueError(f"{prev.get_dag()} != {dag}")
res = prev
res.update_info(node_info)
return res
def update_info(self, node_info: NodeInfo) -> None:
if self.get_id() != node_info["id"]:
raise ValueError(f"{self._node_id} != {node_info['id']}")
self._node_name = node_info["name"]
self._type = node_info["type"]
self._blobs = {
key: BlobHandle(self._client, value, is_full=False)
for (key, value) in node_info["blobs"].items()
}
self._inputs = node_info["inputs"]
self._state = node_info["state"]
self._config_error = node_info["config_error"]
def get_dag(self) -> DagHandle:
return self._dag
def get_id(self) -> str:
return self._node_id
def get_name(self) -> str:
return self._node_name
def get_type(self) -> str:
return self._type
def get_node_def(self) -> NodeDefInfo:
return self._client.get_node_defs()[self.get_type()]
def get_inputs(self) -> Set[str]:
return set(self._inputs.keys())
def get_input(self, key: str) -> Tuple['NodeHandle', str]:
node_id, out_key = self._inputs[key]
return self.get_dag().get_node(node_id), out_key
def get_status(self) -> TaskStatus:
return cast(NodeStatus, self._client.request_json(
METHOD_GET, "/node_status", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["status"]
def has_config_error(self) -> bool:
return self._config_error is not None
def get_config_error(self) -> Optional[str]:
return self._config_error
def get_blobs(self) -> List[str]:
return sorted(self._blobs.keys())
def get_blob_handles(self) -> Dict[str, 'BlobHandle']:
return self._blobs
def get_blob_handle(self, key: str) -> 'BlobHandle':
return self._blobs[key]
def set_blob_uri(self, key: str, blob_uri: str) -> str:
return cast(PutNodeBlob, self._client.request_json(
METHOD_PUT, "/node_blob", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"blob_key": key,
"blob_uri": blob_uri,
}))["new_uri"]
def get_in_cursor_states(self) -> Dict[str, int]:
return cast(InCursors, self._client.request_json(
METHOD_GET, "/node_in_cursors", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["cursors"]
def get_highest_chunk(self) -> int:
return cast(NodeChunk, self._client.request_json(
METHOD_GET, "/node_chunk", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["chunk"]
def get_short_status(self, allow_unicode: bool = True) -> str:
status_map: Dict[TaskStatus, str] = {
"blocked": "B",
"waiting": "W",
"running": "→" if allow_unicode else "R",
"complete": "✓" if allow_unicode else "C",
"eos": "X",
"paused": "P",
"error": "!",
"unknown": "?",
"virtual": "∴" if allow_unicode else "V",
"queue": "=",
}
return status_map[self.get_status()]
def get_logs(self) -> str:
with self._client._raw_request_str(
METHOD_GET, "/node_logs", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}) as fin:
return fin.read()
def get_timing(self) -> List[Timing]:
return cast(Timings, self._client.request_json(
METHOD_GET, "/node_perf", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["times"]
def read_blob(
self,
key: str,
chunk: Optional[int],
force_refresh: bool) -> 'BlobHandle':
# FIXME: !!!!!! explicitly repeat on timeout
dag = self.get_dag()
res = cast(ReadNode, self._client.request_json(
METHOD_POST, "/read_node", {
"dag": dag.get_uri(),
"node": self.get_id(),
"key": key,
"chunk": chunk,
"is_blocking": True,
"force_refresh": force_refresh,
}))
uri = res["result_uri"]
if uri is None:
raise ValueError(f"uri is None: {res}")
return BlobHandle(self._client, uri, is_full=True)
def read(
self,
key: str,
chunk: Optional[int],
force_refresh: bool = False,
filter_id: bool = True) -> Optional[ByteResponse]:
content = self.read_blob(key, chunk, force_refresh).get_content()
if filter_id and isinstance(content, pd.DataFrame):
            content = pd.DataFrame(content[content["row_id"] >= 0])  # target API: pandas.DataFrame

import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import OPTICS
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from hurst import compute_Hc
from arch.unitroot import ADF
import itertools
import typing
class ClusteringPairSelection:
performance_features: pd.DataFrame = pd.DataFrame()
explained_variance: pd.Series = pd.Series()
_clusters: pd.Series = pd.Series()
pairs_list: typing.List[typing.Tuple]
cointegrated_pairs_list: typing.List[typing.List]
cointegration_result: pd.DataFrame = pd.DataFrame()
filtered_pairs: pd.DataFrame = pd.DataFrame()
spreads: pd.DataFrame = pd.DataFrame()
def __init__(self, price: pd.DataFrame):
price_no_na = price.dropna(axis=1)
n_dropped = price.shape[1] - price_no_na.shape[1]
print(f"Dropped {n_dropped} columns out of {price.shape[1]}")
self.price = price_no_na
self.log_price = np.log(price_no_na)
self.performance = self.log_price.diff().iloc[1:]
self.normal_performance = StandardScaler().fit_transform(self.performance)
def select_pairs(self):
print("Converting prices to features...")
self.returns_to_features(5)
pd.Series(self.explained_variance).plot(kind='bar', title="Cumulative explained variance")
plt.show()
print("Creating clusters....")
self.create_clusters(3)
self.clusters.plot(kind='bar', title='Clusters, % of Allocated samples')
plt.show()
self.plot_clusters()
print("Running cointegration check....")
self.check_cointegration()
print("Estimating selection criteria...")
self._calculate_hurst_exponent()
self._calculate_half_life()
print("Applying filters...")
self._apply_post_cointegration_filters()
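    # A minimal usage sketch (assumes `prices` is a DataFrame of close prices,
    # one column per asset and indexed by date; not part of the original code):
    #     selector = ClusteringPairSelection(prices)
    #     selector.select_pairs()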
def returns_to_features(self, n_components):
pca = PCA(n_components=n_components)
transposed_returns = self.normal_performance.T
pca.fit(transposed_returns)
reduced_returns = pd.DataFrame(transposed_returns.dot(pca.components_.T), index=self.performance.columns)
self.explained_variance = pca.explained_variance_ratio_.cumsum()
self.performance_features = reduced_returns
def create_clusters(self, min_samples):
optics = OPTICS(min_samples=min_samples)
clustering = optics.fit(self.performance_features)
        noise_ratio = len(clustering.labels_[clustering.labels_ == -1]) / len(clustering.labels_)
        print(f"OPTICS left {noise_ratio:.1%} of samples unclustered (label -1)")
classified = pd.Series(clustering.labels_, index=self.performance.columns)
self._clusters = classified
self._create_cluster_based_pairs()
@property
def clusters(self):
        clusters = pd.Series(self._clusters.index.values, index=self._clusters)  # target API: pandas.Series

# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import datetime
import multiprocessing
import os
import pandas as pd
import numpy as np
import gensim
from gensim.models.doc2vec import Doc2Vec
import config
def tokenizer(s):
return s.strip().split()
def Cosine(vec1, vec2):
vec1 = np.array(vec1, dtype=np.float)
vec2 = np.array(vec2, dtype=np.float)
res = np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2) + 0.000001)
return res
def Manhattan(vec1, vec2):
vec1 = np.array(vec1, dtype=np.float)
vec2 = np.array(vec2, dtype=np.float)
res = np.sum(np.abs(vec1 - vec2))
# res = np.linalg.norm(vec1 - vec2, ord=1)
return res
def Euclidean(vec1, vec2):
vec1 = np.array(vec1, dtype=np.float)
vec2 = np.array(vec2, dtype=np.float)
res = np.sqrt(np.sum(np.square(vec1 - vec2)))
# res = np.linalg.norm(vec1-vec2)
return res
def PearsonSimilar(vec1, vec2):
vec1 = np.array(vec1, dtype=np.float)
vec2 = np.array(vec2, dtype=np.float)
data = np.vstack((vec1, vec2))
return pd.DataFrame(data).T.corr('pearson')[0][1]
def SpearmanSimilar(vec1, vec2):
vec1 = np.array(vec1, dtype=np.float)
vec2 = np.array(vec2, dtype=np.float)
data = np.vstack((vec1, vec2))
    return pd.DataFrame(data).T.corr('spearman')[0][1]  # target API: pandas.DataFrame

# -*- coding: UTF-8 -*-
# ********************************************************
# * Author : <NAME>
# * Email : <EMAIL>
# * Create time : 2021-07-26 17:03
# * Last modified : 2021-07-27 13:17
# * Filename : quant.py
# * Description :
# *********************************************************
import pandas as pd
import os
import json
from datetime import timedelta, datetime
from dplearn.tools import tick_start, tick_end
# =============================================================================
# ##### K Line Wrapping #####
# =============================================================================
def wrapKLine(data, open_c, close_c, high_c, low_c, vol_c, ts_c, ts_format, wrap):
"""
    This function wraps a K-line dataframe into a longer-duration one.
Input:
data: [pandas dataframe] K-Line dataframe
open_c: [string] Column name of open price
close_c: [string] Column name of close price
high_c: [string] Column name of highest price
low_c: [string] Column name of lowest price
        vol_c: [string] Column name of volume
ts_c: [string] Column name of timestamp
ts_format: [string] Format of timestamp in input data (eg: "%Y-%m-%d %H:%M:%S")
wrap: [string] Time range that you want to wrap
Output:
Pandas dataframe
"""
tick_start("Wraping K Line data")
col_list = [open_c, close_c, high_c, low_c, vol_c, ts_c]
df = data[col_list]
    df[ts_c] = pd.to_datetime(df[ts_c], format=ts_format)  # target API: pandas.to_datetime

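# --- Hedged illustration (not part of the original snippet) ------------------
# The docstring above describes wrapping short-period K-lines into a longer
# duration. A minimal, self-contained way to express that with pandas is
# resample(): open = first, high = max, low = min, close = last, volume = sum.
# The column names, timestamps, and the 3-minute target below are assumptions
# made for this example only.
if __name__ == "__main__":
    minute_bars = pd.DataFrame({
        "ts": pd.date_range("2021-07-26 09:30", periods=6, freq="1min"),
        "open": [10.0, 10.1, 10.2, 10.3, 10.4, 10.5],
        "high": [10.2, 10.3, 10.4, 10.5, 10.6, 10.7],
        "low": [9.9, 10.0, 10.1, 10.2, 10.3, 10.4],
        "close": [10.1, 10.2, 10.3, 10.4, 10.5, 10.6],
        "vol": [100, 120, 90, 110, 130, 80],
    })
    wrapped = minute_bars.set_index("ts").resample("3min").agg({
        "open": "first",
        "high": "max",
        "low": "min",
        "close": "last",
        "vol": "sum",
    })
    print(wrapped)
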
""" test scalar indexing, including at and iat """
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.tests.indexing.common import Base
class TestScalar(Base):
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_get(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
result = getattr(f, func)[i]
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(result, expected)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
self.check_values(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
@pytest.mark.parametrize("kind", ["series", "frame"])
def test_at_and_iat_set(self, kind):
def _check(f, func, values=False):
if f is not None:
indices = self.generate_indices(f, values)
for i in indices:
getattr(f, func)[i] = 1
expected = self.get_value(func, f, i, values)
tm.assert_almost_equal(expected, 1)
d = getattr(self, kind)
# iat
for f in [d["ints"], d["uints"]]:
_check(f, "iat", values=True)
for f in [d["labels"], d["ts"], d["floats"]]:
if f is not None:
msg = "iAt based indexing can only have integer indexers"
with pytest.raises(ValueError, match=msg):
_check(f, "iat")
# at
for f in [d["ints"], d["uints"], d["labels"], d["ts"], d["floats"]]:
_check(f, "at")
class TestAtAndiAT:
# at and iat tests that don't need Base class
def test_float_index_at_iat(self):
ser = Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in ser.items():
assert ser.at[el] == item
for i in range(len(ser)):
assert ser.iat[i] == i + 1
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
        dates = date_range("1/1/2000", periods=8)  # target API: pandas.date_range

import pandas as pd
from argparse import ArgumentParser
def save_new_labels(df_labels: pd.DataFrame, filename="new_labels.csv"):
df_labels.to_csv(filename, index_label="img_name")
def move_labels(df_labels: pd.DataFrame):
df_moved_labels = pd.DataFrame(index=df_labels.index[:-1], columns=df_labels.columns)
df_moved_labels[["steer", "steer_angle", "velocity"]] = df_labels[["steer", "steer_angle", "velocity"]].values[1:]
save_new_labels(df_moved_labels, filename="first_dataset.csv")
print(df_moved_labels)
def main(args):
labels_path = args.labels_path
    df_labels = pd.read_csv(labels_path, index_col="img_name")  # target API: pandas.read_csv

import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@pytest.fixture
def four_level_index_dataframe():
arr = np.array(
[
[-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
[0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
[-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
]
)
index = MultiIndex(
levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
names=["one", "two", "three", "four"],
)
return DataFrame(arr, index=index, columns=list("ABCDE"))
@pytest.mark.parametrize(
"key, level, exp_arr, exp_index",
[
("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
],
)
def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
# see gh-2903
arr = np.random.randn(4, 4)
index = MultiIndex(
levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
names=["lvl0", "lvl1"],
)
df = DataFrame(arr, columns=index)
result = df.xs(key, level=level, axis=1)
expected = DataFrame(exp_arr(arr), columns=exp_index)
tm.assert_frame_equal(result, expected)
def test_xs_values(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two")).values
expected = df.values[4]
tm.assert_almost_equal(result, expected)
def test_xs_loc_equality(multiindex_dataframe_random_data):
df = multiindex_dataframe_random_data
result = df.xs(("bar", "two"))
expected = df.loc[("bar", "two")]
    tm.assert_series_equal(result, expected)  # target API: pandas._testing.assert_series_equal

from elasticsearch import Elasticsearch
import os
import pandas as pd
from typing import List, Dict, Callable, Any, Union, Tuple
from copy import deepcopy
import numpy as np
import re
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)  # target API: pandas.set_option

import pandas as pd
import argparse
import os
class Node():
def __init__(self, id, parent="", seq="", phen=""):
self.id = id
self.parent = parent
self.children = []
self.seq = seq
self.phenotype = phen
def lookup_phenotype(row, genotype_bank):
if row["sequence"] in genotype_bank.index:
return genotype_bank.loc[row["sequence"], "task_profile"]
else:
return ""
def main():
parser = argparse.ArgumentParser(description="Standards phylogeny file to ggmuller input files converter.")
parser.add_argument("input", type=str, nargs='+', help="Input files")
parser.add_argument("-output_prefix", "-out", type=str, help="Prefix to add to output file names")
parser.add_argument("-treatment", "-treatment", type=str, required=True, help="What treatment is this run from?")
parser.add_argument("-run_id", "-run", type=str, required=True, help="What run is this run from?")
parser.add_argument("-genotype_bank", "-genotypes", type=str, required=True, help="File mapping genotypes to phenotypes")
# Parse command line arguments.
args = parser.parse_args()
# # Extract/validate arguments
# in_fp = args.input
# if (not os.path.isfile(in_fp)):
# exit("Failed to find provided input file ({})".format(in_fp))
if (args.output_prefix != None):
adj_file_name = args.output_prefix + "_adjacency.csv"
pop_file_name = args.output_prefix + "_pop_info.csv"
else:
adj_file_name = ".".join(args.input.split(".")[:-1]) + "_adjacency.csv"
pop_file_name = ".".join(args.input.split(".")[:-1]) + "_pop_info.csv"
# adj_file = adj_file.astype(dtype={"Identity":"object","Parent":"object"})
    pop_file = pd.DataFrame({"Identity": [], "Population": [], "Time": []})  # target API: pandas.DataFrame

from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
        # GH 2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
    def test_dtype_coercion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
    def test_concat_series_axis1(self, sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
# use check_index_type=True to check the result have
# RangeIndex (default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
    def test_concat_ordered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
# Warn and sort by default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = Series({"a": 1, "b": 2}, name=s1name)
s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
            pd.Timestamp("2017-01-01", tz="US/Pacific"),
            pd.Timestamp("2017-01-02", tz="US/Pacific"),
            "a",
            "b",
        ]
    )
    tm.assert_series_equal(result, expected)
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = pd.Series(numeric).astype("category")
if 2 not in expected.cat.categories:
# i.e. categories should be [1, 2] even if there are no "B"s present
# GH#44940
expected = expected.cat.add_categories(2)
tm.assert_series_equal(expected, result)
def test_replace_categorical_single(self):
# GH 26988
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
s = pd.Series(dti)
c = s.astype("category")
expected = c.copy()
expected = expected.cat.add_categories("foo")
expected[2] = "foo"
expected = expected.cat.remove_unused_categories()
assert c[2] != "foo"
result = c.replace(c[2], "foo")
tm.assert_series_equal(expected, result)
assert c[2] != "foo" # ensure non-inplace call does not alter original
return_value = c.replace(c[2], "foo", inplace=True)
assert return_value is None
tm.assert_series_equal(expected, c)
first_value = c[0]
return_value = c.replace(c[1], c[0], inplace=True)
assert return_value is None
assert c[0] == c[1] == first_value # test replacing with existing value
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
s = pd.Series([0, 1, 2, 3, 4])
result = s.replace([3], ["100000000000000000000"])
expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
tm.assert_series_equal(result, expected)
s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
result = s.replace(["100000000000000000000"], [1])
expected = pd.Series([0, 1, "100000000000000000001"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, to_replace, exp",
[
([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
(["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
],
)
def test_replace_commutative(self, ser, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
series = pd.Series(ser)
expected = pd.Series(exp)
result = series.replace(to_replace)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
)
def test_replace_no_cast(self, ser, exp):
# GH 9113
# BUG: replace int64 dtype with bool coerces to int64
series = pd.Series(ser)
result = series.replace(2, True)
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
series = pd.Series(["a", "b", "c "])
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
series.replace(lambda x: x.strip())
@pytest.mark.parametrize("frame", [False, True])
def test_replace_nonbool_regex(self, frame):
obj = pd.Series(["a", "b", "c "])
if frame:
obj = obj.to_frame()
msg = "'to_replace' must be 'None' if 'regex' is not a bool"
with pytest.raises(ValueError, match=msg):
obj.replace(to_replace=["a"], regex="foo")
@pytest.mark.parametrize("frame", [False, True])
def test_replace_empty_copy(self, frame):
obj = pd.Series([], dtype=np.float64)
if frame:
obj = obj.to_frame()
res = obj.replace(4, 5, inplace=True)
assert res is None
res = obj.replace(4, 5, inplace=False)
tm.assert_equal(res, obj)
assert res is not obj
def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
to_replace = 1
value = {0: "foo", 2: "bar"}
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
def test_replace_extension_other(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/34530
obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
result = obj.replace("", "") # no exception
# should not have changed dtype
tm.assert_equal(obj, result)
def _check_replace_with_method(self, ser: pd.Series):
df = ser.to_frame()
res = ser.replace(ser[1], method="pad")
expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
tm.assert_series_equal(res, expected)
res_df = df.replace(ser[1], method="pad")
tm.assert_frame_equal(res_df, expected.to_frame())
ser2 = ser.copy()
res2 = ser2.replace(ser[1], method="pad", inplace=True)
assert res2 is None
tm.assert_series_equal(ser2, expected)
res_df2 = df.replace(ser[1], method="pad", inplace=True)
assert res_df2 is None
tm.assert_frame_equal(df, expected.to_frame())
def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
ser = pd.Series(arr)
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_interval_with_method(self, as_categorical):
# in particular interval that can't hold NA
idx = pd.IntervalIndex.from_breaks(range(4))
ser = pd.Series(idx)
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_period", [True, False])
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_datetimelike_with_method(self, as_period, as_categorical):
idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
if as_period:
idx = idx.tz_localize(None).to_period("D")
        ser = pd.Series(idx)
        if as_categorical:
            ser = ser.astype("category")
        self._check_replace_with_method(ser)
"""
The script generated required result for experiments
"""
import pandas as pd
import numpy as np
import scipy.stats
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import psycopg2 as ps
import squarify
import textblob as tb
import nltk
from collections import Counter
from joblib import dump
from sklearn import tree
from sklearn.metrics import *
from sklearn.model_selection import ShuffleSplit, GridSearchCV, train_test_split
from sklearn.tree import plot_tree
def compute_requirements_stats():
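    """Print basic statistics for the requirements dataset: its shape, the
    number of requirements per project, and the number of projects.
    Note: the quit() call below exits before the CCTNS word-count block runs.
    """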
df = pd.read_excel(r'data/DataLast/dataset4.xls')
print(df.shape)
counter = Counter(df.iloc[:, 1])
print('Projects and requirements: ', counter)
print('# Projects: ', len(counter))
quit()
df2 = df.loc[(df['File'] == '0000 - cctns.xml')]['Requirement_text']
# print(df2)
word_count = 0
for req in df2.iteritems():
blob = tb.TextBlob(req[1])
# print(blob.words)
word_count += len(blob.words)
print(word_count)
def compute_smell_prevalence():
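    """Count the occurrences of each smell type per requirement in the
    ground-truth dataset, together with requirement length in words, and
    export the per-requirement frequencies in wide and long form to Excel.
    """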
df = pd.read_excel(r'data/DataLast/dataset1kv1.xlsx')
countLine = 0
projects_name_list = list()
my_values = []
my_lables = []
numberOfTotalSmells = []
number_of_no_clean_word = []
numberOfSubjectiveSmell = []
numberOfAmbigAdjAdvSmell = []
numberOfLoopholeSmell = []
numberOfOpenendedSmell = []
numberOfSuperlativeSmell = []
numberOfComparativeSmell = []
numberOfNegativeSmell = []
numberOfPronounsSmell = []
numberOfNUncertainSmell = []
numberOfPolysemySmells = []
for index, row in df.iterrows():
smell_number = 0
SubjectiveNum = 0
AmbigAdjAdvNum = 0
LoopholeNum = 0
OpenendedNum = 0
SuperlativeNum = 0
ComparativeNum = 0
NegativeNum = 0
PronounsNum = 0
UncertainNum = 0
PolysemyNum = 0
# Modify project name:
if row['File'] == '2007-ertms.xml':
projects_name_list.append('ERTMS/ETCS')
elif row['File'] == '0000 - cctns.xml':
projects_name_list.append('CCTNS')
elif row['File'] == '2007-eirene_fun_7-2.xml':
projects_name_list.append('EIRENE')
elif row['File'] == '2008 - keepass.xml':
projects_name_list.append('KeePass')
elif row['File'] == '0000 - gamma j.xml':
projects_name_list.append('Gamma-J')
elif row['File'] == 'NEW - 2008 - peering.xml':
projects_name_list.append('Peering')
else:
projects_name_list.append('not_set')
countLine = countLine + 1
my_values.append(len(row['Requirement_text'].split(" ")))
my_lables.append('R' + str(countLine))
        if row['Subjective_lang.'] != '-':
            SubjectiveNum = len(row['Subjective_lang.'].split("*"))
        else:
            SubjectiveNum = 0
        smell_number += SubjectiveNum
        numberOfSubjectiveSmell.append(SubjectiveNum)
if row['Ambiguous_adv._adj.'] != '-':
AmbigAdjAdvNum = len(row['Ambiguous_adv._adj.'].split("*"))
else:
AmbigAdjAdvNum = 0
smell_number += AmbigAdjAdvNum
numberOfAmbigAdjAdvSmell.append(AmbigAdjAdvNum)
if row['Loophole'] != '-':
LoopholeNum = len(row['Loophole'].split("*"))
else:
LoopholeNum = 0
smell_number += LoopholeNum
numberOfLoopholeSmell.append(LoopholeNum)
if row['Nonverifiable_term'] != '-':
OpenendedNum = len(row['Nonverifiable_term'].split("*"))
else:
OpenendedNum = 0
smell_number += OpenendedNum
numberOfOpenendedSmell.append(OpenendedNum)
if row['Superlative'] != '-':
SuperlativeNum = len(row['Superlative'].split("*"))
else:
SuperlativeNum = 0
smell_number += SuperlativeNum
numberOfSuperlativeSmell.append(SuperlativeNum)
if row['Comparative'] != '-':
ComparativeNum = len(row['Comparative'].split("*"))
else:
ComparativeNum = 0
smell_number += ComparativeNum
numberOfComparativeSmell.append(ComparativeNum)
if row['Negative'] != '-':
NegativeNum = len(row['Negative'].split("*"))
else:
NegativeNum = 0
smell_number += NegativeNum
numberOfNegativeSmell.append(NegativeNum)
if row['Vague_pron.'] != '-':
PronounsNum = len(row['Vague_pron.'].split("*"))
else:
PronounsNum = 0
smell_number += PronounsNum
numberOfPronounsSmell.append(PronounsNum)
if row['Uncertain_verb'] != '-':
UncertainNum = len(row['Uncertain_verb'].split("*"))
else:
UncertainNum = 0
smell_number += UncertainNum
numberOfNUncertainSmell.append(UncertainNum)
if row['Polysemy'] != '-':
PolysemyNum = len(set(row['Polysemy'].split("*")))
else:
PolysemyNum = 0
smell_number += PolysemyNum
numberOfPolysemySmells.append(PolysemyNum)
blob = tb.TextBlob(row['Requirement_text'])
all_words = len(blob.words)
number_of_no_clean_word.append(all_words - smell_number)
numberOfTotalSmells.append(smell_number)
print('numberOfTotalSmells', numberOfTotalSmells)
print('numberOfSubjectiveSmell', numberOfSubjectiveSmell)
print('numberOfAmbigAdjAdvSmell', numberOfAmbigAdjAdvSmell)
print('numberOfLoopholeSmell', numberOfLoopholeSmell)
print('numberOfOpenendedSmell', numberOfOpenendedSmell)
print('numberOfSuperlativeSmell', numberOfSuperlativeSmell)
print('numberOfComparativeSmell', numberOfComparativeSmell)
print('numberOfNegativeSmell', numberOfNegativeSmell)
print('numberOfPronounsSmell', numberOfPronounsSmell)
print('numberOfNUncertainSmell', numberOfNUncertainSmell)
print('numberOfPolysemySmells', numberOfPolysemySmells)
df2 = pd.DataFrame()
df2['ReqId'] = my_lables
df2['ReqTxt'] = df['Requirement_text']
df2['Project'] = projects_name_list
df2['Words'] = my_values
df2['SmellyWords'] = numberOfTotalSmells
df2['CleanWords'] = number_of_no_clean_word
df2['Subjective'] = numberOfSubjectiveSmell
df2['Ambiguous'] = numberOfAmbigAdjAdvSmell
df2['NonVerifiable'] = numberOfOpenendedSmell
df2['Superlative'] = numberOfSuperlativeSmell
df2['Comparative'] = numberOfComparativeSmell
df2['Negative'] = numberOfNegativeSmell
df2['VaguePron.'] = numberOfPronounsSmell
df2['UncertainVerb'] = numberOfNUncertainSmell
df2['Polysemy'] = numberOfPolysemySmells
df2.to_excel(r'data/DataLast/dataset1kv1_smell_frequency.xlsx')
"""
data = [numberOfSubjectiveSmell, numberOfAmbigAdjAdvSmell,
numberOfOpenendedSmell, numberOfSuperlativeSmell, numberOfComparativeSmell, numberOfNegativeSmell,
numberOfPronounsSmell, numberOfNUncertainSmell, numberOfPolysemySmells]
# Create a figure instance
fig = plt.figure(1, figsize=(15, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data)
ax.set_xticklabels(['Subjective', 'Ambig Adj./Adv.',
'Non-verifiable', 'Superlative', 'Comparative',
'Negative ', 'Vague pronoun.', 'Uncertain verb', 'Polysemy'], fontsize=10)
plt.show()
"""
df2.drop(columns=['Words', 'SmellyWords', 'CleanWords'], inplace=True)
df3 = pd.melt(df2, id_vars=['ReqId', 'ReqTxt', 'Project', ], var_name='Type', value_name='Number')
# print(df3)
df3.to_excel(r'data/DataLast/dataset1kv1_smell_frequency_long_form.xlsx')
# quit()
return
def compute_smell_frequency_percentage():
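    """Report the overall ratio of smelly words to all words, the Pearson and
    Spearman correlations between requirement length and smell count, the
    share of requirements with at least one smell, and sentence-count stats.
    """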
df = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency_with_testability.xlsx')
# df = pd.read_excel(r'data/DataLast/dataset1kv1_ARTA_result_smell_frequency_with_testability.xlsx')
# df = pd.read_excel(r'data/DataLast/dataset1kv1_Smella_result_smell_frequency_with_testability.xlsx')
smells = df['SmellyWords'].sum()
all_words = df['Words'].sum()
print('smelly_words:', smells)
print('all_words:', all_words)
print('percentage:', round(smells / all_words, 4))
r, p = scipy.stats.pearsonr(df['Words'], df['SmellyWords'])
r2, p2 = scipy.stats.spearmanr(df['Words'], df['SmellyWords'])
print('pearsonr:', r, 'p-val:', p)
print('spearmanr:', r2, 'p-val', p2)
# Count the requirements without any smell
df2 = df.loc[(df['SmellyWords'] >= 1)]
# print(df2)
print('number_req_with_at_least_one_smell:', len(df2.index) / len(df.index))
print(715 / 985)
print('-'*75)
sentences_list = []
for index, row in df.iterrows():
blob = tb.TextBlob(row['ReqTxt'])
sentences_list.append(len(blob.sentences))
print('Sentence counter',Counter(sentences_list))
print('sum:', sum(sentences_list), 'avg:', sum(sentences_list)/len(sentences_list))
print('weighted avg:', (461*2+432*1+3*77+4*6+5*5+6*2+7*2)/len(sentences_list))
def draw_barplot_of_mean_and_sd_of_smell_frequency():
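    """Draw per-project bar plots of the mean smell frequency (with standard
    deviation) for each smell type and save the figure under charts/.
    """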
df = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency_long_form.xlsx')
# draw_project_smell_frequency:
kind = ['strip', 'swarm', 'box', 'violin', 'boxen', 'point', 'bar', 'count']
"""
g = sns.catplot(
# x='Type',
# y='Number',
# hue='Type',
dodge=True,
col='Project',
col_wrap=3,
# order=['Original', 'Refactored', ],
col_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
height=2.5,
aspect=1.5,
data=df,
kind=kind[6],
# s=3.50,
# color='0.1',
# marker='*',
palette=sns.color_palette('tab10'),
capsize=0.15,
ci=None,
# legend='',
# ax=ax,
# cut=0,
# bw=.2
)
"""
g = sns.catplot(
x='Type',
y='Number',
# hue='Type',
col='Project',
col_wrap=3,
# order=['Original', 'Refactored', ],
col_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
dodge=True,
capsize=0.25,
estimator=np.mean,
ci='sd',
n_boot=10000,
height=3,
aspect=1.5,
kind=kind[6],
data=df,
# s=3.50,
# color='0.1',
# marker='*',
palette=sns.color_palette('tab10'),
# legend='',
# ax=ax,
# cut=0,
# bw=.2
)
"""
for axes in g.axes.flat:
sns.stripplot(x='Type', y='Number',
col='Project',
col_wrap=3,
# order=['Original', 'Refactored', ],
col_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
# color='k',
size=1.5,
s=1.5,
# color='0.1',
# marker='*',
palette=sns.color_palette('tab10'),
linewidth=1.25,
edgecolor='gray',
data=df3,
ax=axes)
"""
# g.ax.set_xticklabels(g.ax.get_xticklabels(), rotation=45)
for axes in g.axes.flat:
axes.set_xticklabels(axes.get_xticklabels(), rotation=25, horizontalalignment='right', fontsize=9)
axes.set_xlabel('Smell type', fontsize=9)
axes.set_ylabel('Smell Frequency', fontsize=9)
axes.tick_params(labelsize=8)
# plt.xticks(rotation=45)
plt.subplots_adjust(hspace=0.1, wspace=0.1)
plt.tight_layout()
plt.savefig('charts/frequent_smells7.png')
plt.show()
def draw_smells_against_size():
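    """Plot the number of smells against requirement length in words, with a
    robust regression line and a per-project scatter overlay, and save the
    figure under charts/.
    """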
df = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency_with_testability.xlsx')
g = sns.lmplot(
x='Words',
y='SmellyWords',
# hue='Project',
# hue_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
scatter=False,
fit_reg=True,
robust=True,
n_boot=10000,
x_jitter=0.05,
x_ci=95,
# ci=None,
# x_estimator=np.mean,
# order=3,
data=df,
# palette="Set1",
palette='plasma',
aspect=1.2,
# sharex=True,
# sharey=True,
legend=True,
# logistic=True
truncate=True,
# logx=True,
# scatter_kws={'s': 5, },
# line_kws={'lw': 1,
# 'color': 'm',
# 'color': '#4682b4',
# }
legend_out=False
)
g = sns.scatterplot(
x='Words',
y='SmellyWords',
size='Clarity', # The same cleanness
sizes=(0.1, 100.1),
hue='Project',
hue_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
style='Project',
style_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
estimator=np.mean,
data=df,
ax=g.ax
)
g.set(xlabel='Requirement length (word)', ylabel='Number of smells')
# check axes and find which is have legend
# leg = g.axes.flat[0].get_legend()
# new_title = 'My title'
# leg.set_title(new_title)
# new_labels = ['label 1', 'label 2']
# for t, l in zip(leg.texts, new_labels): t.set_text(l)
plt.tight_layout()
plt.savefig(r'charts/smells_against_size_v5.png')
plt.show()
def draw_project_smell_frequency():
pass
def draw_total_smell_frequency():
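    """Draw a horizontal box plot of smelly-word counts per project with
    jittered points overlaid, and save the figure under charts/.
    """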
# https://nbviewer.jupyter.org/gist/fonnesbeck/5850463
df = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency.xlsx')
# df.boxplot(column=['SmellyWords', 'CleanWords'], vert=False, by='Project',)
axes = df.boxplot(column=['SmellyWords', ], vert=False, by='Project', grid=False)
# axes = df.boxplot(column=['SmellyWords', ], vert=False, grid=False)
    for i, project in enumerate(sorted(set(df['Project']))):
        x = df['SmellyWords'][df.Project == project].dropna()
        # Add some random "jitter" on the y-axis so points do not overlap
        # (the boxplot is horizontal, so group positions lie on the y-axis)
        y = np.random.normal(i + 1, 0.05, size=len(x))
        plt.plot(x, y, 'r.', alpha=0.05)
# axes.set_xticklabels(axes.get_xticklabels(), rotation=25, horizontalalignment='right', fontsize=9)
fig = axes.get_figure()
fig.suptitle('')
# axes.set_xlabel('Smell type', fontsize=9)
axes.set_ylabel('Project', fontsize=9)
# axes.tick_params(labelsize=9)
plt.tight_layout()
plt.savefig('charts/total_smell_frequency_boxplot_v4.png')
plt.show()
def draw_total_smell_frequency2():
# https://nbviewer.jupyter.org/gist/fonnesbeck/5850463
df = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency.xlsx')
kind = ['strip', 'swarm', 'box', 'violin', 'boxen', 'point', 'bar', 'count']
g = sns.catplot(
y='Project',
x='SmellyWords',
# hue='Type',
dodge=True,
# col='Project',
# col_wrap=3,
order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
# col_order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
height=4,
aspect=1.75,
data=df,
kind=kind[2],
# s=3.50,
# color='0.1',
# marker='*',
palette=sns.color_palette('tab10'),
# capsize=0.20,
# ci=None,
# n_boot=10000,
# legend='',
# ax=ax,
# cut=0,
# bw=.2
orient='h'
)
sns.stripplot(
y='Project',
x='SmellyWords',
# hue='Type',
order=['EIRENE', 'ERTMS/ETCS', 'CCTNS', 'Gamma-J', 'KeePass', 'Peering'],
size=2.5,
s=2.5,
# color='0.1',
# marker='*',
palette=sns.color_palette('tab10'),
linewidth=1.5,
edgecolor='gray',
data=df,
ax=g.ax)
plt.tight_layout()
plt.savefig('charts/total_smell_frequency_boxplot_v5.png')
plt.show()
def compute_requirement_testability():
df = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency.xlsx') # Ground-truth dataset
testability_a00 = list()
testability_a01 = list()
testability_a_05 = list()
testability_a_10 = list()
cleanness = list()
for index, row in df.iterrows():
print('')
smelly_words = row['SmellyWords']
all_words = row['Words']
blob = tb.TextBlob(row['ReqTxt'])
sentence = len(blob.sentences)
print('@', sentence)
t = 0
if row['Subjective'] != 0:
t += 1
if row['Ambiguous'] != 0:
t += 1
if row['NonVerifiable'] != 0:
t += 1
if row['Superlative'] != 0:
t += 1
if row['Comparative'] != 0:
t += 1
if row['Negative'] != 0:
t += 1
if row['VaguePron.'] != 0:
t += 1
if row['UncertainVerb'] != 0:
t += 1
if row['Polysemy'] != 0:
t += 1
if smelly_words == 0:
cleanness_i = 1
else:
cleanness_i = 1 - (smelly_words / all_words) ** (1 / t)
alpha = [0.00, 0.01, 0.50, 0.99, ] # For our paper we set alpha/epsilon cost to 0.01
# Reasoning about alpha? According to Reviewers' comments
#
# testability_i = cleanness_i / ((1 + alpha[0]) ** (sentence - 1))
# print(testability_i)
testability_a00.append(cleanness_i / ((1 + alpha[0]) ** (sentence - 1)))
testability_a01.append(cleanness_i / ((1 + alpha[1]) ** (sentence - 1)))
testability_a_05.append(cleanness_i / ((1 + alpha[2]) ** (sentence - 1)))
testability_a_10.append(cleanness_i / ((1 + alpha[3]) ** (sentence - 1)))
cleanness.append(cleanness_i)
# testability.append(testability_i)
df['Cleanness'] = cleanness
# df['Testability'] = testability
df['Testability_with_alpha_0.00'] = testability_a00
df['Testability_with_alpha_0.01'] = testability_a01
df['Testability_with_alpha_0.50'] = testability_a_05  # alpha[2] above is 0.50, so label the column accordingly
df['Testability_with_alpha_0.99'] = testability_a_10
print(df)
df.to_excel(r'data/DataLast/dataset1kv1_smell_frequency_with_testability_with_alphas.xlsx') # Ground-truth dataset
# df.to_excel(r'data/DataLast/dataset1kv1_Smella_result_smell_frequency_with_testability.xlsx', index=False)
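# --- Illustrative sketch (added for clarity, not part of the original script): the cleanness and
# --- testability formulas above applied to invented sample numbers. All values here are made up.
def _demo_testability_formula(smelly_words=3, all_words=30, smell_types=2, sentences=2, alpha=0.01):
    # cleanness penalises the share of smelly words, softened by the number of distinct smell types
    cleanness = 1 - (smelly_words / all_words) ** (1 / smell_types)
    # testability discounts cleanness by (1 + alpha) for every sentence beyond the first
    testability = cleanness / ((1 + alpha) ** (sentences - 1))
    return cleanness, testability  # roughly (0.684, 0.677) for these defaults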
def evaluate_testability_measurement_method():
# Use scikit learn evaluation metrics for regression
df_gt = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency_with_testability.xlsx') # Ground-truth dataset
df_arta = pd.read_excel(r'data/DataLast/dataset1kv1_ARTA_result_smell_frequency_with_testability.xlsx')
df_smella = pd.read_excel(r'data/DataLast/dataset1kv1_Smella_result_smell_frequency_with_testability.xlsx')
y_true, y_pred = list(df_gt['Testability'].values), list(df_arta['Testability'].values)
y_true = y_true[961:985]
y_pred = y_pred[961:985]
# Print all classifier model metrics
print('Evaluating requirement testability ...')
print('Regressor minimum prediction', min(y_pred), 'Regressor maximum prediction', max(y_pred))
df = pd.DataFrame()
df['r2_score_uniform_average'] = [r2_score(y_true, y_pred, multioutput='uniform_average')]
df['r2_score_variance_weighted'] = [r2_score(y_true, y_pred, multioutput='variance_weighted')]
df['explained_variance_score_uniform_average'] = [
explained_variance_score(y_true, y_pred, multioutput='uniform_average')]
df['explained_variance_score_variance_weighted'] = [
explained_variance_score(y_true, y_pred, multioutput='variance_weighted')]
df['mean_absolute_error'] = [mean_absolute_error(y_true, y_pred)]
df['mean_squared_error_MSE'] = [mean_squared_error(y_true, y_pred)]
df['mean_squared_error_RMSE'] = [mean_squared_error(y_true, y_pred, squared=False)]
df['median_absolute_error'] = [median_absolute_error(y_true, y_pred)]
if min(y_pred) >= 0:
df['mean_squared_log_error'] = [mean_squared_log_error(y_true, y_pred)]
# ValueError: Mean Tweedie deviance error with power=2 can only be used on strictly positive y and y_pred.
df['mean_poisson_deviance'] = [mean_poisson_deviance(y_true, y_pred, )]
df['mean_gamma_deviance'] = [mean_gamma_deviance(y_true, y_pred, )]
df['max_error'] = [max_error(y_true, y_pred)]
df.to_excel(r'data/DataLast/dataset1kv1_ARTA_evaluation_metrics_6_Peering.xlsx', index=True, index_label='Row')
def draw_cleanness_and_testability():
df_gt = pd.read_excel(r'data/DataLast/dataset1kv1_smell_frequency_with_testability.xlsx') # Ground-truth dataset
df_arta = pd.read_excel(r'data/DataLast/dataset1kv1_ARTA_result_smell_frequency_with_testability.xlsx')
df_smella = pd.read_excel(r'data/DataLast/dataset1kv1_Smella_result_smell_frequency_with_testability.xlsx')
df = pd.DataFrame()
df['ReqTxt'] = df_gt['ReqTxt']
df['Project'] = df_gt['Project']
df['Clarity'] = df_gt['Clarity']
df['Ground-truth (Manual)'] = df_gt['Testability']
df['Clarity (ARTA)'] = df_arta['Clarity']
df['ARTA'] = df_arta['Testability']
df['Clarity (Smella)'] = df_smella['Clarity']
df['Smella'] = df_smella['Testability']
# print(df)
df.drop(columns=['Clarity', 'Clarity (ARTA)', 'Clarity (Smella)'], inplace=True)
df2 =
|
pd.melt(df, id_vars=['ReqTxt', 'Project'], value_name='Testability (α = 0.01)', var_name='Method/Tool')
|
pandas.melt
|
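A minimal sketch of what the pd.melt completion above does, on an invented two-row frame with the same column names as the snippet: the per-method columns are stacked into a single value column, while 'ReqTxt' and 'Project' stay as identifiers.
import pandas as pd
wide = pd.DataFrame({'ReqTxt': ['R1', 'R2'], 'Project': ['EIRENE', 'KeePass'],
                     'Ground-truth (Manual)': [0.9, 0.5], 'ARTA': [0.8, 0.6], 'Smella': [0.7, 0.4]})
long = pd.melt(wide, id_vars=['ReqTxt', 'Project'],
               value_name='Testability (α = 0.01)', var_name='Method/Tool')
# 'long' now has one row per (requirement, method) pair: 6 rows, with the method name in 'Method/Tool'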
"""Rules for conversion between SQL, pandas, and odbc data types."""
import pandas as pd
import pyodbc
rules = pd.DataFrame.from_records(
[
{
"sql_type": "bit",
"sql_category": "boolean",
"min_value": False,
"max_value": True,
"pandas_type": "boolean",
"odbc_type": pyodbc.SQL_BIT,
"odbc_size": 1,
"odbc_precision": 0,
},
{
"sql_type": "tinyint",
"sql_category": "exact numeric",
"min_value": 0,
"max_value": 255,
"pandas_type": "UInt8",
"odbc_type": pyodbc.SQL_TINYINT,
"odbc_size": 1,
"odbc_precision": 0,
},
{
"sql_type": "smallint",
"sql_category": "exact numeric",
"min_value": -(2 ** 15),
"max_value": 2 ** 15 - 1,
"pandas_type": "Int16",
"odbc_type": pyodbc.SQL_SMALLINT,
"odbc_size": 2,
"odbc_precision": 0,
},
{
"sql_type": "int",
"sql_category": "exact numeric",
"min_value": -(2 ** 31),
"max_value": 2 ** 31 - 1,
"pandas_type": "Int32",
"odbc_type": pyodbc.SQL_INTEGER,
"odbc_size": 4,
"odbc_precision": 0,
},
{
"sql_type": "bigint",
"sql_category": "exact numeric",
"min_value": -(2 ** 63),
"max_value": 2 ** 63 - 1,
"pandas_type": "Int64",
"odbc_type": pyodbc.SQL_BIGINT,
"odbc_size": 8,
"odbc_precision": 0,
},
{
"sql_type": "float",
"sql_category": "approximate numeric",
"min_value": -(1.79 ** 308),
"max_value": 1.79 ** 308,
"pandas_type": "float64",
"odbc_type": pyodbc.SQL_FLOAT,
"odbc_size": 8,
"odbc_precision": 53,
},
{
"sql_type": "time",
"sql_category": "date time",
"min_value": pd.Timedelta("00:00:00.0000000"),
"max_value": pd.Timedelta("23:59:59.9999999"),
"pandas_type": "timedelta64[ns]",
"odbc_type": pyodbc.SQL_SS_TIME2,
"odbc_size": 16,
"odbc_precision": 7,
},
{
"sql_type": "date",
"sql_category": "date time",
"min_value": (pd.Timestamp.min +
|
pd.DateOffset(days=1)
|
pandas.DateOffset
|
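A rules table like the one above is typically queried by SQL type name. A minimal sketch on a made-up two-row subset (the column names mirror the snippet; `rules_demo` itself is not part of it):
import pandas as pd
rules_demo = pd.DataFrame.from_records([
    {"sql_type": "int", "pandas_type": "Int32", "odbc_size": 4},
    {"sql_type": "bigint", "pandas_type": "Int64", "odbc_size": 8},
])
lookup = rules_demo.set_index("sql_type")
lookup.loc["int", "pandas_type"]   # 'Int32'
lookup.loc["bigint", "odbc_size"]  # 8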
from faultclass import Fault
from faultclass import python_worker
import pandas as pd
from calculate_trigger import calculate_trigger_addresses
from multiprocessing import Queue
import logging
logger = logging.getLogger(__name__)
def run_goldenrun(
config_qemu, qemu_output, data_queue, faultconfig, qemu_pre=None, qemu_post=None
):
dummyfaultlist = [Fault(0, 0, 0, 0, 0, 0, 100, 0)]
queue_output = Queue()
goldenrun_config = {}
goldenrun_config["qemu"] = config_qemu["qemu"]
goldenrun_config["kernel"] = config_qemu["kernel"]
goldenrun_config["plugin"] = config_qemu["plugin"]
goldenrun_config["machine"] = config_qemu["machine"]
if "max_instruction_count" in config_qemu:
goldenrun_config["max_instruction_count"] = config_qemu["max_instruction_count"]
if "memorydump" in config_qemu:
goldenrun_config["memorydump"] = config_qemu["memorydump"]
experiments = []
if "start" in config_qemu:
pre_goldenrun = {"type": "pre_goldenrun", "index": -2, "data": {}}
experiments.append(pre_goldenrun)
goldenrun = {"type": "goldenrun", "index": -1, "data": {}}
experiments.append(goldenrun)
for experiment in experiments:
if experiment["type"] == "pre_goldenrun":
goldenrun_config["end"] = config_qemu["start"]
# Set max_instruction_count to a ridiculously high number so it is never reached
goldenrun_config["max_instruction_count"] = 10000000000000
elif experiment["type"] == "goldenrun":
if "start" in config_qemu:
goldenrun_config["start"] = config_qemu["start"]
if "end" in config_qemu:
goldenrun_config["end"] = config_qemu["end"]
if "start" in config_qemu and "end" in config_qemu:
# Set max_instruction_count to a ridiculously high number so it is never reached
goldenrun_config["max_instruction_count"] = 10000000000000
logger.info(f"{experiment['type']} started...")
python_worker(
dummyfaultlist,
goldenrun_config,
experiment["index"],
queue_output,
qemu_output,
None,
False,
None,
qemu_pre,
qemu_post,
)
experiment["data"] = queue_output.get()
if experiment["data"]["endpoint"] == 1:
logger.info(f"{experiment['type']} successfully finished.")
else:
logger.critical(
f"{experiment['type']} not finished after "
f"{goldenrun_config['max_instruction_count']} tb counts."
)
raise ValueError(
f"{experiment['type']} not finished. Probably no valid instruction! "
f"If valid increase tb max for golden run"
)
data_queue.put(experiment["data"])
if experiment["type"] != "goldenrun":
continue
tbexec =
|
pd.DataFrame(experiment["data"]["tbexec"])
|
pandas.DataFrame
|
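The completion builds a DataFrame straight from the plugin's `tbexec` records. A minimal, self-contained sketch of that pattern; the keys below are invented, the real ones come from the QEMU plugin output:
import pandas as pd
tbexec_demo = [{"tb": 0x1000, "pos": 0}, {"tb": 0x1010, "pos": 1}]  # hypothetical records
df_tbexec = pd.DataFrame(tbexec_demo)  # one row per executed translation block, one column per key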
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup
import requests
import json
import datetime
from datetime import date
from matplotlib.ticker import PercentFormatter
finalData = pd.DataFrame(np.empty((0, 5)))  # pd.np is deprecated; use numpy directly
#############################################################################################
#Find function to be used later
#############################################################################################
def find(lst, key, value):
for i, dic in enumerate(lst):
if dic[key] == value:
return i
return -1
#############################################################################################
#Drawdown calculation
#############################################################################################
countryETFs = {
'US': 'SPY',
'China': 'MCHI',
'Japan': 'EWJ',
'Germany': 'EWG',
'India': 'INDA',
'United Kingdom': 'EWU',
'France': 'EWQ',
'Italy': 'EWI',
'Brazil': 'EWZ',
'Canada': 'EWC'
}
countryDrawdowns = {
'US': 0.00,
'China': 0.00,
'Japan': 0.00,
'Germany': 0.00,
'India': 0.00,
'United Kingdom': 0.00,
'France': 0.00,
'Italy': 0.00,
'Brazil': 0.00,
'Canada': 0.00,
}
for country in list(countryETFs.keys()):
b = countryETFs[country]
tgt_website = r'https://sg.finance.yahoo.com/quote/'+b+'/key-statistics?p='+b
stock_company = f"https://finance.yahoo.com/quote/{b}"
soup = BeautifulSoup(requests.get(stock_company).text, "html.parser")
ticker_data_url = f"https://query1.finance.yahoo.com/v8/finance/chart/{b}?symbol={b}&period1=1546300800&period2=9999999999&interval=1d"
ticker_data = json.loads(requests.get(ticker_data_url).text)
closingPrices =
|
pd.Series(ticker_data['chart']['result'][0]['indicators']['quote'][0]['close'])
|
pandas.Series
|
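The snippet above fetches closing prices but is cut off before the drawdown itself is computed. A common way to do it from such a Series (a sketch with invented prices, not the snippet's own code):
import pandas as pd
prices = pd.Series([100.0, 105.0, 98.0, 110.0, 90.0])  # invented closing prices
drawdown = prices / prices.cummax() - 1                # fraction below the running peak
max_drawdown = drawdown.min()                          # most negative value, here about -0.18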
import functools
import logging
import operator
import os
import sys
import time
from multiprocessing import Pool, cpu_count
import itertools
import pandas as pd
from typing import Tuple, Dict, Set, List, Optional
from NamedAtomicLock import NamedAtomicLock
from friendly_data.converters import from_df
from nexinfosys.bin.cli_script import get_valid_name, get_file_url, prepare_base_state, print_issues
from nexinfosys.command_generators import IType, Issue
from nexinfosys.common.helper import PartialRetrievalDictionary, any_error_issue, create_dictionary
from nexinfosys.embedded_nis import NIS
from nexinfosys.model_services import State
from nexinfosys.models.musiasem_concepts import Processor, ProcessorsRelationPartOfObservation, Indicator
from nexinfosys.serialization import deserialize_state
from enbios.common.helper import generate_workbook, list_to_dataframe, get_scenario_name
from enbios.input import Simulation
from enbios.input.lci import LCIIndex
from enbios.input.simulators.sentinel import SentinelSimulation
from enbios.model import SimStructuralProcessorAttributes
from enbios.processing import read_parse_configuration, read_submit_solve_nis_file
#####################################################
# MAIN ENTRY POINT #################################
#####################################################
_idx_cols = ["region", "scenario", "technology", "model", "carrier",
"year", "time", "timestep", "unit", "variable", "description"]
def parallelizable_process_fragment(param: Tuple[str, # Fragment label
Dict[str, str], # Fragment dict
Dict[str, Set[str]], #
List[SimStructuralProcessorAttributes]],
s_state: bytes,
output_dir: str,
development_nis_file: str,
generate_nis_fragment_file: bool,
generate_interface_results: bool,
generate_indicators: bool,
max_lci_interfaces: int,
keep_fragment_file: bool
):
"""
Prepares a NIS file from inputs and submits it to NIS for accounting and calculation of indicators
:param param: A Tuple with the information to drive the process
:param s_state: A "bytes" with Serialized state (deserialized inside)
:param output_dir: Outputs directory
:param development_nis_file: URL of a NIS file that would go after state and before the fragment,
parsed every time (for experiments)
:param generate_nis_fragment_file: True to generate an expanded NIS file for the fragment
:param generate_interface_results: True to generate a NIS file with the values of interfaces after solving
:param generate_indicators: True to generate a file with all indicators
:param max_lci_interfaces: If >0, cut the LCI interfaces used to that number
:param keep_fragment_file: If True, keep the minimal fragment NIS file
:return:
"""
def write_outputs(outputs):
"""
Write results of a submission to output directory
:param outputs: Dictionary with the possible outputs
:return:
"""
nis_idempotent_file = outputs.get("idempotent_nis", None)
df_indicators = outputs.get("indicators", pd.DataFrame())
df_interfaces = outputs.get("interfaces", pd.DataFrame())
df_iamc_indicators = outputs.get("iamc_indicators", pd.DataFrame())
print("Writing results ...")
def append_fragment_to_file(file_name, df):
lock = NamedAtomicLock("enbios-lock")
lock.acquire()
try:
full_file_name = os.path.join(output_dir, file_name)
if not os.path.isfile(full_file_name):
df.to_csv(full_file_name, index=False)
else:
df.to_csv(full_file_name, index=False, mode='a', header=False)
finally:
lock.release()
# Main result: "indicators.csv" file (aggregates all fragments)
if df_indicators is not None:
append_fragment_to_file("indicators.csv", df_indicators)
# Write a separate indicators.csv for the fragment
if generate_indicators:
csv_name = os.path.join(output_dir, f"fragment_indicators{get_valid_name(str(p_key))}.csv")
if not df_indicators.empty:
df_indicators.to_csv(csv_name, index=False)
else:
with open(csv_name, "wt") as f:
f.write("Could not obtain indicator values (??)")
if df_iamc_indicators is not None:
append_fragment_to_file("iamc_indicators.csv", df_iamc_indicators)
# Write NIS of the fragment just processed
if generate_nis_fragment_file and nis_idempotent_file is not None:
fragment_file_name = os.path.join(output_dir, f"full_fragment{get_valid_name(str(p_key))}.xlsx")
with open(fragment_file_name, "wb") as f:
f.write(nis_idempotent_file)
# Write Dataset with values for each interface as calculated by NIS solver, for the fragment
if generate_interface_results and df_interfaces is not None:
csv_name = os.path.join(output_dir, f"fragment_interfaces{get_valid_name(str(p_key))}.csv")
if not df_interfaces.empty:
df_interfaces.to_csv(csv_name, index=False)
else:
with open(csv_name, "wt") as f:
f.write("Could not obtain interface values (??)")
# Starts here
frag_label, p_key, f_metadata, f_processors = param # Unpack "param"
print(f"Fragment processing ...")
start = time.time() # Time execution
# Call main function
outputs = process_fragment(s_state, p_key,
f_metadata, f_processors,
output_dir,
development_nis_file,
max_lci_interfaces,
keep_fragment_file,
generate_nis_fragment_file,
generate_interface_results)
end = time.time() # Stop timing
print(f"Fragment processed in {end - start} seconds ---------------------------------------")
write_outputs(outputs)
start = time.time() # Time also output writing
print(f"Fragment outputs written in {start-end} seconds to output dir: {output_dir}")
class Enviro:
def __init__(self):
self._cfg_file_path = None
self._cfg = None
self._simulation_files_path = None
def set_cfg_file_path(self, cfg_file_path):
self._cfg = read_parse_configuration(cfg_file_path)
self._cfg_file_path = os.path.realpath(cfg_file_path) if isinstance(cfg_file_path, str) else None
if "simulation_files_path" in self._cfg:
self._simulation_files_path = self._cfg["simulation_files_path"]
def _get_simulation(self) -> Simulation:
# Simulation
simulation = None
if self._cfg["simulation_type"].lower() == "sentinel":
simulation = SentinelSimulation(self._simulation_files_path)
# elif self._cfg["simulation_type"].lower() == "calliope":
# simulation = CalliopeSimulation(self._simulation_files_path)
return simulation
def _prepare_process(self) -> Tuple[NIS, LCIIndex, Simulation]:
# Simulation
simulation = self._get_simulation()
# MuSIASEM (NIS)
nis, issues = read_submit_solve_nis_file(self._cfg["nis_file_location"], state=None, solve=False)
# LCI index
lci_data_index = LCIIndex(self._cfg["lci_data_locations"])
return nis, lci_data_index, simulation
def generate_matcher_templates(self, combine_countries=False) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Generate auxiliary DataFrames to help in the elaboration of:
- correspondence file
- block types file
:param combine_countries:
:return: A tuple with 4 pd.DataFrames
"""
# Construct auxiliary models
nis, lci_data_index, simulation = self._prepare_process()
# TODO Build a DataFrame with a list of LCI title, to file code
# name,title,file
lci_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
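The `append_fragment_to_file` helper above shows a simple pattern: write the header only when the shared CSV is created, and append rows without it afterwards (the original also serialises writers with NamedAtomicLock). A minimal sketch of the same idea, lock omitted:
import os
import pandas as pd

def append_fragment_demo(full_file_name: str, df: pd.DataFrame) -> None:
    if not os.path.isfile(full_file_name):
        df.to_csv(full_file_name, index=False)                          # first fragment: create file with header
    else:
        df.to_csv(full_file_name, index=False, mode='a', header=False)  # later fragments: append rows only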
import numpy as np
import pandas as pd
import pytest
from scipy.special import boxcox1p
import ballet.eng.misc
from ballet.util.testing import assert_array_almost_equal, assert_array_equal
def test_value_replacer():
trans = ballet.eng.misc.ValueReplacer(0.0, -99)
data = pd.DataFrame([0, 0, 0, 0, 1, 3, 7, 11, -7])
expected_result = pd.DataFrame([-99, -99, -99, -99, 1, 3, 7, 11, -7])
result = trans.fit_transform(data)
pd.util.testing.assert_frame_equal(result, expected_result)
def test_box_cox_transformer():
threshold = 0.0
lmbda = 0.0
trans = ballet.eng.misc.BoxCoxTransformer(
threshold=threshold, lmbda=lmbda)
skewed = [0., 0., 0., 0., 1.]
unskewed = [0., 0., 0., 0., 0.]
exp_skew_res = boxcox1p(skewed, lmbda)
exp_unskew_res = unskewed
# test on DF, one skewed column
df = pd.DataFrame()
df['skewed'] = skewed
df['unskewed'] = unskewed
df_res = trans.fit_transform(df)
assert isinstance(df_res, pd.DataFrame)
assert 'skewed' in df_res.columns
assert 'unskewed' in df_res.columns
assert_array_almost_equal(df_res['skewed'], exp_skew_res)
# test on DF, no skewed columns
df_unskewed =
|
pd.DataFrame()
|
pandas.DataFrame
|
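The Box-Cox test above relies on the fact that `boxcox1p` with `lmbda=0` reduces to `log1p`, which is easy to check directly:
import numpy as np
from scipy.special import boxcox1p
x = np.array([0., 0., 0., 0., 1.])
assert np.allclose(boxcox1p(x, 0.0), np.log1p(x))  # lmbda == 0: Box-Cox of (1 + x) is log(1 + x)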
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected =
|
tm.box_expected(expected, box)
|
pandas.util.testing.box_expected
|
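For readers skimming these tests, the core behaviours they exercise show up on a tiny example: timedelta arrays add to timestamps to give datetimes, subtract from each other to give timedeltas, and propagate NaT.
import pandas as pd
tdi = pd.to_timedelta(['1 days', 'NaT', '2 days'])
ts = pd.Timestamp('2013-01-01')
ts + tdi    # DatetimeIndex(['2013-01-02', 'NaT', '2013-01-03'], ...)
tdi - tdi   # TimedeltaIndex(['0 days', NaT, '0 days'], ...)
tdi * 2     # TimedeltaIndex(['2 days', NaT, '4 days'], ...)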
import numpy as np
import pandas.util.testing as tm
from pandas import (DataFrame, Series, DatetimeIndex, MultiIndex, Index,
date_range)
from .pandas_vb_common import lib
class Reindex(object):
goal_time = 0.2
def setup(self):
rng = DatetimeIndex(start='1/1/1970', periods=10000, freq='1min')
self.df = DataFrame(np.random.rand(10000, 10), index=rng,
columns=range(10))
self.df['foo'] = 'bar'
self.rng_subset = Index(rng[::2])
self.df2 = DataFrame(index=range(10000),
data=np.random.rand(10000, 30), columns=range(30))
N = 5000
K = 200
level1 = tm.makeStringIndex(N).values.repeat(K)
level2 = np.tile(tm.makeStringIndex(K).values, N)
index = MultiIndex.from_arrays([level1, level2])
self.s = Series(np.random.randn(N * K), index=index)
self.s_subset = self.s[::2]
def time_reindex_dates(self):
self.df.reindex(self.rng_subset)
def time_reindex_columns(self):
self.df2.reindex(columns=self.df.columns[1:5])
def time_reindex_multiindex(self):
self.s.reindex(self.s_subset.index)
class ReindexMethod(object):
goal_time = 0.2
params = ['pad', 'backfill']
param_names = ['method']
def setup(self, method):
N = 100000
self.idx =
|
date_range('1/1/2000', periods=N, freq='1min')
|
pandas.date_range
|
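The benchmark above times `reindex` with the 'pad' and 'backfill' fill methods. A minimal sketch of what that call does on a small datetime index (invented data):
import numpy as np
import pandas as pd
rng = pd.date_range('2000-01-01', periods=6, freq='1min')
sparse = pd.Series(np.arange(6), index=rng)[::2]   # keep every other minute
sparse.reindex(rng, method='pad')                  # 'pad' forward-fills the missing minutes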
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from utils.utils import CONSTANTS
from utils.utils import publication_plot_pred_act, publication_plot_residuals
from use_crabnet import predict_crabnet
from use_densenet import predict_densenet
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
# %%
plt.rcParams.update({'font.size': 16})
cons = CONSTANTS()
mat_props_units = cons.mp_units_dict
mat_props = cons.mps
mat_props_names = cons.mp_names
pretty_mp_names = cons.mp_names_dict
# %%
def plot_compare_lcs(times,
maes,
mat_prop,
classic_results=None,
ax=None):
mp_sym_dict = cons.mp_sym_dict
mp_units_dict = cons.mp_units_dict
fig = None
if classic_results is not None:
classic_time = classic_results[0]
classic_mae = classic_results[1]
crab_time, dense_time = times
crab_mae, dense_mae = maes
x_crab = np.arange(len(crab_mae))
x_dense = np.arange(len(dense_mae))
x_crab = np.linspace(0, crab_time, len(crab_mae))
x_dense = np.linspace(0, dense_time, len(dense_mae))
# Plot training curve
if ax is None:
fig, ax = plt.subplots(figsize=(6, 6))
ax.plot(x_crab, crab_mae,
'-', color=cons.crab_red, marker='o', ms=0, alpha=1,
label='CrabNet')
ax.plot(x_dense, dense_mae,
'-', color=cons.dense_blue, marker='s', ms=0, alpha=1,
label='DenseNet')
ax.axhline(np.min(dense_mae), color=cons.dense_blue, linestyle='--',
alpha=1)
ax.set_xlabel('Training time [s]')
ax.plot([crab_time, dense_time], [crab_mae.iloc[-5:].mean(),
dense_mae.iloc[-5:].mean()],
'kX', ms=14, mfc='gold', label='1000 epochs')
ymax = 1.5*np.mean(dense_mae)
if classic_results is not None:
classic_x = classic_time
classic_y = 1.5*np.mean(dense_mae)
if classic_time > 1.2 * np.max(crab_time):
classic_x = np.max(crab_time)
ax.plot([classic_x*(14/20), classic_x], [classic_mae, classic_mae],
'g-', linewidth=5)
ax.plot(classic_x, classic_mae, '>', mec='green', ms=12,
mfc='white', mew=3, label='Best classic')
ax.text(classic_x, classic_mae, f'({classic_time:0.0f} s) \n',
horizontalalignment='right', verticalalignment='center')
elif classic_mae > ymax:
classic_mae = ymax * 0.97
ax.plot([classic_x, classic_x], [classic_mae*(16.5/20), classic_mae],
'g-', linewidth=5)
ax.plot(classic_x, classic_mae, '^', mec='green', ms=12,
mfc='white', mew=3, label='Best classic')
txt = f'\n\n({classic_mae:0.2f} {mp_units_dict[mat_prop]}) '
ax.text(classic_x, classic_mae*(16.5/20), txt,
horizontalalignment='center', verticalalignment='center')
else:
ax.plot(classic_x, classic_mae, 'o', mec='green', ms=12,
mfc='white', mew=4, label='Best classic')
ax.set_ylabel(f'MAE of {mp_sym_dict[mat_prop]} '
f'[{mp_units_dict[mat_prop]}]')
ax.set_ylim(np.min(crab_mae)/1.5, ymax)
ax.tick_params(left=True, top=True, right=True, direction='in', length=7)
ax.tick_params(which='minor', left=True, top=True, right=True,
direction='in', length=4)
minor_locator_x = AutoMinorLocator(2)
minor_locator_y = AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minor_locator_x)
ax.yaxis.set_minor_locator(minor_locator_y)
# Get all plot labels for legend and label legend
lines, labels = ax.get_legend_handles_labels()
ax.legend(lines,
labels,
loc='best',
prop={'size': 12})
if fig is not None:
return fig
def multi_plots_lcs(nn_dir, classics_dir):
files = os.listdir(classics_dir)
classics_results_csv = classics_dir + [file for file in files
if 'test_scores.csv' in file][0]
df_classics = pd.read_csv(classics_results_csv)
files = os.listdir(nn_dir)
# print(files)
nn_results_csv = nn_dir + [file for file in files
if 'all_results' in file
if '.csv' in file][0]
df_nn = pd.read_csv(nn_results_csv)
mat_props = df_nn['mat_prop'].unique()
seeds = df_nn['rng_seed'].unique()
seed_values = {seed: 0 for seed in seeds}
df_crabnet = df_nn[df_nn['model_type'] == 'CrabNet']
for mp in mat_props:
df_mp = df_crabnet
mp_bools = df_mp['mat_prop'] == mp
best_mae = np.min(df_mp[mp_bools]['mae_val'])
pc_mae = (df_mp[mp_bools]['mae_val'] - best_mae) / best_mae
imp_col = pd.Series(pc_mae, name='improvement')
df_mp = pd.concat([df_mp, imp_col], axis=1)
df_mp = df_mp[df_mp['mat_prop'] == mp].sort_values(by='improvement')
df_mp_seeds = df_mp['rng_seed']
for i, seed in enumerate(df_mp_seeds):
seed_values[seed] += (df_mp.iloc[i]['improvement'])
ranked_seeds = pd.Series(seed_values).sort_values()
seed = ranked_seeds.index[0]
df_nn = df_nn[df_nn['rng_seed'] == seed]
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
mats = ['energy_atom', 'Egap', 'agl_thermal_conductivity_300K',
'ael_debye_temperature']
for mp, ax in zip(mats, axes.ravel()):
run_ids = df_nn[df_nn['mat_prop'] == mp]
crab_id = run_ids[run_ids['model_type'] == 'CrabNet']['id'].values[0]
dense_id = run_ids[run_ids['model_type'] == 'DenseNet']['id'].values[0]
crab_df = pd.read_csv(f'{nn_dir}/{crab_id}/progress.csv')
dense_df = pd.read_csv(f'{nn_dir}/{dense_id}/progress.csv')
crab_maes = crab_df['mae_val']
dense_maes = dense_df['mae_val']
crab_bools = run_ids['model_type'] == 'CrabNet'
dense_bools = run_ids['model_type'] == 'DenseNet'
crab_time = run_ids[crab_bools]['fit_time'].values[0]
dense_time = run_ids[dense_bools]['fit_time'].values[0]
df_classic = df_classics[df_classics['mat_prop'] == mp]
classic_mae = df_classic['mae_test'].values[0]
classic_time = df_classic['fit_time'].values[0]
plot_compare_lcs((crab_time, dense_time),
(crab_maes, dense_maes),
mp,
(classic_time, classic_mae),
ax=ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/learning_curves/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_learning_curve.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def plot_dense_crab_preds(mp, ax):
test_file = f'test_files/{mp}_test.csv'
fig = None
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
y_act_dense, y_pred_dense = predict_densenet(mp, test_file)
fig_dense = publication_plot_pred_act(y_act_dense,
y_pred_dense,
mat_prop=mp,
model='DenseNet',
ax=ax[0])
y_act_crab, y_pred_crab = predict_crabnet(mp, test_file)
fig_crab = publication_plot_pred_act(y_act_crab,
y_pred_crab,
mat_prop=mp,
model='CrabNet',
ax=ax[1])
if fig is not None:
return fig
def multi_plots_preds():
mat_props = ['energy_atom', 'Egap']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, mp in enumerate(mat_props):
ax = axes[i, :]
ax = plot_dense_crab_preds(mp, ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/pred_vs_act/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_pred_vs_act.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def plot_dense_crab_residuals(mp, ax):
test_file = f'test_files/{mp}_test.csv'
fig = None
if ax is None:
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
y_act_dense, y_pred_dense = predict_densenet(mp, test_file)
fig_dense = publication_plot_residuals(y_act_dense,
y_pred_dense,
mat_prop=mp,
model='DenseNet',
ax=ax[0])
y_act_crab, y_pred_crab = predict_crabnet(mp, test_file)
fig_crab = publication_plot_residuals(y_act_crab,
y_pred_crab,
mat_prop=mp,
model='CrabNet',
ax=ax[1])
y0_min, y0_max = ax[0].get_ylim()
y1_min, y1_max = ax[1].get_ylim()
y_min_min = np.min([y0_min, y1_min])
y_max_max = np.max([y0_max, y1_max])
ax[0].set_ylim(y_min_min, y_max_max)
ax[1].set_ylim(y_min_min, y_max_max)
if fig is not None:
return fig
def multi_plots_residuals():
mat_props = ['energy_atom', 'Egap']
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, mp in enumerate(mat_props):
ax = axes[i, :]
ax = plot_dense_crab_residuals(mp, ax)
plt.subplots_adjust(wspace=0.22)
out_dir = r'figures/residuals/'
os.makedirs(out_dir, exist_ok=True)
fig_file = os.path.join(out_dir, f'four_panel_residuals.png')
if fig is not None:
fig.savefig(fig_file,
dpi=300,
bbox_inches='tight')
# %%
def get_figures(nn_dir, classics_dir):
files = os.listdir(classics_dir)
classics_results_csv = classics_dir + [file for file in files if
'test_scores.csv' in file][0]
df_classics = pd.read_csv(classics_results_csv)
files = os.listdir(nn_dir)
# print(files)
nn_results_csv = nn_dir + [file for file in files
if 'all_results' in file
if '.csv' in file][0]
df_nn = pd.read_csv(nn_results_csv)
mat_props = df_nn['mat_prop'].unique()
seeds = df_nn['rng_seed'].unique()
seed_values = {seed: 0 for seed in seeds}
df_crabnet = df_nn[df_nn['model_type'] == 'CrabNet']
for mp in mat_props:
df_mp = (df_crabnet[df_crabnet['mat_prop'] == mp]
.sort_values(by='mae_val'))
df_mp_seeds = df_mp['rng_seed']
for i, seed in enumerate(df_mp_seeds):
seed_values[seed] += i
ranked_seeds = pd.Series(seed_values).sort_values()
seed = ranked_seeds.index[0]
df_nn = df_nn[df_nn['rng_seed'] == seed]
for mp in mat_props:
run_ids = df_nn[df_nn['mat_prop'] == mp]
crab_id = run_ids[run_ids['model_type'] == 'CrabNet']['id'].values[0]
dense_id = run_ids[run_ids['model_type'] == 'DenseNet']['id'].values[0]
crab_df =
|
pd.read_csv(f'{nn_dir}/{crab_id}/progress.csv')
|
pandas.read_csv
|
import pandas as pd
from matplotlib import pyplot as plt
df = pd.read_csv('.csv')
delta_crix = list()
delta_crix.append(0)
for index, row in df.iterrows():
if index == 0:
print(0)
else:
new_delta = (df.at[index, 'CRIX'] - df.at[index - 1, 'CRIX']) / df.at[index - 1, 'CRIX']
delta_crix.append(new_delta)
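# Equivalent vectorized sketch (assumes the same 'CRIX' column as above): pandas'
# pct_change computes the same day-over-day relative change; its first value is
# NaN instead of the 0 used above, so it is filled here for parity.
delta_crix_vectorized = df['CRIX'].pct_change().fillna(0).tolist()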
plt.plot(delta_crix)
plt.xlabel('days')
plt.ylabel('evolution of CRIX indicator')
plt.title(' CRIX index')
plt.show()
df2 = pd.DataFrame({'deltasent': delta_crix})
df['delta_sent'] = df2
k = 0.05
df1 =
|
pd.read_csv('BTCUSDT-1d.csv')
|
pandas.read_csv
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 11:49:28 2020
@author: juanporras
"""
import pandas as pd
import numpy as np
import json
import urllib.request
from urllib.request import urlopen
config = {'displayModeBar': False}
from cleaning_datas import df
df_US=df[(df["Country_Region"]=="US")]
df_US = df_US.groupby(['Last_Update', 'Country_Region','Province_State']).sum().loc[:,['Confirmed','Recovered','Deaths']].reset_index()
df_US["Last_Update"] = pd.to_datetime(df_US["Last_Update"]).dt.strftime('%m/%d/%Y')
Raw_Capital_dict = {
'Alabama': 'Montgomery',
'Alaska': 'Juneau',
'Arizona':'Phoenix',
'Arkansas':'Little Rock',
'California': 'Sacramento',
'Colorado':'Denver',
'Connecticut':'Hartford',
'Delaware':'Dover',
'Florida': 'Tallahassee',
'Georgia': 'Atlanta',
'Hawaii': 'Honolulu',
'Idaho': 'Boise',
'Illinois': 'Springfield',
'Indiana': 'Indianapolis',
'Iowa': 'Des Moines',
'Kansas': 'Topeka',
'Kentucky': 'Frankfort',
'Louisiana': 'Baton Rouge',
'Maine': 'Augusta',
'Maryland': 'Annapolis',
'Massachusetts': 'Boston',
'Michigan': 'Lansing',
'Minnesota': 'St. Paul',
'Mississippi': 'Jackson',
'Missouri': 'Jefferson City',
'Montana': 'Helena',
'Nebraska': 'Lincoln',
'Nevada': 'Carson City',
'New Hampshire': 'Concord',
'New Jersey': 'Trenton',
'New Mexico': 'Santa Fe',
'New York': 'Albany',
'North Carolina': 'Raleigh',
'North Dakota': 'Bismarck',
'Ohio': 'Columbus',
'Oklahoma': 'Oklahoma City',
'Oregon': 'Salem',
'Pennsylvania': 'Harrisburg',
'Rhode Island': 'Providence',
'South Carolina': 'Columbia',
'South Dakota': 'Pierre',
'Tennessee': 'Nashville',
'Texas': 'Austin',
'Utah': 'Salt Lake City',
'Vermont': 'Montpelier',
'Virginia': 'Richmond',
'Washington': 'Olympia',
'West Virginia': 'Charleston',
'Wisconsin': 'Madison',
'Wyoming': 'Cheyenne'
}
Capital_dict = dict(map(reversed, Raw_Capital_dict.items()))
df_US['State'] = df_US['Province_State'].replace(Capital_dict)
State_dict = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
State_inverse_dict = dict(map(reversed, State_dict.items()))
list_us=df_US.loc[df_US["Country_Region"]=="US","State"].reset_index(drop=True)
for elem in range(0,len(list_us)):
if len(list_us[elem].split(", ",1))==2:
list_us[elem]=list_us[elem].split(", ",1)[1].replace(".","")[0:2]
if list_us[elem]=="US":
list_us[elem]=float("NaN")
else:
if list_us[elem].split(", ",1)[0] in State_dict:
list_us[elem]=State_dict[list_us[elem].split(", ",1)[0]]
else:
if list_us[elem].split(", ",1)[0]=="Chicago":
list_us[elem]="IL"
else:
list_us[elem]=float("NaN")
df_US['State_Code'] = list_us
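# Worked example of the parsing above (hypothetical entries): "Chicago, IL"
# keeps the two-letter code "IL", a bare "US" entry becomes NaN, and full
# state names such as "New York" are mapped to "NY" via State_dict.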
### Load Json File
url_us="https://raw.githubusercontent.com/jgoodall/us-maps/master/geojson/state.geo.json"
with urlopen(url_us) as response_us:
states_us = json.load(response_us)
for i in range(0,len(states_us["features"])):
states_us["features"][i]["id"] = states_us["features"][i]["properties"]["STUSPS10"]
States = []
for i in range(0,len(states_us["features"])):
state = states_us["features"][i]["id"]
States.append(state)
S1 = set(States)
S2 = set(df_US['State_Code'].unique())
S2-S1
# Center for Disease Control and Prevention
# Provisional Covid19 Death Count by week, ending date and state (Deaths)
deaths = pd.read_csv("https://data.cdc.gov/api/views/r8kw-7aab/rows.csv?accessType=DOWNLOAD")
# Race and Hispanic Origin (Deaths)
Demographic = pd.read_csv("https://data.cdc.gov/api/views/pj7m-y5uh/rows.csv?accessType=DOWNLOAD")
Demographic['State Name'] = Demographic['State'].replace(State_inverse_dict)
# Three Different Datasets
cond1 = (Demographic['Indicator']=='Distribution of COVID-19 deaths (%)')
cond2 = (Demographic['Indicator']=='Weighted distribution of population (%)')
cond3 = (Demographic['Indicator']=='Unweighted distribution of population (%)')
Deaths_Covid = Demographic[cond1].drop(columns=['Indicator','Footnote'])
Weighted_pop = Demographic[cond2]
Unweighted_pop = Demographic[cond3]
# Tests DATASET
Current_State =
|
pd.read_csv('https://covidtracking.com/api/v1/states/current.csv')
|
pandas.read_csv
|
import pandas as pd
import numpy as np
from scipy import signal
import os
def get_timedeltas(login_timestamps, return_floats=True):
"""
Helper function that returns the time differences (delta t's) between consecutive logins for a user.
We just input the datetime stamps as an index, hence this method will also work when called on a DataFrame of
customer logins.
Parameters:
login_timestamps (pd.Series): DatetimeIndex from a series or dataframe with user logins. Can be used on both binary
timeseries as returned by the method construct_binary_visit_series (see above) or from the DataFrame holding the
logins directly.
return_floats (bool): Whether or not to return the times as timedifferences (pd.Timedelta objects) or floats.
Returns:
timedeltas (list of objects): List of time differences, either in pd.Timedelta format or as floats.
"""
if len(login_timestamps.index) <= 1:
raise ValueError("Error: For computing time differences, the user must have more than one registered login")
#get the dates on which the customer visited the gym
timedeltas = pd.Series(login_timestamps.diff().values, index=login_timestamps.values)
#realign the series so that a value on a given date represents the time in days until the next visit
timedeltas = timedeltas.shift(-1)
timedeltas.dropna(inplace=True)
if return_floats:
timedeltas = timedeltas / pd.Timedelta(days=1)
return timedeltas
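# Minimal usage sketch for get_timedeltas (hypothetical logins, not real data):
def _demo_get_timedeltas():
    """Three visits spaced 3 and 5 days apart yield the float gaps [3.0, 5.0]."""
    example_logins = pd.Series(
        pd.to_datetime(["2021-01-01", "2021-01-04", "2021-01-09"]))
    return get_timedeltas(example_logins, return_floats=True)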
def write_timedeltas_to_file(login_data, filename, is_sorted=False, num_users=None, minimum_deltas=2, verbose=False, compression="infer"):
"""
Function to write timedelta data to a file for HMM analysis.
login_data: pd.DataFrame, login_data for analysis
filename: Output write
num_users: Number of sequences to write, default None (= write whole dataset)
compression: pandas compression type
"""
if os.path.exists(os.getcwd() + "/" + filename):
print("The file specified already exists. It will be overwritten in the process.")
os.remove(filename)
#get the visit counts for each customer
visit_numbers = login_data["CUST_CODE"].value_counts().astype(int)
#visit number must be larger than minimum_deltas, since we need two timedeltas for HMM estimation
eligibles = visit_numbers[visit_numbers > minimum_deltas]
ineligibles_data = login_data[~login_data.CUST_CODE.isin(eligibles.index)]
login_data_cleaned = login_data.drop(ineligibles_data.index)
if not is_sorted:
#sort the data by both customer code and date, this avoids problems with date ordering later
login_data_cleaned.sort_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True)
num_logins = len(login_data_cleaned.index)
if num_users is None:
num_users = len(eligibles.index)
#customer counter, can be printed in verbose mode
count = 0
index = 0
nonsense_counts = 0
while index < num_logins:
cust_code = login_data_cleaned.iloc[index].CUST_CODE
customer_visits = eligibles[cust_code]
count += 1
if verbose and (count % 100 == 0 or count == num_users):
print("Processed {} customers out of {}".format(count, num_users))
#select logins with the specified customer code
customer_logins = login_data_cleaned.iloc[index:index+customer_visits]
visiting_dates = customer_logins.DATE_SAVED #pd.DatetimeIndex([visit_date for visit_date in customer_logins.DATE_SAVED])
#extract the timedeltas
timedeltas = get_timedeltas(visiting_dates, return_floats=True)
#since timedeltas involve differencing, the first value will be NaN - we drop it
timedeltas.dropna(inplace=True)
#logins with timedelta under 5 minutes are dropped
thresh = 5 * (1 / (24 * 60))
#drop all timedeltas under the threshold
eligible_tds = timedeltas[timedeltas > thresh]
if len(eligible_tds.index) < minimum_deltas:
nonsense_counts += 1
index += customer_visits
continue
timedeltas_df = eligible_tds.to_frame().T
#mode='a' ensures that the data are appended instead of overwritten
timedeltas_df.to_csv(filename, mode='a', header=False, compression=compression, index=False, sep=";")
if count >= num_users:
break
index += customer_visits
print("Found {} users with too many artefact logins".format(nonsense_counts))
def get_timedelta_sample(login_data, is_sorted=False, num_users=None, minimum_deltas=2, verbose=False):
"""
Function to write timedelta data to a file for HMM analysis.
login_data: pd.DataFrame, login_data for analysis
filename: Output write
num_users: Number of sequences to write, default None (= write whole dataset)
"""
#get the visit counts for each customer
visit_numbers = login_data["CUST_CODE"].value_counts().astype(int)
#visit number must be larger than minimum_deltas, since we need two timedeltas for HMM estimation
eligibles = visit_numbers[visit_numbers > minimum_deltas]
ineligibles_data = login_data[~login_data.CUST_CODE.isin(eligibles.index)]
login_data_cleaned = login_data.drop(ineligibles_data.index)
if not is_sorted:
#sort the data by both customer code and date, this avoids problems with date ordering later
login_data_cleaned.sort_values(by=["CUST_CODE", "DATE_SAVED"], inplace=True)
num_logins = len(login_data_cleaned.index)
if num_users is None:
num_users = len(eligibles.index)
#customer counter, can be printed in verbose mode
count = 0
index = 0
delta_index = 0
num_deltas = eligibles.sum() - len(eligibles.index)
timedelta_sample = np.zeros(num_deltas)
while index < num_logins:
cust_code = login_data_cleaned.iloc[index].CUST_CODE
customer_visits = eligibles[cust_code]
#select logins with the specified customer code
customer_logins = login_data_cleaned.iloc[index:index+customer_visits]
visiting_dates = customer_logins.DATE_SAVED
#extract the timedeltas
timedeltas = get_timedeltas(visiting_dates, return_floats=True)
#since timedeltas involve differencing, the first value will be NaN - we drop it
timedeltas.dropna(inplace=True)
#add list
try:
timedelta_sample[delta_index:delta_index+customer_visits-1] = timedeltas.values
except:
print("#index: {}".format(index))
print("#length of td vector: {}".format(num_deltas))
count += 1
if count >= num_users:
if verbose:
print("Checked {} customers out of {}".format(count, num_users))
break
if verbose and (count % 100 == 0):
print("Checked {} customers out of {}".format(count, num_users))
index += customer_visits
delta_index += customer_visits - 1
#threshold of 5 minutes to sort out artifact logins
thresh = 5 * (1 / (24 * 60))
td_sample = pd.Series(timedelta_sample)
td_sample = td_sample[td_sample > thresh]
return td_sample
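# Minimal usage sketch for get_timedelta_sample (hypothetical customers; the
# column names CUST_CODE and DATE_SAVED follow the functions above):
def _demo_get_timedelta_sample():
    """Two customers with 4 and 3 logins give a pooled sample of 5 gaps."""
    demo = pd.DataFrame({
        "CUST_CODE": ["A"] * 4 + ["B"] * 3,
        "DATE_SAVED": pd.to_datetime(
            ["2021-01-01", "2021-01-03", "2021-01-06", "2021-01-10",
             "2021-02-01", "2021-02-05", "2021-02-12"]),
    })
    return get_timedelta_sample(demo, num_users=2)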
def get_periodogram(timeseries, fs=1.0, window="hann", window_size=14, detrend="constant", return_onesided=True, scaling="density", mode="radians"):
"""
Wrapped method that returns the smoothed periodogram for an input time series.
Arguments:
timeseries: pd.Series object
fs: Sampling or binning rate
window: str, matches the options in scipy.signal.get_window
window_size: int, positive. Window size for smoothing
detrend: None, "constant" or "linear". If not None, a call to scipy.signal.detrend will be made.
scaling: Whether or not to normalize the periodogram.
"""
N = len(timeseries.index)
freqs, periodogram = signal.periodogram(timeseries.values, fs=fs, detrend=detrend, return_onesided=return_onesided, scaling=scaling)
if mode == "radians":
#if radians mode, scale by 2 * pi
freqs *= 2 * np.pi
periodogram /= N
#undo some of the periodogram normalization BS
if return_onesided:
if N % 2 == 0:
periodogram[1:] /= 2
else:
periodogram[1:-1] /= 2
#get window function
if window != "none":
window = signal.get_window(window, window_size)
window = window / np.sum(window)
#smooth the periodogram by convolving with a window function
smoothed_periodogram = signal.convolve(periodogram, window, mode="same")
return
|
pd.Series(smoothed_periodogram, index=freqs)
|
pandas.Series
|
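# Usage sketch for get_periodogram above (hypothetical weekly signal): a pure
# sinusoid with a 7-sample period should peak near 2*pi/7 rad after smoothing.
_t = np.arange(364)
_demo_pgram = get_periodogram(pd.Series(np.sin(2 * np.pi * _t / 7)),
                              window="hann", window_size=7)
_demo_peak = _demo_pgram.idxmax()  # expected close to 2 * np.pi / 7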
#!/usr/bin/env python3
"""Converts otu tables from diatom analysis into a suitable format for uploading to BIOSYS.
Additionally it will create a "otus_all.xlsx" for community analysis.
If extraction sheets are provided in an appropriate format then it will also calculate
the most similar sample and provide the Baroni–Urbani–Buser coefficient similarity
between the 2 samples."""
import sys
import os
import glob
import argparse
import pandas as pd
import send2trash
import re
from math import sqrt
import statistics as stat
from metadata import *
#Class Objects:
class Biosys_Version:
"""Stores and reports information about the current version of biosys.py."""
version = ("\033[1;34m" + "\nBiosys Version: 2019.10" + "\033[0m")
known_issues = '''\033[93mThis program is still in WIP stage \033[0m
current issues:
-Multicore processing not supported.
-Baroni-Urbani-Buser similarity not at 100% functional due to:
-Takes too long to perform for entire dataset. -solved by optional?
-Cut off for distance not determined.
-Similarity doesn't take into account repeat samples.
'''
messages = [version,known_issues]
class FormatError(Exception):
'''Formatting of file is incompatible with this program.'''
pass
class MathsError(Exception):
'''A Maths or Logic error has occurred in the code.'''
pass
class Diatom_Sample:
"""A slice of an OTU table, and associated metadata for a diatom sample."""
#this imports data from the table from Tim
def __init__(self, sampleid, siteid, area, region, prn, sitename, sampledate, barcode, folder):
if folder:
try:
self.folder = str(int(folder))
except ValueError:
self.folder = str(folder)
else:
self.folder = no_value
if sampleid:
try:
self.sampleid = str(int(str(sampleid).replace(".","")))
except ValueError:
self.sampleid = str(sampleid)
else:
self.sampleid = "F" + str(self.folder)
if siteid:
self.siteid = str(int(str(siteid).replace(".","")))
else:
self.siteid = no_value
if area:
self.area = get_capital(str(area))
else:
self.area = no_value
if region:
self.region = get_capital(str(region))
if region == "nan":
print("Sample " + self.folder + " has no region, check input metadata file.")
self.region = no_value
else:
self.region = no_value
if self.region == no_value or self.area == no_value:
pass
else:
self.reg_area = get_initials(region) + "-" + get_capital(area)
if prn:
try:
self.prn = int(prn)
except ValueError:
self.prn = prn
else:
self.prn = no_value
if sitename:
try:
regex = re.compile('[^a-zA-Z ]')
self.sitename = regex.sub('', sitename)
except AttributeError:
self.sitename = no_value
except TypeError:
self.sitename = no_value
else:
self.sitename = no_value
if sampledate:
self.sampledate = sampledate
else:
self.sampledate = no_value
if barcode:
self.barcode = barcode
else:
self.barcode = no_value
#sets these values to defaults just so if they arent added later they have a value
self.batch_num = no_value
self.count = 0
self.pass_fail = "Unsuccessful"
self.analysis_date = no_value
self.otu_tab = None
self.sim = 0
self.sim_sample = no_value
self.note = ""
self.plate_loc = no_value
def assign_results(self, otus, batch_num):
'''Assigns otu table results to the sample.'''
self.otu_tab = otus
try:
count = self.otu_tab[str(self.folder)].sum()
self.count = count
except KeyError:
self.count = 0
print("Seq count for " + str(self.folder) + " has been set to 0.")
if self.count >= 3000:
self.pass_fail = "Successful"
if batch_num:
self.batch_num = str(batch_num).split(".")[0]
try:
date = batch_num_dict[self.batch_num]
except KeyError:
date = "Run metadata has not been set"
print(date + " for sample: " + str(self.folder) + " " + str(self.batch_num))
self.analysis_date = date
else:
self.batch_num = no_value
self.analysis_date = no_value
def set_analysis_date(self):
'''Sets the date of analysis to the date of the MiSeq run.'''
if self.batch_num == no_value:
self.analysis_date = no_value
else:
try:
date = batch_num_dict[self.batch_num]
except KeyError:
date = "Run metadata has not been set"
print(date + " for sample: " + str(self.folder) + " " + str(self.batch_num))
self.analysis_date = date
def sort_control(self):
if self.region == "Control":
s_loc = str(self.folder).rfind("S")
if s_loc == -1:
self.folder = self.folder
else:
self.folder = str(self.folder)[0:s_loc]
self.sampleid = self.folder + "_" + str(self.batch_num)
if self.folder.lower()[0] == "b":
self.region = "Blanks"
elif self.folder.lower()[0] == "n":
self.region = "NTCs"
elif self.folder.lower()[0] == "p":
self.region = "Positives"
elif self.folder.lower()[0] == "g":
self.region = "Gblocks"
elif self.folder.lower()[0] == "t":
self.region = "TR"
else:
self.region = "Unknowns"
self.sampleid = "F" + str(self.folder)
self.folder = self.folder.upper()
def assign_surrounding_samples(self, sur_coords, row, col, sheet_name):
'''Assigns the plate, coordinate, and surrounding coordinates for similarity analysis.'''
if sur_coords:
self.sur_samples = sur_coords
if row and col:
self.plate_loc = [row,col]
if sheet_name:
self.plate = str(sheet_name)
def assign_most_sim_sample(self, sim, sample):
'''Assigns the most similar surrounding sample and the similarity.'''
self.sim = sim
self.sim_sample = sample
def amend_sample_note(self, note):
if self.note == "":
self.note = note
else:
self.note = self.note + "," + note
#Global functions:
def community_analysis_export(samples_otus, keep_list, control_regions):
otus_all_writer = pd.ExcelWriter("otus_all.xlsx")
print("Exporting all otus for community analysis")
if keep_list[0] == "all":
keep_list = ["Anglian", "Midlands", "South West",
"Southern", "North West", "North East",
"Thames", "Unknowns", "Blanks",
"Positives", "Gblocks", "NTCs",
"TR", "Aberdeen", "Perth",
"Eurocentrl", "Dingwall", "Dumfries",
"Galashiels", "Bowlblank" ]
for sample in samples_otus:
if sample.count >= 1:
if sample.region in keep_list:
if sample.region in control_regions:
try:
sample.otu_tab.columns = ["PrefTaxon", sample.sampleid]
interim_df = sample.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample.otu_tab
else:
try:
interim_df = sample.otu_tab
main_df = pd.merge(main_df, interim_df, on="PrefTaxon")
except NameError:
main_df = sample.otu_tab
df = format_df(main_df)
df = df.transpose()
df.to_excel(otus_all_writer, sheet_name="comunity_analysis", header=True, index=True)
otus_all_writer.save()
def add_biosys_headers(df, biosys_dict):
biosys_df = pd.DataFrame.from_dict(biosys_dict, orient='columns')
biosys_df = biosys_df.rename(index={0:'siteid',1:'sampleid'})
df = pd.concat([biosys_df, df], sort=True)
return df
def format_df(df):
df = df[~df.PrefTaxon.str.contains("batch_num")]
df = df.set_index(list(df)[0])
df = df.loc[(df!=0).any(axis=1)]
return df
def filter_otus_by_region(region, samples_otus, writer, control_regions):
print(region)
if region == no_value:
no_reg = open("samples_with_no_region_values.text", "w")
no_reg.write("Region for the below samples is " + region + "\n")
no_reg.write("Folder_id\tCounts\tSample_id\tSite_id\tPRN\n")
for sample in metadata_list:
if sample.region == region:
no_reg.write(sample.folder + "\t" + str(sample.count) + "\t" + sample.sampleid + "\t" + sample.siteid + "\t" + str(sample.prn) + "\n")
no_reg.close()
elif region == "TR":
biosys_siteid_dict = {}
for sample_tr in samples_otus:
if sample_tr.region == "TR":
if sample_tr.count >= 3000:
if sample_tr.folder[2] == "3":
sample_original_fn = sample_tr.folder[2:8]
elif sample_tr.folder[2] == "4":
sample_original_fn = sample_tr.folder[2:9]
else:
sample_original_fn = sample_tr.folder
try:
interim_df = sample_tr.otu_tab
main_df =
|
pd.merge(main_df, interim_df, on="PrefTaxon")
|
pandas.merge
|
# coding: utf-8
"""Mapping of production and consumption mixes in Europe and their effect on
the carbon footprint of electric vehicles
This code performs the following:
- Import data from ENTSO-E (production quantities, trades relationships)
- Calculates the production and consumption electricity mixes for European countries
- Calculates the carbon footprint (CF) for the above electricity mixes
- Calculates the production, use-phase and end-of-life emissions for battery electric vehicles (BEVs) under
the following assumptions:
- Production in Korea (with electricity intensity 684 g CO2-eq/kWh)
- Use phase uses country-specific production and consumption mix
- End-of-life emissions static for all countries
Requires the following files for input:
- ENTSO_production_volumes.csv (from hybridized_impact_factors.py)
- final_emission_factors.csv (from hybridized_impact_factors.py)
- trades.csv (from hybridized_impact_factors.py)
- trade_ef_hv.csv (from hybridized_impact_factors.py)
- API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv (transmission losses, from OECD)
- car_specifications.xlsx
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import country_converter as coco
import logging
#%% Main function
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True, BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True, production_el_intensity=679, incl_ei=False, energy_sens=False):
"""Run all electricity mix and vehicle calculations and exports results."""
# Korean el-mix 679 g CO2/kWh, from ecoinvent
fp = os.path.curdir
production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C = load_prep_el_data(fp, year)
codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data) # Leontief electricity calculations
results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation, energy_sens)
SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
return results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1), ICEV_prodEOL_impacts, ICEV_op_int, SI_fp
#%% Load and format data for calculations
def load_prep_el_data(fp, year):
"""Load electricity data and emissions factors."""
fp_output = os.path.join(fp, 'output')
# Output from bentso.py
filepath_production = os.path.join(fp_output, 'entsoe', 'ENTSO_production_volumes_'+ str(year) +'.csv')
filepath_intensities = os.path.join(fp_output, 'final_emission_factors_'+ str(year) +'.csv')
filepath_trades = os.path.join(fp_output, 'entsoe', 'trades_'+ str(year) +'.csv')
filepath_tradeonly_ef = os.path.join(fp_output, 'ecoinvent_ef_hv.csv')
# read in production mixes (annual average)
production = pd.read_csv(filepath_production, index_col=0)
production.rename_axis(index='', inplace=True)
# matrix of total imports/exports of electricity between regions; aka Z matrix
trades = pd.read_csv(filepath_trades, index_col=0)
trades.fillna(0, inplace=True) # replace np.nan with 0 for matrix math, below
# manually remove Cyprus for now
production.drop(index='CY', inplace=True)
trades = trades.drop(columns='CY').drop(index='CY')
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
""" Make into sum of production and production + import - export"""
country_total_prod_disagg = production.sum(axis=1)
country_total_cons_disagg = country_total_prod_disagg + imports - exports
waste = (production['Waste'] / production.sum(axis=1))
waste_min = waste[waste > 0].min()
waste_max = waste.max()
g_raw = production.sum(axis=1) # Vector of total electricity production (regionalized)
""" Read power plant CO2 intensities [tech averages] """
# average technology CO2 intensities (i.e., non-regionalized)
all_C = pd.read_csv(filepath_intensities, index_col=0)
all_C.drop(index='CY', inplace=True)
# use ecoinvent factors for these countries as a proxy to calculate consumption mixes for receiving countries
trade_ef = pd.read_csv(filepath_tradeonly_ef, index_col=[0, 1, 2, 3], header=[0])
trade_ef.index = trade_ef.index.droplevel([0, 1, 3]) # remove DSID, activityName and productName (leaving geography)
trade_ef.index.rename('geo', inplace=True)
trade_ef.columns = ['emission factor']
# Generate regionalized tech generation matrix
C = all_C.T
C.sort_index(axis=1, inplace=True)
C.sort_index(axis=0, inplace=True)
return production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C
#%% el_calcs
def el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data):
fp_data = os.path.join(fp, 'data')
# Make list of full-country resolution
original_countries = list(production.index)
# Make list of aggregated countries (affects Nordic countries + GB (UK+NI))
# read 3-letter ISO codes
countries = list(trades.index)
""" Calculates national production mixes and consumption mixes using Leontief assumption """
# Start electricity calculations (ELFP.m)
# Calculate production and consumption mixes
# Carbon intensity of production mix
CFPI_no_TD = pd.DataFrame(production.multiply(C.T).sum(axis=1) / production.sum(axis=1), columns=['Production mix intensity']) # production mix intensity without losses
CFPI_no_TD.fillna(0, inplace=True)
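# Worked example of the weighted average above (hypothetical country): 60 TWh
# of gas at 450 g CO2-eq/kWh plus 40 TWh of hydro at 10 g CO2-eq/kWh gives a
# production mix intensity of (60*450 + 40*10) / 100 = 274 g CO2-eq/kWh.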
# List of countries that have trade relationships, but no production data
trade_only = list(set(trades.index) - set(production.loc[production.sum(axis=1) > 0].index))
# Add ecoinvent proxy emission factors for trade-only countries
logging.info('Replacing missing production mix intensities with values from ecoinvent:')
for country in trade_only:
if CFPI_no_TD.loc[country, 'Production mix intensity'] == 0:
logging.info(country)
CFPI_no_TD.loc[country] = trade_ef.loc[country].values
i = country_total_cons_disagg.size # Number of European regions
g = g_raw
g = g.sort_index() # total generation vector (local production for each country)
total_imported = trades.sum(axis=0) # sum rows for total imports
total_exported = trades.sum(axis=1) # sum columns for total exports
y = total_imported + g - total_exported # total final demand (consumption) of electricity
q = g + total_imported # vector of total consumption
q.replace(np.nan, 0, inplace=True)
if flowtrace_el:
# For flow tracing approach: make Leontief production functions (normalize columns of A)
# normalized trade matrix quadrant
Atmx = pd.DataFrame(np.matmul(trades, np.linalg.pinv(np.diag(q))))
# normalized production matrix quadrant
Agen = pd.DataFrame(np.diag(g) * np.linalg.pinv(np.diag(q)), index=countries, columns=countries) # coefficient matrix, generation
# "Trade" Leontief inverse
# Total imports from region i to j per unit demand on j
Ltmx = pd.DataFrame(np.linalg.pinv(np.identity(i) - Atmx), trades.columns, trades.index)
# Production in country i for trade to country j
# Total generation in i (rows) per unit demand j
Lgen = pd.DataFrame(np.matmul(Agen, Ltmx), index=Agen.index, columns=Ltmx.columns)
y_diag = pd.DataFrame(np.diag(y), index=countries, columns=countries)
# total imports for given demand
Xtmx = pd.DataFrame(np.matmul(np.linalg.pinv(np.identity(i) - Atmx), y_diag))
# Total generation to satisfy demand (consumption)
Xgen = np.matmul(np.matmul(Agen, Ltmx), y_diag)
Xgen.sum(axis=0)
Xgen_df = pd.DataFrame(Xgen, index=Agen.index, columns=y_diag.columns)
# ### Check electricity generated matches demand
totgen = Xgen.sum(axis=0)
r_gendem = totgen / y # All countries should be 1
#%% Generation technology matrix
# TC is a country-by-generation technology matrix - normalized to share of total domestic generation, i.e., normalized generation/production mix
# technology generation, kWh/ kWh domestic generated electricity
TC = pd.DataFrame(np.matmul(np.linalg.pinv(np.diag(g)), production), index=g.index, columns=production.columns)
TCsum = TC.sum(axis=1) # Quality assurance - each country should sum to 1
# Calculate technology generation mix in GWh based on production in each region
TGP = pd.DataFrame(np.matmul(TC.transpose(), np.diag(g)), index=TC.columns, columns=g.index) #.== production
# Carbon intensity of consumption mix
CFCI_no_TD = pd.DataFrame(np.matmul(CFPI_no_TD.T.values, Lgen), columns=CFPI_no_TD.index).T
else:
# Use grid-average assumption for trade
prod_emiss = production.multiply(C.T).sum(axis=1)
trade_emiss = (pd.DataFrame(np.diag(CFPI_no_TD.iloc(axis=1)[0]), index=CFPI_no_TD.index, columns=CFPI_no_TD.index)).dot(trades)
CFCI_no_TD = pd.DataFrame((prod_emiss + trade_emiss.sum(axis=0) - trade_emiss.sum(axis=1)) / y)
CFCI_no_TD.columns = ['Consumption mix intensity']
# use ecoinvent for missing countries
if incl_ei:
CFCI_no_TD.update(trade_ef.rename(columns={'emission factor':'Consumption mix intensity'}))
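# Worked micro-example of the flow-tracing step (hypothetical 2-country case):
# with generation g = [100, 50], a single trade flow of 10 from country 1 to
# country 2, and q = g + imports = [100, 60], the normalized trade quadrant is
# Atmx = [[0, 10/60], [0, 0]], Agen = diag([1, 5/6]), and
# Lgen = Agen @ inv(I - Atmx) = [[1, 1/6], [0, 5/6]], i.e. one sixth of
# country 2's consumption mix is traced back to generation in country 1.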
#%% Calculate losses
# Transpose added after removing country aggregation as data pre-treatment
if include_TD_losses:
# Calculate technology characterization factors including transmission and distribution losses
# First, read transmission and distribution losses, downloaded from World Bank economic indicators (most recent values from 2014)
if isinstance(include_TD_losses, float):
TD_losses = include_TD_losses # apply constant transmission and distribution losses to all countries
elif isinstance(include_TD_losses, bool):
losses_fp = os.path.join(fp_data, 'API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv')
try:
TD_losses = pd.read_csv(losses_fp, skiprows=[0,1,2,3], usecols=[1, 58], index_col=0)
TD_losses = TD_losses.iloc[:, -7:].dropna(how='all', axis=1)
TD_losses = TD_losses.apply(lambda x: x / 100 + 1) # convert losses to a multiplicative factor
# ## Calculate total national carbon emissions from el - production and consumption mixes
TD_losses.index = coco.convert(names=TD_losses.index.tolist(), to='ISO2', not_found=None)
TD_losses = TD_losses.loc[countries]
TD_losses = pd.Series(TD_losses.iloc[:, 0])
except:
print("Warning! Transmission and distribution losses input files not found!")
TD_losses = pd.Series(np.zeros(len(production.index)), index=production.index)
else:
print('invalid entry for losses')
# Calculate carbon intensity of production and consumption mixes including losses
CFPI_TD_losses = CFPI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0) # apply transmission and distribution losses to production mix intensity
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0)
if len(CFCI_TD_losses) < len(CFPI_TD_losses):
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0)
CFPI = CFPI_TD_losses
CFCI = CFCI_TD_losses
else:
CFPI = CFPI_no_TD
CFCI = CFCI_no_TD
elmixes = (CFPI.copy()).join(CFCI.copy()).T
#%%
# Aggregate multi-nodes to single countries using weighted average of production/consumption as appropriate
country_total_prod_disagg.columns = ["Total production (TWh)"]
country_total_prod_disagg.index = original_countries
country_total_cons_disagg.columns = ["Total consumption (TWh)"]
country_total_cons_disagg.index = original_countries
country_el = pd.concat([country_total_prod_disagg, country_total_cons_disagg], axis=1)
country_el.columns = ['Total production (TWh)', 'Total consumption (TWh)']
CFEL_mixes = elmixes.T
CFEL = pd.concat([country_el, CFEL_mixes], axis=1)
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
CFEL['Trade percentage, gross'] = (imports + exports) / CFEL['Total production (TWh)']
CFEL['Import percentage'] = imports / CFEL['Total production (TWh)']
CFEL['Export percentage'] = exports / CFEL['Total production (TWh)']
CFEL['imports'] = imports
CFEL['exports'] = exports
#Calculate total carbon footprint intensity ratio production vs consumption
rCP = CFCI['Consumption mix intensity'].divide(CFPI['Production mix intensity'])
rCP.columns = ["ratio consumption:production mix"]
# Export intermediate variables from calculations for troubleshooting
if export_data:
keeper = run_id + "{:%d-%m-%y, %H_%M}".format(datetime.now())
fp_results = os.path.join(fp, 'results')
codecheck_file = os.path.join(os.path.abspath(fp_results), 'code_check_' + keeper + '.xlsx')
writer = pd.ExcelWriter(codecheck_file)
g.to_excel(writer, "g")
q.to_excel(writer, "q")
y.to_excel(writer, 'y')
if flowtrace_el:
Atmx.to_excel(writer, "Atmx")
Agen.to_excel(writer, "Agen")
Ltmx.to_excel(writer, "LTmx")
Lgen.to_excel(writer, "Lgen")
Xtmx.to_excel(writer, "Xtmx")
TGP.to_excel(writer, "TGP")
CFPI.T.to_excel(writer, "CFPI")
CFCI.T.to_excel(writer, "CFCI")
rCP.to_excel(writer, "rCP")
C.T.to_excel(writer, "C")
writer.save()
return codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI
#%%
def BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation=True, energy_sens=False):
"""Calculate BEV lifecycle emissions."""
# First, setup calculations
# read in data
fp_data = os.path.join(fp, 'data')
vehicle_fp = os.path.join(fp_data, 'car_specifications.xlsx')
cars = pd.read_excel(vehicle_fp, sheet_name='veh_emiss', index_col=[0, 1, 2], usecols='A:G')
cars = cars.sort_index()
vehicle_CO2 = ["BEV", "ICEV"]
if energy_sens:
# if performing the experiment for battery energy demand in manufacturing,
# update with new energy values
alt_energy = pd.read_excel(vehicle_fp, sheet_name='alt_energy', index_col=[0,1,2], usecols='A:H') # column A is scenario name
if isinstance(energy_sens, str):
cars.update(alt_energy.loc[energy_sens])
# Impacts from electricity demand in cell production
battery_prod_el = production_el_intensity / 1e6 * cars.loc["BEV", "Production el, battery"] # in t CO2/vehicle
batt_prod_impacts = cars.loc["BEV", "Production, RObattery"].add(battery_prod_el, fill_value=0).sum(axis=0)
if allocation:
alloc_share = BEV_lifetime / ((cars.loc["BEV", "Max EFC", "cycles"] * (cars.loc["BEV", "Batt size", "kWh"]*.9) * 1000) / cars.loc["BEV", "Use phase", "Wh/km"])
else:
alloc_share = 1
alloc_batt_prod_impacts = alloc_share * batt_prod_impacts
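# Worked example of the allocation above (hypothetical figures): 2000 full
# cycles * 60 kWh * 0.9 usable * 1000 / 180 Wh/km = 600,000 km of battery
# service; a 180,000 km vehicle lifetime then carries alloc_share = 0.3 of
# the battery production impacts.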
# Total vehicle production impacts - sum of battery emissions + rest of vehicle
BEV_prod_impacts = cars.loc["BEV", "Production, ROV"] + alloc_batt_prod_impacts
# Modify for battery production in Europe
# batt_prod_EU = pd.DataFrame(np.matmul(CFCI.values / 1e6, cars.loc["BEV", "Production el, battery"].values), index=CFCI.index, columns=cars.columns)
batt_prod_EU = pd.DataFrame(np.matmul((elmixes.T['Consumption mix intensity'].values / 1e6).reshape(-1, 1),
cars.loc["BEV", "Production el, battery"].values),
index=elmixes.columns, columns=cars.columns)
# Total battery production impacts in Europe
batt_prod_EU = batt_prod_EU + cars.loc["BEV", "Production, RObattery", "t CO2"]
alloc_batt_prod_EU = alloc_share * batt_prod_EU
BEV_prod_EU = pd.DataFrame(index=elmixes.columns, columns=["A", "C", "JC", "JE"])
BEV_prod_EU = alloc_batt_prod_EU + cars.loc["BEV", "Production, ROV", "t CO2"]
BEV_prod_EU.columns = pd.MultiIndex.from_product([["EUR production impacts BEV"], BEV_prod_EU.columns, ["Consumption mix"]], names=["", "Segment", "Elmix"])
# Calculate use phase emissions
segs = cars.loc['BEV', 'Use phase', 'Wh/km']
mi = pd.MultiIndex.from_product([list(elmixes.index), list(segs.index)])
segs = segs.reindex(mi, level=1)
segs = pd.DataFrame(segs)
segs.columns = ['a']
segs = segs.reindex(elmixes.columns, axis=1, method='bfill')
elmixes_for_calc = elmixes.reindex(mi, level=0, axis=0)
BEV_use = (segs.multiply(elmixes_for_calc / 1000)).T
# Add production and EOL intensity for BEVs
BEV_other = BEV_prod_impacts + cars.loc["BEV", "EOL", "t CO2"].values
BEV_other_intensity = BEV_other / BEV_lifetime * 1e6 # in g CO2-eq
BEV_other_intensity.index = ["g CO2/km"]
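# e.g. (hypothetical): 8 t CO2-eq of production + EOL emissions spread over a
# 180,000 km lifetime adds 8 / 180000 * 1e6 = 44.4 g CO2-eq/km to the use phase.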
# Calculate full lifecycle intensity using production and consumption mixes
BEVp =
|
pd.DataFrame(BEV_use["Production mix intensity"] + BEV_other_intensity.loc["g CO2/km"])
|
pandas.DataFrame
|
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas._libs.algos import Infinity, NegInfinity
import pandas.util._test_decorators as td
from pandas import DataFrame, Series
import pandas._testing as tm
class TestRank:
s = Series([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3])
df = DataFrame({"A": s, "B": s})
results = {
"average": np.array([1.5, 5.5, 7.0, 3.5, np.nan, 3.5, 1.5, 8.0, np.nan, 5.5]),
"min": np.array([1, 5, 7, 3, np.nan, 3, 1, 8, np.nan, 5]),
"max": np.array([2, 6, 7, 4, np.nan, 4, 2, 8, np.nan, 6]),
"first": np.array([1, 5, 7, 3, np.nan, 4, 2, 8, np.nan, 6]),
"dense": np.array([1, 3, 4, 2, np.nan, 2, 1, 5, np.nan, 3]),
}
@pytest.fixture(params=["average", "min", "max", "first", "dense"])
def method(self, request):
"""
Fixture for trying all rank methods
"""
return request.param
@td.skip_if_no_scipy
def test_rank(self, float_frame):
import scipy.stats # noqa:F401
from scipy.stats import rankdata
float_frame["A"][::2] = np.nan
float_frame["B"][::3] = np.nan
float_frame["C"][::4] = np.nan
float_frame["D"][::5] = np.nan
ranks0 = float_frame.rank()
ranks1 = float_frame.rank(1)
mask = np.isnan(float_frame.values)
fvals = float_frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
|
tm.assert_almost_equal(ranks0.values, exp0)
|
pandas._testing.assert_almost_equal
|
"""Legacy feature computation from depart."""
import itertools
import re
import numpy as np
import pandas as pd
from Bio.SeqUtils.ProtParam import ProteinAnalysis
from Bio.SeqUtils.ProtParamData import kd
from pyteomics import parser
from sklearn.preprocessing import PolynomialFeatures
from xirt import sequences
def create_simple_features(df, seq_column="Sequence"):
"""
Create a simple feature matrix using the complete sequence (not position specific).
Parameters:
df: dataframe,
containing a "Sequence" column
Returns:
df, feature dataframe
"""
df[seq_column] = df[seq_column].apply(simply_alphabet).values
ff_df = pd.DataFrame()
# biopython features
ff_df["turnP"] = df[seq_column].apply(get_turn_indicator)
ff_df["turn"] = df[seq_column].apply(get_structure_perc, args=["turn"])
ff_df["helix"] = df[seq_column].apply(get_structure_perc, args=["helix"])
ff_df["sheet"] = df[seq_column].apply(get_structure_perc, args=["sheet"])
ff_df["pi"] = df[seq_column].apply(get_pi)
ff_df["aromaticity"] = df[seq_column].apply(get_aromaticity)
ff_df["estimated_charge"] = df[seq_column].apply(get_estimated_charge)
ff_df["loglength"] = df[seq_column].apply(get_loglength)
return ff_df
def create_all_features(df, alphabet=parser.std_amino_acids, pos_specific=False, lcp=0.2,
correct=True):
"""
Compute all features for the given sequence column of the dataframe.
Args:
df: Must contain a "Sequence" column
alphabet: list, amino acids in alphabet
pos_specific: bool, if AAs should be treated with position specific indices
lcp: float, length correction factor
correct: bool, if lcp should be used
Returns:
df, feature dataframe
"""
df["Sequence"] = df["Sequence"].apply(simply_alphabet).values
ff_df = pd.DataFrame()
# features
# make sure the sequence column has no strange formatting
ff_df["estimated_charge"] = df["Sequence"].apply(get_estimated_charge)
ff_df["loglength"] = df["Sequence"].apply(get_loglength)
ff_df["cterm"] = df["Sequence"].apply(get_shortest_distance, args=["cterm"])
ff_df["nterm"] = df["Sequence"].apply(get_shortest_distance, args=["nterm"])
ff_df["netcharge"] = df["Sequence"].apply(get_residue_charge)
ff_df["nterm_res"] = df["Sequence"].apply(get_cterm_residue_indicator)
# biopython features
ff_df["turnP"] = df["Sequence"].apply(get_turn_indicator)
ff_df["turn"] = df["Sequence"].apply(get_structure_perc, args=["turn"])
ff_df["helix"] = df["Sequence"].apply(get_structure_perc, args=["helix"])
ff_df["sheet"] = df["Sequence"].apply(get_structure_perc, args=["sheet"])
ff_df["pi"] = df["Sequence"].apply(get_pi)
ff_df["aromaticity"] = df["Sequence"].apply(get_aromaticity)
ff_df["hydrophobicity"] = df["Sequence"].apply(get_hydrophobicity)
# attention here we should use the modified sequences
if "Modified sequence" in df.columns:
orig_sequences = df["Modified sequence"]
else:
orig_sequences = df["Sequence"]
nterm_mods = np.ravel(extract_nterm_mods(orig_sequences))
orig_sequences = orig_sequences.apply(sequences.remove_brackets_underscores)
# add gl/ac features
for mod in nterm_mods:
ff_df[mod] = orig_sequences.apply(get_nterm_mod, mod=mod)
orig_sequences = orig_sequences.apply(sequences.replace_numbers)
orig_sequences = orig_sequences.apply(sequences.rewrite_modsequences)
aa_df = get_AA_matrix(orig_sequences, pos_specific=pos_specific, lcp=lcp,
correct=correct, residues=alphabet)
aa_df.index = ff_df.index
ff_df = pd.concat([ff_df, aa_df], axis=1)
return ff_df
def get_hydrophobicity(sequence):
"""Compute the overall hydrophobicity of a peptide sequence.
Simple summation is used of the indices according to kyte/doolittle.
Modifications and non-standard amino acids are ignored.
Parameters
sequence: str. peptide sequence
Return
float, hydrophobicity
"""
return np.sum([kd[i] for i in sequence if i in kd])
def get_nterm_mod(seq, mod):
"""Check for a given nterminal mod.
If the sequences contains the mod a 1 is returned, else 0.
"""
if seq.startswith(mod):
return 1
else:
return 0
def get_loglength(seq):
"""Compute loglength of the sequence.
Parameters:
-----------------------
seq: str,
peptide sequence
"""
return (np.log(len(seq)))
def get_shortest_distance(seq, opt="nterm"):
"""Compute the shortest distance of D/E, K/R to the C, N-term.
Parameters:
seq: str, peptide sequence
"""
return (1. * add_shortest_distance(seq, opt=opt))
def get_cterm_residue_indicator(seq):
"""
Return 1 if Lysine 0 if Arg.
Args:
seq: str, sequence
Returns:
int (1, 0)
"""
if seq[-1:] == "K":
return 1
else:
return 0
def get_nmods(mod_seq, mod_str="Oxidation"):
"""
Get the number of modifications.
Args:
mod_seq: str, modified sequence string
mod_str: str, modification string to count
Returns:
int, number of mods seen in mod_seq
"""
return mod_seq.count(mod_str)
def get_estimated_charge(seq):
"""
Compute net charge - or be more accurate an estimate of the contributed residue charge.
Parameters:
sequence: str, Peptide Sequence
Returns:
float, estimated charge
"""
return (seq.count("D") + seq.count("E") + (0.3 * seq.count("F")
+ 0.8 * seq.count("W")
+ 0.6 * seq.count("Y")) - seq.count("K") - seq.count(
"R"))
def get_residue_charge(seq):
"""
Compute net charge - by summing D/E/K/R charges.
Parameters:
seq: str,
Peptide Sequence
Returns:
float, charge
"""
return seq.count("D") + seq.count("E") - seq.count("K") - seq.count("R")
def get_aa_count(pepseq, residue, pos=-1, direction="N"):
"""
Return the AA count of a specific residue.
Args:
pepseq: str, peptide sequence
residue: char, residue
pos: int, position index
direction: str, either N or C.
Returns:
int, number of amino acids at a specific position / in the sequence.
"""
if pos == -1:
return pepseq.count(residue)
else:
if direction == "N":
return pepseq[pos:pos + 1].count(residue)
else:
return pepseq[-pos - 1:][0].count(residue)
def add_shortest_distance(orig_sequence, opt="cterm"):
"""Compute the shortest distance of a amino acids to n/cterm. E, D, C-term / K, R, N-term.
Parameters:
orig_sequence: string,
amino acid string
opt: str,
either "cterm" or "nterm". Each are defined with a set of amino acids
Returns:
int: distance to either termini specified
"""
# define shortest distance of targets to cterm
if opt == "cterm":
targets = "|".join(["E", "D"])
sequence = orig_sequence[::-1]
match = re.search(targets, sequence)
elif opt == "nterm":
targets = "|".join(["K", "R"])
sequence = orig_sequence
match = re.search(targets, sequence)
return match.start() + 1 if match else 0
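# Worked example (hypothetical peptide "AKDE"): opt="cterm" scans the reversed
# sequence "EDKA" and returns 1 (an E right at the C-terminus), while
# opt="nterm" returns 2 for the K at the second position.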
def extract_nterm_mods(seq):
"""
Extract nterminal mods, e.g. acA.
Args:
seq: str, peptide sequence
Returns:
ar-like, list of modifications
"""
# matches all nterminal mods, e.g. glD or acA
nterm_pattern = re.compile(r'^([a-z]+)([A-Z])')
mods = []
# test each sequence for non-AA letters
for ii, seqi in enumerate(seq):
nterm_match = re.findall(nterm_pattern, seqi)
# nterminal acetylation
if len(nterm_match) != 0:
mods.append([nterm_match[0][0]])
return mods
def get_patches(seq, aa_set1=["D", "E"], aa_set2=None, counts_only=True):
"""Add counts for patches of amino acids.
A pattern is loosely defined as a string of amino acids of a specific class, e.g. aromatic (FYW).
The pattern is only counted if at least two consecutive residues occur: XXXXFFXXXX
would be a pattern but also XFFFXXX.
Parameters
seq: str, peptide sequence
aa_set1/aa_set2: ar-like, list of amino acids to look for patches
The following features were intended:
- D, E (acidic)
- K, R (basic)
- W, F, Y (aromatics)
- K, R, D, E (mixed)
counts_only: bool,
if True DE and ED are added in a single column
"acidic_patterns",
if False, DE, ED are counts are added in separate columns.
Same is true for the combinations of KRH and WYF.
"""
# this representation is used to be easily also used if not only
# the counts but also the patterns are requested.
if aa_set2 is None:
ac_combs = ["".join(i) for i in
itertools.combinations_with_replacement(aa_set1, 2)]
pattern = re.compile("[" + "|".join(ac_combs) + "]{2,}")
else:
ac_combs = ["".join(i) for i in
list(itertools.product(aa_set1, aa_set2))]
ac_combs += ["".join(reversed(i)) for i in list(itertools.product(aa_set1, aa_set2))]
p1 = "|".join(aa_set1)
p2 = "|".join(aa_set2)
pattern = re.compile("([{}]+[{}]+)|[{}]+[{}]+".format(p1, p2, p2, p1))
# just count the patterns (DD, DDD) and do not distinguish between
# different patterns of the same type
if counts_only:
return len(re.findall(pattern, seq))
else:
res = {}
for pattern in ac_combs:
res[pattern] = str(seq).count(pattern)
return res
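# Minimal sketch for get_patches (hypothetical peptides):
def _demo_get_patches():
    """The acidic stretch 'DEE' counts as one patch; isolated D/E residues do not."""
    return get_patches("AADEEKA"), get_patches("ADAKEA")  # -> (1, 0)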
def get_sandwich(seq, aa="FYW"):
"""
Add sandwich counts based on aromatics.
Parameters:
seq: str, peptide sequence
aa: str,
amino acids to check for sandwiches. Default: FYW
"""
# count sandwich patterns between all aromatic amino acids and do not
# distinguish between WxY and WxW.
pattern = re.compile(r"(?=([" + aa + "][^" + aa + "][" + aa + "]))")
return len(re.findall(pattern, seq))
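# e.g. (hypothetical) get_sandwich("AFAWA") returns 1: one aromatic-X-aromatic
# window (F-A-W), counted with the overlap-friendly lookahead pattern.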
def get_structure_perc(seq, structure="helix"):
"""
Get the percentage of amino acids that are in specific secondary structure elements.
Args:
seq: str, peptide sequence
structure: str, one of helix, turn, sheet
Returns:
float, percentage of amino acids in secondary structure.
"""
if structure == "helix":
aa_structure = "VIYFWL"
elif structure == "turn":
aa_structure = "NPGS"
else:
aa_structure = "EMAL"
return sum(seq.count(i) for i in aa_structure) / len(seq)
def get_gravy(seq):
"""
Compute the gravy of the sequence.
Args:
seq: peptide sequence
Returns:
float, gravy score from biopython
"""
bio_seq = ProteinAnalysis(seq)
return bio_seq.gravy()
def get_aromaticity(seq):
"""Get the aromaticity of the sequence.
Parameters:
seq: str, peptide sequence
Returns:
float, aromaticity (biopython)
"""
bio_seq = ProteinAnalysis(seq)
return bio_seq.aromaticity()
def get_pi(seq):
"""Get the pI of the sequence.
Parameters:
seq: str, peptide sequence
Returns:
float, isoelectric point (biopython)
"""
bio_seq = ProteinAnalysis(seq)
return bio_seq.isoelectric_point()
def get_turn_indicator(seq):
"""
Compute the average number of amino acids between Proline residues.
Parameters:
seq: str, peptide sequence
Returns:
float, turn indicator
"""
starts = [i.start() for i in re.finditer("P", seq)]
# no prolines
if not starts:
return 0.0
elif len(starts) == 1:
return starts[0] / (len(seq) * 1.)
else:
return np.mean(np.diff(starts)) / (len(seq) * 1.)
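# e.g. (hypothetical) get_turn_indicator("APAAPA"): prolines at positions 1 and
# 4 give a mean gap of 3 over length 6, i.e. 0.5; no prolines returns 0.0.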
def get_weight(seq):
"""
Get weight of peptide.
Parameters:
seq: str, peptide sequence
Returns:
float, weight (biopython)
"""
bio_seq = ProteinAnalysis(seq)
return bio_seq.molecular_weight()
def get_AA_matrix(sequences, pos_specific=False, ntermini=5, lcp=1,
correct=False, residues=parser.std_amino_acids):
"""Count the amino acid in a peptide sequence.
Counting uses the pyteomics amino_acid composition. Modified residues of the pattern "modA"
are already supported.
If modifications should not be considered, another sequence column
can be used. As noted in the pyteomics documentation, an "lcp" factor can substantially
increase the prediction accuracy.
Parameters:
sequences: ar-like, with peptide sequences
pos_specific: bool, indicator if position specific amino acid should be counted
ntermini: int,
the number of termini to consider in the pos_specific case
lcp: float, length correction factor
correct: bool, if lcp should be applied
residues: ar-like, residues in the alphabet
Examples:
#modification and termini supporting
>>mystr = "nAAAAAAAAAAAAAAAGAAGcK"
#just aa composition
>>mystr = "AAAAAAAAAAAAAAAGAAGK"
Returns:
df: dataframe with amino acid count columns
"""
df = pd.DataFrame()
df["Sequence"] = sequences.copy()
# create dataframe with counts
aa_counts = [parser.amino_acid_composition(i) for i in df["Sequence"]]
aa_count_df =
|
pd.DataFrame(aa_counts)
|
pandas.DataFrame
|