# Dataset columns: content, avg_line_length, max_line_length, alphanum_fraction,
# licenses, repository_name, path, size, lang (every entry is a Python source file).

import numpy as np
import scipy.stats as scst
import scipy.special as scsp
import scipy.optimize as scopt
import tensorflow as tf
import tensorflow_probability as tfp
import pickle
import os
import sys
try:
import gpflow
except ImportError:
raise Exception("Requires gpflow!")
import utils
def fit_gp(
X,
Y,
noise_var=None,
train_noise_var=True,
min_var=1e-4,
max_var=4.0,
kernel_type="matern52",
):
# use gpflow to get the hyperparameters for the function
with tf.Graph().as_default() as graph:
with tf.Session(graph=graph).as_default():
xdim = X.shape[1]
if kernel_type == "se":
kernel = gpflow.kernels.RBF(xdim, ARD=True)
elif kernel_type == "matern52":
kernel = gpflow.kernels.Matern52(xdim, ARD=True)
else:
raise Exception("Unknown kernel:", kernel_type)
meanf = gpflow.mean_functions.Constant()
with gpflow.defer_build():
m = gpflow.models.GPR(X, Y, kern=kernel, mean_function=meanf)
if train_noise_var:
# concentration, rate = 1.1, 1./0.5 (in BoRisk)
# => shape, scale = 1.1, 0.5
gamma_shape = 1.1
gamma_scale = 0.5
m.likelihood.variance.prior = gpflow.priors.Gamma(
gamma_shape, gamma_scale
) # shape, scale
m.likelihood.variance.transform = gpflow.transforms.Logistic(
min_var, max_var
)
# "Initialize likelihood variance at the mode of the prior (from BoRisk)"
prior_mode = (gamma_shape - 1) * gamma_scale
m.likelihood.variance.assign(prior_mode) # 1e-4
elif noise_var is not None:
m.likelihood.variance = noise_var
m.likelihood.variance.trainable = False
else:
raise Exception("Require noise variance!")
m.compile()
opt = gpflow.train.ScipyOptimizer()
has_error = False
try:
opt.minimize(m)
            except Exception:
has_error = True
if has_error:
return has_error, None
else:
gpf_lscale = m.kern.lengthscales.value
gpf_signal_var = m.kern.variance.value
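                # convert GPflow's per-dimension lengthscales into inverse squared
                # lengthscales (1 / l^2); this appears to be the parameterization the
                # rest of this code base expects under the "lengthscale" key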
lscale = 1.0 / (gpf_lscale * gpf_lscale)
meanf_const = m.mean_function.c.value
noise_var = m.likelihood.variance.value
return has_error, {
"meanf": meanf_const,
"signal_var": gpf_signal_var,
"lengthscale": lscale,
"noise_var": noise_var,
}
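
# A minimal usage sketch of fit_gp (shapes follow the (N, d) inputs / (N, 1) outputs
# convention assumed above; the numbers are illustrative):
#
#   X = np.random.rand(20, 2)
#   Y = np.random.randn(20, 1)
#   has_error, hyp = fit_gp(X, Y, train_noise_var=True)
#   if not has_error:
#       print(hyp["meanf"], hyp["signal_var"], hyp["lengthscale"], hyp["noise_var"])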
def get_meshgrid(xmin, xmax, nx, xdim):
x1d = np.linspace(xmin, xmax, nx)
vals = [x1d] * xdim
xds = np.meshgrid(*vals)
xs = np.concatenate([xd.reshape(-1, 1) for xd in xds], axis=1)
return xs
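
# For example, get_meshgrid(0.0, 1.0, 3, 2) returns a (9, 2) array enumerating the
# 3 x 3 grid on [0, 1]^2, starting with [0, 0], [0.5, 0], [1, 0], [0, 0.5], ...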
def func_gp_prior(xdim, l, sigma, seed, name=""):
np.random.seed(seed)
filename = "func_gp_prior_param_seed{}_{}.pkl".format(seed, name)
n_feats = 1000
if os.path.isfile(filename):
with open(filename, "rb") as infile:
data = pickle.load(infile)
W = data["W"]
b = data["b"]
theta = data["theta"]
else:
l = np.ones([1, xdim]) * l
W = np.random.randn(n_feats, xdim) * np.tile(np.sqrt(l), (n_feats, 1))
b = 2.0 * np.pi * np.random.rand(n_feats, 1)
theta = np.random.randn(n_feats, 1)
with open(filename, "wb") as outfile:
pickle.dump(
{"W": W, "b": b, "theta": theta},
outfile,
protocol=pickle.HIGHEST_PROTOCOL,
)
def f(x):
x = np.array(x).reshape(-1, xdim)
return (
theta.T.dot(np.sqrt(2.0 * sigma / n_feats)).dot(
np.cos(W.dot(x.T) + np.tile(b, (1, x.shape[0])))
)
).squeeze()
return f
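
# func_gp_prior draws a random function from an approximate GP prior via random
# (Fourier) features: f(x) = sqrt(2 * sigma / n_feats) * theta^T cos(W x + b), with
# W, b, theta sampled once per (seed, name) and cached to a pickle file in the
# working directory. A minimal usage sketch (argument values are illustrative):
#
#   f = func_gp_prior(xdim=2, l=10.0, sigma=1.0, seed=0, name="demo")
#   vals = f(np.random.rand(5, 2))   # -> array of 5 sampled function values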
def func_gp_prior_tf(xdim, l, sigma, seed, name="", dtype=tf.float64):
filename = "func_gp_prior_param_seed{}_{}.pkl".format(seed, name)
n_feats = 1000
if os.path.isfile(filename):
with open(filename, "rb") as infile:
data = pickle.load(infile)
W = tf.constant(data["W"], dtype=dtype)
b = tf.constant(data["b"], dtype=dtype)
theta = tf.constant(data["theta"], dtype=dtype)
else:
raise Exception("Require to run func_gp_prior to generate the parameters!")
def f(x):
x = tf.reshape(x, shape=(-1, xdim))
return tf.squeeze(
tf.cast(tf.sqrt(2.0 * sigma / n_feats), dtype=dtype)
* tf.linalg.matrix_transpose(theta)
@ (
tf.cos(
W @ tf.linalg.matrix_transpose(x)
+ tf.tile(b, multiples=(1, tf.shape(x)[0]))
)
)
)
return f
def negative_branin_uniform(dtype=tf.float64):
xdim = 1
zdim = 1
input_dim = xdim + zdim
xmin = 0.0
xmax = 1.0
# zmin, zmax only used for continuous z
zmin = 0.0
zmax = 1.0
xs = get_meshgrid(xmin, xmax, 50, xdim)
# xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin
def f(x):
x = x.reshape(-1, input_dim)
x = 15.0 * x - np.array([5.0, 0.0])
val = (
-1.0
/ 51.95
* (
(
x[:, 1]
- 5.1 * x[:, 0] ** 2 / (4 * np.pi ** 2)
+ 5.0 * x[:, 0] / np.pi
- 6.0
)
** 2
+ (10.0 - 10.0 / (8.0 * np.pi)) * np.cos(x[:, 0])
- 44.81
)
)
return val
def f_tf(x):
x = tf.reshape(x, shape=(-1, input_dim))
x = tf.cast(15.0, dtype) * x - tf.cast([5.0, 0.0], dtype)
val = (
tf.cast(-1.0, dtype)
/ tf.cast(51.95, dtype)
* (
tf.math.pow(
x[:, 1]
- tf.cast(5.1, dtype)
* x[:, 0]
* x[:, 0]
/ tf.cast(4 * np.pi ** 2, dtype)
+ tf.cast(5.0, dtype) * x[:, 0] / tf.cast(np.pi, dtype)
- tf.cast(6.0, dtype),
2,
)
+ (
tf.cast(10.0, dtype)
- tf.cast(10.0, dtype) / tf.cast(8.0 * np.pi, dtype)
)
* tf.cos(x[:, 0])
- tf.cast(44.81, dtype)
)
)
return val
mean_tnorm = (zmin + zmax) / 2.0
std_tnorm = (zmax - zmin) / 8.0
low_tnorm = mean_tnorm - 2.0 * std_tnorm
high_tnorm = mean_tnorm + 2.0 * std_tnorm
truncated_normal = tfp.distributions.TruncatedNormal(
loc=tf.cast(mean_tnorm, dtype=tf.float64),
scale=tf.cast(std_tnorm, dtype=tf.float64),
low=tf.cast(low_tnorm, dtype=tf.float64),
high=tf.cast(high_tnorm, dtype=tf.float64),
name="branin_truncated_normal",
)
def z_tnorm_generator(n):
return truncated_normal.sample(sample_shape=(n, zdim))
def z_lpdf(z): # (None,zdim)
return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)
zmid = (zmin + zmax) / 2.0
z_values = np.linspace(zmin, zmax, 30).reshape(-1, 1)
z_probs = np.ones(30) / 30.0
z_lprobs = np.log(z_probs)
return {
"function": f,
"function_tf": f_tf,
"name": "negative_branin_uniform",
"xdim": xdim,
"zdim": zdim,
"xmin": xmin,
"xmax": xmax,
"zmin": zmin,
"zmax": zmax,
"z_generator": z_tnorm_generator,
"z_lpdf": z_lpdf,
"zvalues": z_values,
"zlprobs": z_lprobs,
"zprobs": z_probs,
"lengthscale": np.array([12.14689435, 0.3134626]),
"signal_variance": 1.5294688560240726,
"likelihood_variance": 1e-2,
"rand_opt_init_x": xs,
"max_var_discrete": 0.7324786070977395,
"max_var_continuous": 0.64118695,
"max_cvar_discrete": -0.2899622792949111,
}
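
# Each benchmark constructor returns a plain dict describing the problem; a minimal
# sketch of how it might be consumed (the point below is illustrative):
#
#   prob = negative_branin_uniform()
#   xz = np.array([[0.3, 0.5]])      # one concatenated (x, z) point in [0, 1]^2
#   y = prob["function"](xz)         # numpy evaluation
#   # prob["function_tf"] builds the same value as a TensorFlow op, while
#   # prob["z_generator"], prob["zvalues"], prob["zprobs"] describe the z distribution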
def negative_goldstein_uniform(dtype=tf.float64):
xdim = 1
zdim = 1
input_dim = xdim + zdim
xmin = 0.0
xmax = 1.0
# zmin, zmax only used for continuous z
zmin = 0.0
zmax = 1.0
xs = get_meshgrid(xmin, xmax, 50, xdim)
# xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin
def f(x):
x = x.reshape(-1, input_dim)
xb = x * 4.0 - 2.0
val = -(
np.log(
(
1
+ (xb[:, 0] + xb[:, 1] + 1.0) ** 2
* (
19
- 14 * xb[:, 0]
+ 3 * xb[:, 0] ** 2
- 14 * xb[:, 1]
+ 6 * xb[:, 0] * xb[:, 1]
+ 3 * xb[:, 1] ** 2
)
)
* (
30
+ (2 * xb[:, 0] - 3 * xb[:, 1]) ** 2
* (
18
- 32 * xb[:, 0]
+ 12 * xb[:, 0] ** 2
+ 48 * xb[:, 1]
- 36 * xb[:, 0] * xb[:, 1]
+ 27 * xb[:, 1] ** 2
)
)
)
- 8.693
) # / 2.427
return val
def f_tf(x):
x = tf.reshape(x, shape=(-1, input_dim))
xb = x * tf.cast(4.0, dtype) - tf.cast(2.0, dtype)
val = -(
tf.log(
(
tf.cast(1.0, dtype)
+ tf.math.pow(xb[:, 0] + xb[:, 1] + tf.cast(1.0, dtype), 2)
* (
tf.cast(19.0, dtype)
- tf.cast(14.0, dtype) * xb[:, 0]
+ tf.cast(3.0, dtype) * xb[:, 0] * xb[:, 0]
- tf.cast(14.0, dtype) * xb[:, 1]
+ tf.cast(6.0, dtype) * xb[:, 0] * xb[:, 1]
+ tf.cast(3.0, dtype) * xb[:, 1] * xb[:, 1]
)
)
* (
tf.cast(30.0, dtype)
+ tf.math.pow(
tf.cast(2.0, dtype) * xb[:, 0] - tf.cast(3.0, dtype) * xb[:, 1],
2,
)
* (
tf.cast(18.0, dtype)
- tf.cast(32.0, dtype) * xb[:, 0]
+ tf.cast(12.0, dtype) * xb[:, 0] * xb[:, 0]
+ tf.cast(48.0, dtype) * xb[:, 1]
- tf.cast(36.0, dtype) * xb[:, 0] * xb[:, 1]
+ tf.cast(27.0, dtype) * xb[:, 1] * xb[:, 1]
)
)
)
- tf.cast(8.693, dtype)
) # / tf.cast(2.427, dtype)
return val
mean_tnorm = (zmin + zmax) / 2.0
std_tnorm = (zmax - zmin) / 8.0
low_tnorm = mean_tnorm - 2.0 * std_tnorm
high_tnorm = mean_tnorm + 2.0 * std_tnorm
truncated_normal = tfp.distributions.TruncatedNormal(
loc=tf.cast(mean_tnorm, dtype=tf.float64),
scale=tf.cast(std_tnorm, dtype=tf.float64),
low=tf.cast(low_tnorm, dtype=tf.float64),
high=tf.cast(high_tnorm, dtype=tf.float64),
name="branin_truncated_normal",
)
def z_tnorm_generator(n):
return truncated_normal.sample(sample_shape=(n, zdim))
def z_lpdf(z): # (None,zdim)
return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)
zmid = (zmin + zmax) / 2.0
z_values = np.linspace(zmin, zmax, 50).reshape(-1, 1)
z_probs = np.ones(50) / 50.0
z_lprobs = np.log(z_probs)
return {
"function": f,
"function_tf": f_tf,
"name": "negative_goldstein_uniform",
"xdim": xdim,
"zdim": zdim,
"xmin": xmin,
"xmax": xmax,
"zmin": zmin,
"zmax": zmax,
"z_generator": z_tnorm_generator,
"z_lpdf": z_lpdf,
"zvalues": z_values,
"zlprobs": z_lprobs,
"zprobs": z_probs,
"lengthscale": np.array([81.1012626, 83.22416009]),
"signal_variance": 0.02584212360067521,
"likelihood_variance": 1e-2,
"rand_opt_init_x": xs,
"max_var_discrete": 1.7992384381492217,
"max_var_continuous": 1.50360403,
"max_cvar_discrete": -2.394406754560626,
}
def portfolio_computeKmm_np(X, l, sigma):
n = X.shape[0]
xdim = X.shape[1]
l = l.reshape(1, xdim)
X = X / l
Q = np.tile(np.sum(X * X, axis=1, keepdims=True), reps=(1, n))
dist = Q + Q.T - 2 * X.dot(X.T)
kmm = sigma * np.exp(-0.5 * dist)
return kmm
def portfolio_computeKnm_np(X, Xbar, l, sigma):
"""
X: n x d
l: d
"""
n = np.shape(X)[0]
m = np.shape(Xbar)[0]
xdim = np.shape(X)[1]
l = l.reshape(1, xdim)
X = X / l
Xbar = Xbar / l
Q = np.tile(np.sum(X * X, axis=1, keepdims=True), reps=(1, m))
Qbar = np.tile(np.sum(Xbar * Xbar, axis=1, keepdims=True).T, reps=(n, 1))
dist = Qbar + Q - 2 * X.dot(Xbar.T)
knm = sigma * np.exp(-0.5 * dist)
return knm
def portfolio_computeKnm(X, Xbar, l, sigma, dtype=tf.float32):
"""
X: n x d
l: d
"""
n = tf.shape(X)[0]
m = tf.shape(Xbar)[0]
X = X / l
Xbar = Xbar / l
Q = tf.tile(tf.reduce_sum(tf.square(X), axis=1, keepdims=True), multiples=(1, m))
Qbar = tf.tile(
tf.transpose(tf.reduce_sum(tf.square(Xbar), axis=1, keepdims=True)),
multiples=(n, 1),
)
dist = Qbar + Q - 2 * X @ tf.transpose(Xbar)
knm = sigma * tf.exp(-0.5 * dist)
return knm
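
# All three helpers above implement the squared-exponential (RBF) kernel
# k(x, x') = sigma * exp(-0.5 * ||x / l - x' / l||^2); the numpy and TensorFlow
# versions should agree, and Kmm is simply Knm evaluated against itself, e.g.:
#
#   Kmm = portfolio_computeKmm_np(X, l, sigma)
#   assert np.allclose(Kmm, portfolio_computeKnm_np(X, X, l, sigma))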
def negative_portfolio_optimization_gaussian(dtype=tf.float64):
# noise is 1e-2
# z follows Gaussian
xdim = 3
zdim = 2
input_dim = xdim + zdim
xmin = 0.0
xmax = 1.0
# zmin, zmax only used for continuous z
zmin = 0.0
zmax = 1.0
xs = get_meshgrid(xmin, xmax, 5, xdim)
with open("portfolio_data/data.pkl", "rb") as readfile:
data = pickle.load(readfile)
X = data["X"].astype(np.float64) # (3000,5)
Y = data["Y"].astype(np.float64) # (3000,1)
with open("portfolio_data/GP_params.pkl", "rb") as readfile:
params = pickle.load(readfile)
lengthscales = params["lengthscales"]
kern_var = params["kern_variance"]
noise_var = params["noise_variance"]
mean_constant = params["mean_constant"]
invKmm = params["invKmm"]
print(Y)
print("**PARAMS:", params)
invKmm_tf = tf.constant(invKmm, dtype=dtype)
mean_constant_tf = tf.constant(mean_constant, dtype=dtype)
Y_tf = tf.constant(Y, dtype=dtype)
def f(x):
x = x.reshape(-1, input_dim)
Knm = portfolio_computeKnm_np(x, X, lengthscales, kern_var)
val = mean_constant + Knm @ invKmm @ (Y - mean_constant) # posterior mean
return -val.reshape(x.shape[0])
def f_tf(x):
x = tf.reshape(x, shape=(-1, input_dim))
Knm = portfolio_computeKnm(x, X, lengthscales, kern_var)
val = mean_constant_tf + Knm @ invKmm_tf @ (Y_tf - mean_constant_tf)
return -tf.reshape(val, shape=(tf.shape(x)[0],))
def z_tnorm_generator(n):
return tf.random.uniform(shape=(n, zdim), minval=0.0, maxval=1.0, dtype=dtype)
def z_lpdf(z):
# dummy, not really pdf
# but returning a constant
return tf.reduce_sum(tf.ones_like(z, dtype=dtype), axis=1)
zmid = (zmin + zmax) / 2.0
z_values = get_meshgrid(zmid - 0.25, zmid + 0.25, 5, zdim)
z_lprobs = -np.sum((z_values - np.ones(zdim) * zmid) ** 2, axis=1) / 0.15 ** 2
z_lprobs = np.squeeze(z_lprobs - scsp.logsumexp(z_lprobs))
z_probs = np.exp(z_lprobs)
return {
"function": f,
"function_tf": f_tf,
"name": "negative_portfolio_optimization_gaussian",
"xdim": xdim,
"zdim": zdim,
"xmin": xmin,
"xmax": xmax,
"zmin": zmin,
"zmax": zmax,
"z_generator": z_tnorm_generator,
"z_lpdf": z_lpdf,
"zvalues": z_values,
"zlprobs": z_lprobs,
"zprobs": z_probs,
"lengthscale": lengthscales,
"signal_variance": kern_var,
"likelihood_variance": 1e-4,
"rand_opt_init_x": xs,
"max_var_discrete": 17.835917287050652,
"max_var_continuous": 17.835917287050652, # it takes too long to optimize, so we use the discrete case as an approximation
"max_cvar_X": [0.0, 1.0, 0.08484073],
"max_cvar_discrete": 21.21,
} # at [0., 1., 0.081978]
def negative_rescaled_hartmann6d_51(dtype=tf.float64):
    # rescaled negative Hartmann-6D, with the 6 inputs split into x (5 dims) and z (1 dim)
    # range: (0, 1) for all dimensions
xdim = 5
zdim = 1
input_dim = xdim + zdim
xmin = 0.0
xmax = 1.0
zmin = 0.0
zmax = 1.0
# maximum = 3.13449414
# minimum = -1.30954062
xs = get_meshgrid(xmin, xmax, 3, xdim)
# xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin
A = np.array(
[
[10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
[0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
[3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
[17.0, 8.0, 0.05, 10.0, 0.1, 14.0],
]
)
A_tf = tf.constant(A, dtype=dtype)
alpha = np.array([1.0, 1.2, 3.0, 3.2])
alpha_tf = tf.constant(alpha, dtype=dtype)
P = 1e-4 * np.array(
[
[1312.0, 1696.0, 5569.0, 124.0, 8283.0, 5886.0],
[2329.0, 4135.0, 8307.0, 3736.0, 1004.0, 9991.0],
[2348.0, 1451.0, 3522.0, 2883.0, 3047.0, 6650.0],
[4047.0, 8828.0, 8732.0, 5743.0, 1091.0, 381.0],
]
)
P_tf = tf.constant(P, dtype=dtype)
def f(x):
x = np.tile(x.reshape(-1, 1, input_dim), reps=(1, 4, 1))
val = (
2.58 + np.sum(alpha * np.exp(-np.sum(A * (x - P) ** 2, axis=2)), axis=1)
) / 1.94
# val = (val - minimum) / (maximum - minimum) * 2.0 - 1.0
return val * 10.0
def f_tf(x):
x = tf.tile(tf.reshape(x, shape=(-1, 1, input_dim)), multiples=(1, 4, 1))
val = (
tf.constant(2.58, dtype)
+ tf.reduce_sum(
alpha_tf
* tf.exp(-tf.reduce_sum(A_tf * (x - P_tf) * (x - P_tf), axis=2)),
axis=1,
)
) / tf.constant(1.94, dtype)
return val * tf.cast(10.0, dtype)
mean_tnorm = (zmin + zmax) / 2.0
std_tnorm = (zmax - zmin) / 8.0
low_tnorm = mean_tnorm - 2.0 * std_tnorm
high_tnorm = mean_tnorm + 2.0 * std_tnorm
truncated_normal = tfp.distributions.TruncatedNormal(
loc=tf.cast(mean_tnorm, dtype=tf.float64),
scale=tf.cast(std_tnorm, dtype=tf.float64),
low=tf.cast(low_tnorm, dtype=tf.float64),
high=tf.cast(high_tnorm, dtype=tf.float64),
name="branin_truncated_normal",
)
def z_tnorm_generator(n):
return truncated_normal.sample(sample_shape=(n, zdim))
def z_lpdf(z): # (None,zdim)
return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)
zmid = (zmin + zmax) / 2.0
z_values = get_meshgrid(zmid - 0.2, zmid + 0.2, 15, zdim)
z_lprobs = -np.sum((z_values - np.ones(zdim) * zmid) ** 2, axis=1) / 0.2 ** 2
z_lprobs = np.squeeze(z_lprobs - scsp.logsumexp(z_lprobs))
z_probs = np.exp(z_lprobs)
return {
"function": f,
"function_tf": f_tf,
"name": "negative_rescaled_hartmann6d_51",
"xdim": xdim,
"zdim": zdim,
"xmin": xmin,
"xmax": xmax,
"zmin": zmin,
"zmax": zmax,
"z_generator": z_tnorm_generator,
"z_lpdf": z_lpdf,
"zvalues": z_values,
"zlprobs": z_lprobs,
"zprobs": z_probs,
"lengthscale": np.array([6.9512, 1.9341, 0.506, 4.2067, 5.0986, 3.5949]),
"signal_variance": 1.423,
"likelihood_variance": 1e-2,
"rand_opt_init_x": xs,
"max_cvar_discrete": 20.5428,
}
def negative_rescaled_hartmann6d_15(dtype=tf.float64):
    # rescaled negative Hartmann-6D, with the 6 inputs split into x (1 dim) and z (5 dims)
    # range: (0, 1) for all dimensions
xdim = 1
zdim = 5
input_dim = xdim + zdim
xmin = 0.0
xmax = 1.0
zmin = 0.0
zmax = 1.0
xs = get_meshgrid(xmin, xmax, 50, xdim)
# xs = np.random.rand(1000, xdim) * (xmax - xmin) + xmin
A = np.array(
[
[10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
[0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
[3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
[17.0, 8.0, 0.05, 10.0, 0.1, 14.0],
]
)
A_tf = tf.constant(A, dtype=dtype)
alpha = np.array([1.0, 1.2, 3.0, 3.2])
alpha_tf = tf.constant(alpha, dtype=dtype)
P = 1e-4 * np.array(
[
[1312.0, 1696.0, 5569.0, 124.0, 8283.0, 5886.0],
[2329.0, 4135.0, 8307.0, 3736.0, 1004.0, 9991.0],
[2348.0, 1451.0, 3522.0, 2883.0, 3047.0, 6650.0],
[4047.0, 8828.0, 8732.0, 5743.0, 1091.0, 381.0],
]
)
P_tf = tf.constant(P, dtype=dtype)
def f(x):
x = np.tile(x.reshape(-1, 1, input_dim), reps=(1, 4, 1))
val = (
2.58 + np.sum(alpha * np.exp(-np.sum(A * (x - P) ** 2, axis=2)), axis=1)
) / 1.94
# val = (val - minimum) / (maximum - minimum) * 2.0 - 1.0
return val * 10.0
def f_tf(x):
x = tf.tile(tf.reshape(x, shape=(-1, 1, input_dim)), multiples=(1, 4, 1))
val = (
tf.constant(2.58, dtype)
+ tf.reduce_sum(
alpha_tf
* tf.exp(-tf.reduce_sum(A_tf * (x - P_tf) * (x - P_tf), axis=2)),
axis=1,
)
) / tf.constant(1.94, dtype)
return val * tf.cast(10.0, dtype)
mean_tnorm = (zmin + zmax) / 2.0
std_tnorm = (zmax - zmin) / 8.0
low_tnorm = mean_tnorm - 2.0 * std_tnorm
high_tnorm = mean_tnorm + 2.0 * std_tnorm
truncated_normal = tfp.distributions.TruncatedNormal(
loc=tf.cast(mean_tnorm, dtype=tf.float64),
scale=tf.cast(std_tnorm, dtype=tf.float64),
low=tf.cast(low_tnorm, dtype=tf.float64),
high=tf.cast(high_tnorm, dtype=tf.float64),
name="branin_truncated_normal",
)
def z_tnorm_generator(n):
return truncated_normal.sample(sample_shape=(n, zdim))
def z_lpdf(z): # (None,zdim)
return tf.reduce_sum(truncated_normal.log_prob(z), axis=1)
zmid = (zmin + zmax) / 2.0
z_values = get_meshgrid(zmid - 0.2, zmid + 0.2, 3, zdim)
z_lprobs = -np.sum((z_values - np.ones(zdim) * zmid) ** 2, axis=1) / 0.2 ** 2
z_lprobs = np.squeeze(z_lprobs - scsp.logsumexp(z_lprobs))
z_probs = np.exp(z_lprobs)
return {
"function": f,
"function_tf": f_tf,
"name": "negative_rescaled_hartmann6d_15",
"xdim": xdim,
"zdim": zdim,
"xmin": xmin,
"xmax": xmax,
"zmin": zmin,
"zmax": zmax,
"z_generator": z_tnorm_generator,
"z_lpdf": z_lpdf,
"zvalues": z_values,
"zlprobs": z_lprobs,
"zprobs": z_probs,
"lengthscale": np.array([6.9512, 1.9341, 0.506, 4.2067, 5.0986, 3.5949]),
"signal_variance": 1.423,
"likelihood_variance": 1e-4,
"rand_opt_init_x": xs,
"max_cvar_discrete": 14.1203, # near [0.2544893]
    }  # not yet optimized
def yacht_hydrodynamics(dtype=tf.float64):
filename = "yacht_data/gp_hyperparameters.pkl"
with open(filename, "rb") as readfile:
yacht = pickle.load(readfile)
X = yacht["X"]
Y = yacht["Y"]
gp_hyper = yacht["gp_hyper"]
NK = utils.computeNKmm(
X,
gp_hyper["lengthscale"],
gp_hyper["signal_var"],
gp_hyper["noise_var"],
dtype=dtype,
kernel_type="se",
)
NKInv = utils.chol2inv(NK, dtype=dtype)
NKInvs = tf.expand_dims(NKInv, axis=0)
input_dim = X.shape[1]
zdim = 1
xdim = input_dim - zdim
xmin = 0.0
xmax = 1.0
zmin = 0.0
zmax = 1.0
xs = get_meshgrid(xmin, xmax, 4, xdim)
def f(x):
x = x.reshape(-1, input_dim)
mean_f = (
utils.compute_mean_f_np(
x,
X,
Y - gp_hyper["meanf"],
gp_hyper["lengthscale"],
gp_hyper["signal_var"],
gp_hyper["noise_var"],
kernel_type="se",
)
+ gp_hyper["meanf"]
)
return -mean_f.reshape(-1, 1)
def f_tf(x):
x = tf.reshape(x, (-1, input_dim))
mean_f = (
utils.compute_mean_f(
x,
input_dim,
1,
X,
Y - gp_hyper["meanf"],
gp_hyper["lengthscale"].reshape(1, input_dim),
gp_hyper["signal_var"].reshape(1, 1),
gp_hyper["noise_var"].reshape(1, 1),
NKInvs,
dtype=dtype,
kernel_type="se",
)
+ gp_hyper["meanf"]
)
return -mean_f
zmid = 0.0
z_values = np.linspace(zmin, zmax, 15).reshape(-1, 1)
z_probs = np.ones(z_values.shape[0]) / z_values.shape[0]
z_lprobs = np.log(z_probs)
return {
"function": f,
"function_tf": f_tf,
"name": "yacht_hydrodynamics",
"xdim": xdim,
"zdim": zdim,
"xmin": xmin,
"xmax": xmax,
"zmin": zmin,
"zmax": zmax,
"zvalues": z_values,
"zlprobs": z_lprobs,
"zprobs": z_probs,
"lengthscale": gp_hyper["lengthscale"],
"signal_variance": gp_hyper["signal_var"],
"likelihood_variance": 0.0001, # gp_hyper["noise_var"],
"rand_opt_init_x": xs,
"max_cvar_discrete": -1.009, # at [0.35523405 1. 0. 0. 0.85907464], alpha=0.3
    }

# === source file: qphong/BayesOpt-LV / functions.py (Python, MIT license) ===

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from ..pipelineState import PipelineStateInterface
from ..data import BrewPipeDataFrame
__author__ = 'Dominik Meyer <[email protected]>'
class NumpyNullPreprocessor(PipelineStateInterface):
"""
This is an example class of preprocessor, that
takes numpy data from the data loader and outputs
numpy data again. Basically, it does nothing, and is
just a testcase to get some interface definitions going.
"""
def __init__(self, intermediate_directory="intermediates"):
"""
:param intermediate_directory: Directory, where the
intermediate pandas dataframe should be persisted
to.
"""
super(NumpyNullPreprocessor, self).__init__()
self._intermediate_directory = intermediate_directory
self._cached = False
self._cached_object = None
def _persist_numpy(self, arr, name):
filename = os.path.join(self._intermediate_directory,
'NumpyNullPreprocessor' + name)
        with open(filename, 'wb') as f:
np.save(f, arr)
return filename
def _load_numpy(self, name):
filename = os.path.join(self._intermediate_directory,
'NumpyNullPreprocessor' + name)
        with open(filename, 'rb') as f:
arr = np.load(f)
return arr
def preprocess(self, dataframe):
def cb(name):
obj = self
inp = dataframe
h = obj.get(inp.name)
tmp = None
if not h or h != inp.hash:
org = inp.data
# preprocessing would happen here and be put to tmp
tmp = org
h = inp.hash
obj._persist_numpy(tmp, inp.name)
obj.put(inp.name, h)
else:
if self._cached and self._cached == inp.hash:
return self._cached_object
tmp = obj._load_numpy(inp.name)
self._cached_object = tmp
self._cached = inp.hash
return tmp
h = 0
if not self.get(dataframe.name) is None:
h = self.get(dataframe.name)
r = BrewPipeDataFrame(dataframe.name, lazy_frame=True, hash=h, callback=cb)
return r
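
# A minimal usage sketch (how the returned lazy BrewPipeDataFrame is materialised
# depends on the surrounding brewPipe pipeline, so the last step below is only
# indicative):
#
#   pre = NumpyNullPreprocessor(intermediate_directory="intermediates")
#   out = pre.preprocess(input_frame)   # input_frame: a BrewPipeDataFrame
#   # accessing out's data presumably triggers cb() above and caches the numpy array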

# === source file: meyerd/brewPipe / brewPipe/preprocess/numpy_null.py (Python, Apache-2.0 license) ===

# -*- coding: utf-8 -*-
"""
mslib.msui._tests.test_mscolab_project
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is used to test mscolab-project related gui.
This file is part of mss.
:copyright: Copyright 2019 Shivashis Padhi
:copyright: Copyright 2019-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import pytest
from mslib.msui.mscolab import MSSMscolabWindow
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.models import Message
from PyQt5 import QtCore, QtTest, QtWidgets, Qt
from mslib._tests.utils import mscolab_start_server
PORTS = list(range(9571, 9590))
class Actions(object):
DOWNLOAD = 0
COPY = 1
REPLY = 2
EDIT = 3
DELETE = 4
@pytest.mark.skipif(os.name == "nt",
reason="multiprocessing needs currently start_method fork")
class Test_MscolabProject(object):
def setup(self):
self.process, self.url, self.app, _, self.cm, self.fm = mscolab_start_server(PORTS)
QtTest.QTest.qWait(500)
self.application = QtWidgets.QApplication(sys.argv)
self.window = MSSMscolabWindow(data_dir=mscolab_settings.MSCOLAB_DATA_DIR,
mscolab_server_url=self.url)
self.window.show()
self._connect_to_mscolab()
self._login()
self._activate_project_at_index(0)
# activate project window here by clicking button
QtTest.QTest.mouseClick(self.window.chatWindowBtn, QtCore.Qt.LeftButton)
QtWidgets.QApplication.processEvents()
self.chat_window = self.window.chat_window
QtTest.QTest.qWaitForWindowExposed(self.window)
QtWidgets.QApplication.processEvents()
def teardown(self):
if self.window.chat_window:
self.window.chat_window.hide()
if self.window.conn:
self.window.conn.disconnect()
self.window.hide()
QtWidgets.QApplication.processEvents()
self.application.quit()
QtWidgets.QApplication.processEvents()
self.process.terminate()
def test_send_message(self):
self._send_message("**test message**")
self._send_message("**test message**")
with self.app.app_context():
assert Message.query.filter_by(text='**test message**').count() == 2
def test_search_message(self):
self._send_message("**test message**")
self._send_message("**test message**")
message_index = self.chat_window.messageList.count() - 1
self.window.chat_window.searchMessageLineEdit.setText("test message")
QtWidgets.QApplication.processEvents()
QtTest.QTest.mouseClick(self.window.chat_window.searchPrevBtn, QtCore.Qt.LeftButton)
QtWidgets.QApplication.processEvents()
assert self.chat_window.messageList.item(message_index).isSelected() is True
QtTest.QTest.mouseClick(self.window.chat_window.searchPrevBtn, QtCore.Qt.LeftButton)
QtWidgets.QApplication.processEvents()
assert self.chat_window.messageList.item(message_index - 1).isSelected() is True
QtTest.QTest.mouseClick(self.window.chat_window.searchNextBtn, QtCore.Qt.LeftButton)
QtWidgets.QApplication.processEvents()
assert self.chat_window.messageList.item(message_index).isSelected() is True
def test_copy_message(self):
self._send_message("**test message**")
self._send_message("**test message**")
self._activate_context_menu_action(Actions.COPY)
assert Qt.QApplication.clipboard().text() == "**test message**"
def test_reply_message(self):
self._send_message("**test message**")
self._send_message("**test message**")
parent_message_id = self._get_message_id(self.chat_window.messageList.count() - 1)
self._activate_context_menu_action(Actions.REPLY)
self.chat_window.messageText.setPlainText('test reply')
QtTest.QTest.mouseClick(self.chat_window.sendMessageBtn, QtCore.Qt.LeftButton)
QtTest.QTest.qWait(100)
with self.app.app_context():
message = Message.query.filter_by(text='test reply')
assert message.count() == 1
assert message.first().reply_id == parent_message_id
def test_edit_message(self):
self._send_message("**test message**")
self._send_message("**test message**")
self._activate_context_menu_action(Actions.EDIT)
self.chat_window.messageText.setPlainText('test edit')
QtTest.QTest.mouseClick(self.chat_window.editMessageBtn, QtCore.Qt.LeftButton)
QtTest.QTest.qWait(100)
with self.app.app_context():
assert Message.query.filter_by(text='test edit').count() == 1
def test_delete_message(self):
self._send_message("**test message**")
self._send_message("**test message**")
self._activate_context_menu_action(Actions.DELETE)
QtTest.QTest.qWait(100)
with self.app.app_context():
assert Message.query.filter_by(text='test edit').count() == 0
def _connect_to_mscolab(self):
self.window.url.setEditText(self.url)
QtTest.QTest.mouseClick(self.window.toggleConnectionBtn, QtCore.Qt.LeftButton)
QtTest.QTest.qWait(500)
def _login(self):
# login
self.window.emailid.setText('a')
self.window.password.setText('a')
QtTest.QTest.mouseClick(self.window.loginButton, QtCore.Qt.LeftButton)
QtWidgets.QApplication.processEvents()
def _activate_project_at_index(self, index):
item = self.window.listProjects.item(index)
point = self.window.listProjects.visualItemRect(item).center()
QtTest.QTest.mouseClick(self.window.listProjects.viewport(), QtCore.Qt.LeftButton, pos=point)
QtWidgets.QApplication.processEvents()
QtTest.QTest.mouseDClick(self.window.listProjects.viewport(), QtCore.Qt.LeftButton, pos=point)
QtWidgets.QApplication.processEvents()
def _activate_context_menu_action(self, action_index):
item = self.chat_window.messageList.item(self.chat_window.messageList.count() - 1)
message_widget = self.chat_window.messageList.itemWidget(item)
message_widget.context_menu.actions()[action_index].trigger()
def _send_message(self, text):
self.chat_window.messageText.setPlainText(text)
QtTest.QTest.mouseClick(self.chat_window.sendMessageBtn, QtCore.Qt.LeftButton)
QtWidgets.QApplication.processEvents()
QtTest.QTest.qWait(500)
def _get_message_id(self, index):
item = self.chat_window.messageList.item(index)
message_widget = self.chat_window.messageList.itemWidget(item)
return message_widget.id

# === source file: withoutwaxaryan/MSS / mslib/msui/_tests/test_mscolab_project.py (Python, Apache-2.0 license) ===

#!/usr/bin/python
# this script will update the versions in packages and innosetup installer files to match that in config.h
import plistlib, os, datetime, fileinput, glob, sys, string
scriptpath = os.path.dirname(os.path.realpath(__file__))
projectpath = os.path.abspath(os.path.join(scriptpath, os.pardir))
IPLUG2_ROOT = "..\..\iPlug2"
sys.path.insert(0, os.path.join(os.getcwd(), IPLUG2_ROOT + '/Scripts'))
from parse_config import parse_config
def replacestrs(filename, s, r):
files = glob.glob(filename)
for line in fileinput.input(files,inplace=1):
string.find(line, s)
line = line.replace(s, r)
sys.stdout.write(line)
def main():
demo = 0
if len(sys.argv) != 2:
print("Usage: update_installer_version.py demo(0 or 1)")
sys.exit(1)
else:
demo=int(sys.argv[1])
config = parse_config(projectpath)
# MAC INSTALLER
print "Updating Mac Installer version info..."
plistpath = projectpath + "/installer/" + config['BUNDLE_NAME'] + ".pkgproj"
installer = plistlib.readPlist(plistpath)
# range = number of items in the installer (VST 2, VST 3, app, audiounit, aax)
for x in range(0,5):
installer['PACKAGES'][x]['PACKAGE_SETTINGS']['VERSION'] = config['FULL_VER_STR']
if demo:
installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME'] + " Demo"
installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro-demo.rtf"
else:
installer['PROJECT']['PROJECT_PRESENTATION']['TITLE']['LOCALIZATIONS'][0]['VALUE'] = config['BUNDLE_NAME']
installer['PROJECT']['PROJECT_PRESENTATION']['INTRODUCTION']['LOCALIZATIONS'][0]['VALUE']['PATH'] = "intro.rtf"
plistlib.writePlist(installer, plistpath)
# replacestrs(plistpath, "//Apple//", "//Apple Computer//");
# WIN INSTALLER
print "Updating Windows Installer version info..."
for line in fileinput.input(projectpath + "/installer/" + config['BUNDLE_NAME'] + ".iss",inplace=1):
if "AppVersion" in line:
line="AppVersion=" + config['FULL_VER_STR'] + "\n"
if "OutputBaseFilename" in line:
if demo:
line="OutputBaseFilename=BigLittleGain Demo Installer\n"
else:
line="OutputBaseFilename=BigLittleGain Installer\n"
if 'Source: "readme' in line:
if demo:
line='Source: "readme-win-demo.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
else:
line='Source: "readme-win.rtf"; DestDir: "{app}"; DestName: "readme.rtf"; Flags: isreadme\n'
if "WelcomeLabel1" in line:
if demo:
line="WelcomeLabel1=Welcome to the BigLittleGain Demo installer\n"
else:
line="WelcomeLabel1=Welcome to the BigLittleGain installer\n"
if "SetupWindowTitle" in line:
if demo:
line="SetupWindowTitle=BigLittleGain Demo installer\n"
else:
line="SetupWindowTitle=BigLittleGain installer\n"
sys.stdout.write(line)
if __name__ == '__main__':
main()

# === source file: Bindernews/BigLittleGain / BigLittleGain/scripts/update_installer_version.py (Python, MIT license) ===

#
# A general spatial method class
#
import pybamm
import numpy as np
from scipy.sparse import eye, kron, coo_matrix, csr_matrix
class SpatialMethod:
"""
A general spatial methods class, with default (trivial) behaviour for some spatial
operations.
All spatial methods will follow the general form of SpatialMethod in
that they contain a method for broadcasting variables onto a mesh,
    a gradient operator, and a divergence operator.
Parameters
----------
mesh : :class: `pybamm.Mesh`
Contains all the submeshes for discretisation
"""
def __init__(self, mesh):
# add npts_for_broadcast to mesh domains for this particular discretisation
for dom in mesh.keys():
for i in range(len(mesh[dom])):
mesh[dom][i].npts_for_broadcast = mesh[dom][i].npts
self._mesh = mesh
@property
def mesh(self):
return self._mesh
def spatial_variable(self, symbol):
"""
Convert a :class:`pybamm.SpatialVariable` node to a linear algebra object that
can be evaluated (here, a :class:`pybamm.Vector` on either the nodes or the
edges).
Parameters
-----------
symbol : :class:`pybamm.SpatialVariable`
The spatial variable to be discretised.
Returns
-------
:class:`pybamm.Vector`
Contains the discretised spatial variable
"""
symbol_mesh = self.mesh.combine_submeshes(*symbol.domain)
if symbol.name.endswith("_edge"):
return pybamm.Vector(symbol_mesh[0].edges, domain=symbol.domain)
else:
return pybamm.Vector(symbol_mesh[0].nodes, domain=symbol.domain)
def broadcast(self, symbol, domain, auxiliary_domains, broadcast_type):
"""
Broadcast symbol to a specified domain.
Parameters
----------
symbol : :class:`pybamm.Symbol`
The symbol to be broadcasted
domain : iterable of strings
The domain to broadcast to
broadcast_type : str
The type of broadcast, either: 'primary' or 'full'
Returns
-------
broadcasted_symbol: class: `pybamm.Symbol`
The discretised symbol of the correct size for the spatial method
"""
primary_pts_for_broadcast = sum(
self.mesh[dom][0].npts_for_broadcast for dom in domain
)
full_pts_for_broadcast = sum(
subdom.npts_for_broadcast for dom in domain for subdom in self.mesh[dom]
)
if broadcast_type == "primary":
out = pybamm.Outer(
symbol, pybamm.Vector(np.ones(primary_pts_for_broadcast), domain=domain)
)
out.auxiliary_domains = auxiliary_domains
elif broadcast_type == "full":
out = symbol * pybamm.Vector(np.ones(full_pts_for_broadcast), domain=domain)
return out
def gradient(self, symbol, discretised_symbol, boundary_conditions):
"""
Implements the gradient for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Symbol`
The symbol that we will take the gradient of.
discretised_symbol: :class:`pybamm.Symbol`
The discretised symbol of the correct size
boundary_conditions : dict
The boundary conditions of the model
({symbol.id: {"left": left bc, "right": right bc}})
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised gradient on
the child discretised_symbol
"""
raise NotImplementedError
def divergence(self, symbol, discretised_symbol, boundary_conditions):
"""
Implements the divergence for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Symbol`
The symbol that we will take the gradient of.
discretised_symbol: :class:`pybamm.Symbol`
The discretised symbol of the correct size
boundary_conditions : dict
The boundary conditions of the model
({symbol.id: {"left": left bc, "right": right bc}})
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised divergence on
the child discretised_symbol
"""
raise NotImplementedError
def laplacian(self, symbol, discretised_symbol, boundary_conditions):
"""
Implements the laplacian for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Symbol`
The symbol that we will take the gradient of.
discretised_symbol: :class:`pybamm.Symbol`
The discretised symbol of the correct size
boundary_conditions : dict
The boundary conditions of the model
({symbol.id: {"left": left bc, "right": right bc}})
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised laplacian on
the child discretised_symbol
"""
raise NotImplementedError
def gradient_squared(self, symbol, discretised_symbol, boundary_conditions):
"""
Implements the inner product of the gradient with itself for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Symbol`
The symbol that we will take the gradient of.
discretised_symbol: :class:`pybamm.Symbol`
The discretised symbol of the correct size
boundary_conditions : dict
The boundary conditions of the model
({symbol.id: {"left": left bc, "right": right bc}})
Returns
-------
:class: `pybamm.Array`
Contains the result of taking the inner product of the result of acting
the discretised gradient on the child discretised_symbol with itself
"""
raise NotImplementedError
def integral(self, child, discretised_child):
"""
Implements the integral for a spatial method.
Parameters
----------
child: :class:`pybamm.Symbol`
The symbol to which is being integrated
discretised_child: :class:`pybamm.Symbol`
The discretised symbol of the correct size
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised integral on
the child discretised_symbol
"""
raise NotImplementedError
def indefinite_integral(self, child, discretised_child):
"""
Implements the indefinite integral for a spatial method.
Parameters
----------
child: :class:`pybamm.Symbol`
The symbol to which is being integrated
discretised_child: :class:`pybamm.Symbol`
The discretised symbol of the correct size
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised indefinite integral on
the child discretised_symbol
"""
raise NotImplementedError
def boundary_integral(self, child, discretised_child, region):
"""
Implements the boundary integral for a spatial method.
Parameters
----------
child: :class:`pybamm.Symbol`
The symbol to which is being integrated
discretised_child: :class:`pybamm.Symbol`
The discretised symbol of the correct size
region: str
The region of the boundary over which to integrate. If region is None
(default) the integration is carried out over the entire boundary. If
region is `negative tab` or `positive tab` then the integration is only
carried out over the appropriate part of the boundary corresponding to
the tab.
Returns
-------
:class: `pybamm.Array`
Contains the result of acting the discretised boundary integral on
the child discretised_symbol
"""
raise NotImplementedError
def delta_function(self, symbol, discretised_symbol):
"""
        Implements the delta function on the appropriate side for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Symbol`
The symbol to which is being integrated
discretised_symbol: :class:`pybamm.Symbol`
The discretised symbol of the correct size
"""
raise NotImplementedError
def internal_neumann_condition(
self, left_symbol_disc, right_symbol_disc, left_mesh, right_mesh
):
"""
A method to find the internal neumann conditions between two symbols
on adjacent subdomains.
Parameters
----------
left_symbol_disc : :class:`pybamm.Symbol`
The discretised symbol on the left subdomain
right_symbol_disc : :class:`pybamm.Symbol`
The discretised symbol on the right subdomain
left_mesh : list
The mesh on the left subdomain
right_mesh : list
The mesh on the right subdomain
"""
raise NotImplementedError
def boundary_value_or_flux(self, symbol, discretised_child):
"""
        Returns the boundary value or flux using the appropriate expression for the
spatial method. To do this, we create a sparse vector 'bv_vector' that extracts
either the first (for side="left") or last (for side="right") point from
'discretised_child'.
Parameters
-----------
symbol: :class:`pybamm.Symbol`
The boundary value or flux symbol
discretised_child : :class:`pybamm.StateVector`
The discretised variable from which to calculate the boundary value
Returns
-------
:class:`pybamm.MatrixMultiplication`
The variable representing the surface value.
"""
if any(len(self.mesh[dom]) > 1 for dom in discretised_child.domain):
raise NotImplementedError("Cannot process 2D symbol in base spatial method")
if isinstance(symbol, pybamm.BoundaryGradient):
raise TypeError("Cannot process BoundaryGradient in base spatial method")
n = sum(self.mesh[dom][0].npts for dom in discretised_child.domain)
if symbol.side == "left":
# coo_matrix takes inputs (data, (row, col)) and puts data[i] at the point
# (row[i], col[i]) for each index of data. Here we just want a single point
# with value 1 at (0,0).
# Convert to a csr_matrix to allow indexing and other functionality
left_vector = csr_matrix(coo_matrix(([1], ([0], [0])), shape=(1, n)))
bv_vector = pybamm.Matrix(left_vector)
elif symbol.side == "right":
# as above, but now we want a single point with value 1 at (0, n-1)
right_vector = csr_matrix(coo_matrix(([1], ([0], [n - 1])), shape=(1, n)))
bv_vector = pybamm.Matrix(right_vector)
out = bv_vector @ discretised_child
# boundary value removes domain
out.domain = []
return out
def mass_matrix(self, symbol, boundary_conditions):
"""
Calculates the mass matrix for a spatial method.
Parameters
----------
symbol: :class:`pybamm.Variable`
The variable corresponding to the equation for which we are
calculating the mass matrix.
boundary_conditions : dict
The boundary conditions of the model
({symbol.id: {"left": left bc, "right": right bc}})
Returns
-------
:class:`pybamm.Matrix`
The (sparse) mass matrix for the spatial method.
"""
# NOTE: for different spatial methods the matrix may need to be adjusted
# to account for Dirichlet boundary conditions. Here, we just have the default
# behaviour that the mass matrix is the identity.
# Create appropriate submesh by combining submeshes in domain
submesh = self.mesh.combine_submeshes(*symbol.domain)
# Get number of points in primary dimension
n = submesh[0].npts
# Create mass matrix for primary dimension
prim_mass = eye(n)
# Get number of points in secondary dimension
sec_pts = len(submesh)
# Convert to csr_matrix as required by some solvers
mass = csr_matrix(kron(eye(sec_pts), prim_mass))
return pybamm.Matrix(mass)
def process_binary_operators(self, bin_op, left, right, disc_left, disc_right):
"""Discretise binary operators in model equations. Default behaviour is to
return a new binary operator with the discretised children.
Parameters
----------
bin_op : :class:`pybamm.BinaryOperator`
Binary operator to discretise
left : :class:`pybamm.Symbol`
The left child of `bin_op`
right : :class:`pybamm.Symbol`
The right child of `bin_op`
disc_left : :class:`pybamm.Symbol`
The discretised left child of `bin_op`
disc_right : :class:`pybamm.Symbol`
The discretised right child of `bin_op`
Returns
-------
:class:`pybamm.BinaryOperator`
Discretised binary operator
"""
return bin_op.__class__(disc_left, disc_right)
def concatenation(self, disc_children):
"""Discrete concatenation object.
Parameters
----------
disc_children : list
List of discretised children
Returns
-------
:class:`pybamm.DomainConcatenation`
Concatenation of the discretised children
"""
return pybamm.DomainConcatenation(disc_children, self.mesh)
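
# Concrete discretisations subclass SpatialMethod and override the operators that
# raise NotImplementedError above; a minimal sketch of the pattern (MySpatialMethod
# and the matrix construction are illustrative only):
#
#   class MySpatialMethod(SpatialMethod):
#       def gradient(self, symbol, discretised_symbol, boundary_conditions):
#           grad_matrix = ...  # build a pybamm.Matrix for the combined submesh
#           return grad_matrix @ discretised_symbol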

# === source file: jedgedrudd/PyBaMM / pybamm/spatial_methods/spatial_method.py (Python, BSD-3-Clause license) ===

import os
import numpy as np
def save_gif(gif_fname, images, fps):
"""
To generate a gif from image files, first generate palette from images
and then generate the gif from the images and the palette.
ffmpeg -i input_%02d.jpg -vf palettegen -y palette.png
ffmpeg -i input_%02d.jpg -i palette.png -lavfi paletteuse -y output.gif
Alternatively, use a filter to map the input images to both the palette
and gif commands, while also passing the palette to the gif command.
ffmpeg -i input_%02d.jpg -filter_complex "[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse" -y output.gif
To directly pass in numpy images, use rawvideo format and `-i -` option.
"""
from subprocess import Popen, PIPE
head, tail = os.path.split(gif_fname)
if head and not os.path.exists(head):
os.makedirs(head)
h, w, c = images[0].shape
cmd = ['ffmpeg', '-y',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-r', '%.02f' % fps,
'-s', '%dx%d' % (w, h),
'-pix_fmt', {1: 'gray', 3: 'rgb24', 4: 'rgba'}[c],
'-i', '-',
'-filter_complex', '[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse',
'-r', '%.02f' % fps,
'%s' % gif_fname]
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in images:
        proc.stdin.write(image.tobytes())  # tostring() is removed in newer NumPy
out, err = proc.communicate()
if proc.returncode:
err = '\n'.join([' '.join(cmd), err.decode('utf8')])
raise IOError(err)
del proc
def encode_gif(images, fps):
from subprocess import Popen, PIPE
h, w, c = images[0].shape
cmd = ['ffmpeg', '-y',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-r', '%.02f' % fps,
'-s', '%dx%d' % (w, h),
'-pix_fmt', {1: 'gray', 3: 'rgb24', 4: 'rgba'}[c],
'-i', '-',
'-filter_complex', '[0:v]split[x][z];[z]palettegen[y];[x][y]paletteuse',
'-r', '%.02f' % fps,
'-f', 'gif',
'-']
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in images:
        proc.stdin.write(image.tobytes())
out, err = proc.communicate()
if proc.returncode:
err = '\n'.join([' '.join(cmd), err.decode('utf8')])
raise IOError(err)
del proc
return out
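
# Design note: piping raw RGB frames to ffmpeg over stdin ("-f rawvideo ... -i -")
# together with the split/palettegen/paletteuse filter graph avoids writing
# intermediate image or palette files; encode_gif additionally keeps the resulting
# GIF in memory by sending it to stdout ("-f gif -").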
def main():
images_shape = (12, 64, 64, 3) # num_frames, height, width, channels
images = np.random.randint(256, size=images_shape).astype(np.uint8)
save_gif('output_save.gif', images, 4)
with open('output_save.gif', 'rb') as f:
string_save = f.read()
string_encode = encode_gif(images, 4)
with open('output_encode.gif', 'wb') as f:
f.write(string_encode)
print(np.all(string_save == string_encode))
if __name__ == '__main__':
main()

# === source file: Bonennult/video_prediction / video_prediction/utils/ffmpeg_gif.py (Python, MIT license) ===

# -*- coding: utf-8 -*-
import datetime
import json
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
class DBEngine(object):
def __init__(self, db_uri):
"""
db_uri = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database}?charset=utf8mb4'
"""
engine = create_engine(db_uri)
self.session = sessionmaker(bind=engine)()
@staticmethod
def value_decode(row: dict):
"""
        Try to decode the values of a table row in place:
        datetime.datetime --> string
        datetime.date --> string
        JSON string --> dict
:param row:
:return:
"""
for k, v in row.items():
if isinstance(v, datetime.datetime):
row[k] = v.strftime("%Y-%m-%d %H:%M:%S")
elif isinstance(v, datetime.date):
row[k] = v.strftime("%Y-%m-%d")
elif isinstance(v, str):
try:
row[k] = json.loads(v)
except ValueError:
pass
def _fetch(self, query, size=-1, commit=True):
result = self.session.execute(query)
        if commit:
            self.session.commit()
if query.upper()[:6] == "SELECT":
if size < 0:
al = result.fetchall()
al = [dict(el) for el in al]
return al or None
elif size == 1:
                row = result.fetchone()
                if row is None:
                    return None
                on = dict(row)
                self.value_decode(on)
                return on
else:
mny = result.fetchmany(size)
mny = [dict(el) for el in mny]
return mny or None
elif query.upper()[:6] in ("UPDATE", "DELETE", "INSERT"):
return {"rowcount": result.rowcount}
def fetchone(self, query, commit=True):
return self._fetch(query, size=1, commit=commit)
def fetchmany(self, query, size, commit=True):
return self._fetch(query=query, size=size, commit=commit)
def fetchall(self, query, commit=True):
return self._fetch(query=query, size=-1, commit=commit)
def insert(self, query, commit=True):
return self._fetch(query=query, commit=commit)
def delete(self, query, commit=True):
return self._fetch(query=query, commit=commit)
def update(self, query, commit=True):
return self._fetch(query=query, commit=commit)
if __name__ == "__main__":
db = DBEngine(f"mysql+pymysql://xxxxx:[email protected]:3306/dbname?charset=utf8mb4")
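    # Illustrative queries against the engine (table and column names are hypothetical):
    #   rows = db.fetchall("SELECT * FROM t_user")
    #   one = db.fetchone("SELECT * FROM t_user WHERE id=1")
    #   db.update("UPDATE t_user SET name='foo' WHERE id=1")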

# === source file: AlanFightting/httprunner / httprunner/database/engine.py (Python, Apache-2.0 license) ===

# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import sys
class Binutils(AutotoolsPackage, GNUMirrorPackage):
"""GNU binutils, which contain the linker, assembler, objdump and others"""
homepage = "http://www.gnu.org/software/binutils/"
gnu_mirror_path = "binutils/binutils-2.28.tar.bz2"
version('2.34', sha256='89f010078b6cf69c23c27897d686055ab89b198dddf819efb0a4f2c38a0b36e6')
version('2.33.1', sha256='0cb4843da15a65a953907c96bad658283f3c4419d6bcc56bf2789db16306adb2')
version('2.32', sha256='de38b15c902eb2725eac6af21183a5f34ea4634cb0bcef19612b50e5ed31072d')
version('2.31.1', sha256='ffcc382695bf947da6135e7436b8ed52d991cf270db897190f19d6f9838564d0')
version('2.29.1', sha256='1509dff41369fb70aed23682351b663b56db894034773e6dbf7d5d6071fc55cc')
version('2.28', sha256='6297433ee120b11b4b0a1c8f3512d7d73501753142ab9e2daa13c5a3edd32a72')
version('2.27', sha256='369737ce51587f92466041a97ab7d2358c6d9e1b6490b3940eb09fb0a9a6ac88')
version('2.26', sha256='c2ace41809542f5237afc7e3b8f32bb92bc7bc53c6232a84463c423b0714ecd9')
version('2.25.1', sha256='b5b14added7d78a8d1ca70b5cb75fef57ce2197264f4f5835326b0df22ac9f22')
version('2.25', sha256='22defc65cfa3ef2a3395faaea75d6331c6e62ea5dfacfed3e2ec17b08c882923')
version('2.24', sha256='e5e8c5be9664e7f7f96e0d09919110ab5ad597794f5b1809871177a0f0f14137')
version('2.23.2', sha256='fe914e56fed7a9ec2eb45274b1f2e14b0d8b4f41906a5194eac6883cfe5c1097')
version('2.20.1', sha256='71d37c96451333c5c0b84b170169fdcb138bbb27397dc06281905d9717c8ed64')
variant('plugins', default=False,
description="enable plugins, needed for gold linker")
variant('gold', default=(sys.platform != 'darwin'),
description="build the gold linker")
variant('libiberty', default=False, description='Also install libiberty.')
variant('nls', default=True, description='Enable Native Language Support')
variant('headers', default=False, description='Install extra headers (e.g. ELF)')
variant('lto', default=False, description='Enable lto.')
variant('ld', default=False, description='Enable ld.')
variant('interwork', default=False, description='Enable interwork.')
patch('cr16.patch', when='@:2.29.1')
patch('update_symbol-2.26.patch', when='@2.26')
depends_on('zlib')
depends_on('gettext', when='+nls')
# Prior to 2.30, gold did not distribute the generated files and
# thus needs bison, even for a one-time build.
depends_on('m4', type='build', when='@:2.29.99 +gold')
depends_on('bison', type='build', when='@:2.29.99 +gold')
# 2.34 needs makeinfo due to a bug, see:
# https://sourceware.org/bugzilla/show_bug.cgi?id=25491
depends_on('texinfo', type='build', when='@2.34')
conflicts('+gold', when='platform=darwin',
msg="Binutils cannot build linkers on macOS")
def configure_args(self):
spec = self.spec
configure_args = [
'--disable-dependency-tracking',
'--disable-werror',
'--enable-multilib',
'--enable-shared',
'--enable-64-bit-bfd',
'--enable-targets=all',
'--with-system-zlib',
'--with-sysroot=/',
]
if '+lto' in spec:
configure_args.append('--enable-lto')
if '+ld' in spec:
configure_args.append('--enable-ld')
if '+interwork' in spec:
configure_args.append('--enable-interwork')
if '+gold' in spec:
configure_args.append('--enable-gold')
if '+plugins' in spec:
configure_args.append('--enable-plugins')
if '+libiberty' in spec:
configure_args.append('--enable-install-libiberty')
if '+nls' in spec:
configure_args.append('--enable-nls')
configure_args.append('LDFLAGS=-lintl')
else:
configure_args.append('--disable-nls')
# To avoid namespace collisions with Darwin/BSD system tools,
# prefix executables with "g", e.g., gar, gnm; see Homebrew
# https://github.com/Homebrew/homebrew-core/blob/master/Formula/binutils.rb
if spec.satisfies('platform=darwin'):
configure_args.append('--program-prefix=g')
return configure_args
@run_after('install')
def install_headers(self):
# some packages (like TAU) need the ELF headers, so install them
# as a subdirectory in include/extras
if '+headers' in self.spec:
extradir = join_path(self.prefix.include, 'extra')
mkdirp(extradir)
# grab the full binutils set of headers
install_tree('include', extradir)
# also grab the headers from the bfd directory
for current_file in glob.glob(join_path(self.build_directory,
'bfd', '*.h')):
install(current_file, extradir)
def flag_handler(self, name, flags):
# To ignore the errors of narrowing conversions for
# the Fujitsu compiler
if name == 'cxxflags'\
and (self.compiler.name == 'fj' or self.compiler.name == 'clang')\
and self.version <= ver('2.31.1'):
flags.append('-Wno-narrowing')
elif name == 'cflags':
if self.spec.satisfies('@:2.34 %gcc@10:'):
flags.append('-fcommon')
return (flags, None, None)
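
# Typical installs of this recipe from the Spack command line (illustrative):
#   spack install binutils@2.34 +gold +plugins
#   spack install binutils ~nls +headers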

# === source file: 0t1s1/spack / var/spack/repos/builtin/packages/binutils/package.py (Python, ECL-2.0 / Apache-2.0 / MIT licenses) ===

#
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnvserver_vpnnexthopserver_binding(base_resource) :
""" Binding class showing the vpnnexthopserver that can be bound to vpnvserver.
"""
def __init__(self) :
self._nexthopserver = None
self._acttype = None
self._name = None
self.___count = None
@property
def name(self) :
r"""Name of the virtual server.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the virtual server.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def nexthopserver(self) :
r"""The name of the next hop server bound to the VPN virtual server.
"""
try :
return self._nexthopserver
except Exception as e:
raise e
@nexthopserver.setter
def nexthopserver(self, nexthopserver) :
r"""The name of the next hop server bound to the VPN virtual server.
"""
try :
self._nexthopserver = nexthopserver
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnvserver_vpnnexthopserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver_vpnnexthopserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = vpnvserver_vpnnexthopserver_binding()
addresource.name = resource.name
addresource.nexthopserver = resource.nexthopserver
return addresource
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = cls.filter_add_parameters(resource)
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnvserver_vpnnexthopserver_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_add_parameters(resource[i])
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = vpnvserver_vpnnexthopserver_binding()
deleteresource.name = resource.name
deleteresource.nexthopserver = resource.nexthopserver
return deleteresource
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnvserver_vpnnexthopserver_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource[i])
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch vpnvserver_vpnnexthopserver_binding resources.
"""
try :
if not name :
obj = vpnvserver_vpnnexthopserver_binding()
response = obj.get_resources(service, option_)
else :
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of vpnvserver_vpnnexthopserver_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count vpnvserver_vpnnexthopserver_binding resources configued on NetScaler.
"""
try :
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of vpnvserver_vpnnexthopserver_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_vpnnexthopserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Staaddresstype:
IPV4 = "IPV4"
IPV6 = "IPV6"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
ICA_REQUEST = "ICA_REQUEST"
OTHERTCP_REQUEST = "OTHERTCP_REQUEST"
AAA_REQUEST = "AAA_REQUEST"
AAA_RESPONSE = "AAA_RESPONSE"
class vpnvserver_vpnnexthopserver_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnvserver_vpnnexthopserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnvserver_vpnnexthopserver_binding = [vpnvserver_vpnnexthopserver_binding() for _ in range(length)]
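# --- Illustrative usage sketch (not part of the generated SDK) ---
# Assumes an authenticated nitro_service instance named `client`; the server
# names are made up. Shown only to make the add/get/count classmethods above
# concrete.
# >>> binding = vpnvserver_vpnnexthopserver_binding()
# >>> binding.name = "vpn_vs1"                 # VPN virtual server name
# >>> binding.nexthopserver = "nexthop1"       # next hop server to bind
# >>> vpnvserver_vpnnexthopserver_binding.add(client, binding)
# >>> bound = vpnvserver_vpnnexthopserver_binding.get(client, name="vpn_vs1")
# >>> vpnvserver_vpnnexthopserver_binding.count(client, name="vpn_vs1")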
| 30.656904 | 137 | 0.733179 | [
"Apache-2.0"
] | guardicore/nitro-python | build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnvserver_vpnnexthopserver_binding.py | 7,327 | Python |
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWebEngineWidgets import *
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
        self.setWindowTitle('Example of loading an external web page')
self.setGeometry(5,30,1355,730)
self.browser=QWebEngineView()
        # Load an external web page
self.browser.load(QUrl('https://blog.csdn.net/jia666666'))
self.setCentralWidget(self.browser)
if __name__ == '__main__':
app=QApplication(sys.argv)
win=MainWindow()
win.show()
    sys.exit(app.exec_())
| 28.666667 | 66 | 0.684385 | [
"MIT"
] | Skylark0924/Vocabulary_Analysis | ui/textUI.py | 634 | Python |
import csv
import json
import random
import re
import socket
import string
import tempfile
from base64 import b64decode, b64encode
from pathlib import Path
import ipaddress
from subprocess import check_output, CalledProcessError, TimeoutExpired
from yaml import safe_load
from charmhelpers.core import hookenv
from charmhelpers.core.templating import render
from charmhelpers.core import unitdata
from charmhelpers.fetch import apt_install
from charms.reactive import endpoint_from_flag, is_flag_set
from charms.layer import kubernetes_common
AUTH_BACKUP_EXT = 'pre-secrets'
AUTH_BASIC_FILE = '/root/cdk/basic_auth.csv'
AUTH_SECRET_NS = 'kube-system'
AUTH_SECRET_TYPE = 'juju.is/token-auth'
AUTH_TOKENS_FILE = '/root/cdk/known_tokens.csv'
STANDARD_API_PORT = 6443
CEPH_CONF_DIR = Path('/etc/ceph')
CEPH_CONF = CEPH_CONF_DIR / 'ceph.conf'
CEPH_KEYRING = CEPH_CONF_DIR / 'ceph.client.admin.keyring'
db = unitdata.kv()
def get_external_lb_endpoints():
"""
Return a list of any external API load-balancer endpoints that have
been manually configured.
"""
ha_connected = is_flag_set('ha.connected')
forced_lb_ips = hookenv.config('loadbalancer-ips').split()
vips = hookenv.config('ha-cluster-vip').split()
dns_record = hookenv.config('ha-cluster-dns')
if forced_lb_ips:
# if the user gave us IPs for the load balancer, assume
# they know what they are talking about and use that
# instead of our information.
return [(address, STANDARD_API_PORT) for address in forced_lb_ips]
elif ha_connected and vips:
return [(vip, STANDARD_API_PORT) for vip in vips]
elif ha_connected and dns_record:
return [(dns_record, STANDARD_API_PORT)]
else:
return []
def get_lb_endpoints():
"""
Return all load-balancer endpoints, whether from manual config or via
relation.
"""
external_lb_endpoints = get_external_lb_endpoints()
loadbalancer = endpoint_from_flag('loadbalancer.available')
if external_lb_endpoints:
return external_lb_endpoints
elif loadbalancer:
lb_addresses = loadbalancer.get_addresses_ports()
return [(host.get('public-address'), host.get('port'))
for host in lb_addresses]
else:
return []
def get_api_endpoint(relation=None):
"""
Determine the best endpoint for a client to connect to.
If a relation is given, it will take that into account when choosing an
endpoint.
"""
endpoints = get_lb_endpoints()
if endpoints:
# select a single endpoint based on our local unit number
return endpoints[kubernetes_common.get_unit_number() % len(endpoints)]
elif relation:
ingress_address = hookenv.ingress_address(relation.relation_id,
hookenv.local_unit())
return (ingress_address, STANDARD_API_PORT)
else:
return (hookenv.unit_public_ip(), STANDARD_API_PORT)
def install_ceph_common():
"""Install ceph-common tools.
:return: None
"""
ceph_admin = endpoint_from_flag('ceph-storage.available')
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': 'true',
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
CEPH_CONF_DIR.mkdir(exist_ok=True, parents=True)
# Render the ceph configuration from the ceph conf template.
render('ceph.conf', str(CEPH_CONF), ceph_context)
# The key can rotate independently of other ceph config, so validate it.
try:
with open(str(CEPH_KEYRING), 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
def query_cephfs_enabled():
install_ceph_common()
try:
out = check_output(['ceph', 'mds', 'versions',
'-c', str(CEPH_CONF)], timeout=60)
return bool(json.loads(out.decode()))
except CalledProcessError:
hookenv.log('Unable to determine if CephFS is enabled', 'ERROR')
return False
except TimeoutExpired:
hookenv.log('Timeout attempting to determine if CephFS is enabled', "ERROR")
return False
def get_cephfs_fsname():
install_ceph_common()
try:
data = json.loads(check_output(['ceph', 'fs', 'ls', '-f', 'json'], timeout=60))
except TimeoutExpired:
hookenv.log('Timeout attempting to determine fsname', "ERROR")
return None
for fs in data:
if 'ceph-fs_data' in fs['data_pools']:
return fs['name']
def deprecate_auth_file(auth_file):
"""
In 1.19+, file-based authentication was deprecated in favor of webhook
auth. Write out generic files that inform the user of this.
"""
csv_file = Path(auth_file)
csv_file.parent.mkdir(exist_ok=True)
csv_backup = Path('{}.{}'.format(csv_file, AUTH_BACKUP_EXT))
if csv_file.exists() and not csv_backup.exists():
csv_file.rename(csv_backup)
with csv_file.open('w') as f:
f.write('# File-based authentication was removed in Charmed Kubernetes 1.19\n')
def migrate_auth_file(filename):
'''Create secrets or known tokens depending on what file is being migrated.'''
with open(str(filename), 'r') as f:
rows = list(csv.reader(f))
for row in rows:
try:
if row[0].startswith('#'):
continue
else:
if filename == AUTH_BASIC_FILE:
create_known_token(*row)
elif filename == AUTH_TOKENS_FILE:
create_secret(*row)
else:
# log and return if we don't recognize the auth file
hookenv.log('Unknown auth file: {}'.format(filename))
return False
except IndexError:
pass
deprecate_auth_file(filename)
return True
def generate_rfc1123(length=10):
'''Generate a random string compliant with RFC 1123.
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names
param: length - the length of the string to generate
'''
length = 253 if length > 253 else length
first_last_opts = string.ascii_lowercase + string.digits
middle_opts = first_last_opts + '-' + '.'
# ensure first and last chars are alphanum
length -= 2
rand_str = (
random.SystemRandom().choice(first_last_opts) +
''.join(random.SystemRandom().choice(middle_opts) for _ in range(length)) +
random.SystemRandom().choice(first_last_opts)
)
return rand_str
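# Illustration (output is random, so this is a sketch rather than a doctest):
# generate_rfc1123(10) returns something like 'k3x.f-9a2b' -- exactly 10
# characters drawn from lowercase alphanumerics plus '-' and '.', with
# alphanumeric first and last characters, as RFC 1123 subdomain names require.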
def token_generator(length=32):
'''Generate a random token for use in account tokens.
param: length - the length of the token to generate
'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
def create_known_token(token, username, user, groups=None):
known_tokens = Path(AUTH_TOKENS_FILE)
known_tokens.parent.mkdir(exist_ok=True)
csv_fields = ['token', 'username', 'user', 'groups']
try:
with known_tokens.open('r') as f:
tokens_by_user = {r['user']: r for r in csv.DictReader(f, csv_fields)}
except FileNotFoundError:
tokens_by_user = {}
tokens_by_username = {r['username']: r for r in tokens_by_user.values()}
if user in tokens_by_user:
record = tokens_by_user[user]
elif username in tokens_by_username:
record = tokens_by_username[username]
else:
record = tokens_by_user[user] = {}
record.update({
'token': token,
'username': username,
'user': user,
'groups': groups,
})
if not record['groups']:
del record['groups']
with known_tokens.open('w') as f:
csv.DictWriter(f, csv_fields, lineterminator='\n').writerows(
tokens_by_user.values())
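# Illustration of the resulting row format (values are made up): a call like
# create_known_token('abc123', 'admin', 'admin', 'system:masters') writes a
# line of the form
#   abc123,admin,admin,system:masters
# to /root/cdk/known_tokens.csv, following the csv_fields order used above.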
def create_secret(token, username, user, groups=None):
# secret IDs must be unique and rfc1123 compliant
sani_name = re.sub('[^0-9a-z.-]+', '-', user.lower())
secret_id = 'auth-{}-{}'.format(sani_name, generate_rfc1123(10))
# The authenticator expects tokens to be in the form user::token
token_delim = '::'
if token_delim not in token:
token = '{}::{}'.format(user, token)
context = {
'type': AUTH_SECRET_TYPE,
'secret_name': secret_id,
'secret_namespace': AUTH_SECRET_NS,
'user': b64encode(user.encode('UTF-8')).decode('utf-8'),
'username': b64encode(username.encode('UTF-8')).decode('utf-8'),
'password': b64encode(token.encode('UTF-8')).decode('utf-8'),
'groups': b64encode(groups.encode('UTF-8')).decode('utf-8') if groups else ''
}
with tempfile.NamedTemporaryFile() as tmp_manifest:
render(
'cdk.master.auth-webhook-secret.yaml', tmp_manifest.name, context=context)
if kubernetes_common.kubectl_manifest('apply', tmp_manifest.name):
hookenv.log("Created secret for {}".format(username))
return True
else:
hookenv.log("WARN: Unable to create secret for {}".format(username))
return False
def delete_secret(secret_id):
'''Delete a given secret id.'''
# If this fails, it's most likely because we're trying to delete a secret
# that doesn't exist. Let the caller decide if failure is a problem.
return kubernetes_common.kubectl_success(
'delete', 'secret', '-n', AUTH_SECRET_NS, secret_id)
def get_csv_password(csv_fname, user):
"""Get the password for the given user within the csv file provided."""
root_cdk = '/root/cdk'
tokens_fname = Path(root_cdk) / csv_fname
if not tokens_fname.is_file():
return None
with tokens_fname.open('r') as stream:
for line in stream:
record = line.split(',')
try:
if record[1] == user:
return record[0]
except IndexError:
# probably a blank line or comment; move on
continue
return None
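# Illustration (made-up token): if /root/cdk/basic_auth.csv contains a line
#   4f6a2b...,admin,admin
# then get_csv_password('basic_auth.csv', 'admin') returns '4f6a2b...'.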
def get_secret_password(username):
"""Get the password for the given user from the secret that CK created."""
try:
output = kubernetes_common.kubectl(
'get', 'secrets', '-n', AUTH_SECRET_NS,
'--field-selector', 'type={}'.format(AUTH_SECRET_TYPE),
'-o', 'json').decode('UTF-8')
except CalledProcessError:
        # NB: apiserver probably isn't up. This can happen on bootstrap or upgrade
# while trying to build kubeconfig files. If we need the 'admin' token during
# this time, pull it directly out of the kubeconfig file if possible.
token = None
if username == 'admin':
admin_kubeconfig = Path('/root/.kube/config')
if admin_kubeconfig.exists():
with admin_kubeconfig.open('r') as f:
data = safe_load(f)
try:
token = data['users'][0]['user']['token']
except (KeyError, ValueError):
pass
return token
except FileNotFoundError:
# New deployments may ask for a token before the kubectl snap is installed.
# Give them nothing!
return None
secrets = json.loads(output)
if 'items' in secrets:
for secret in secrets['items']:
try:
data_b64 = secret['data']
password_b64 = data_b64['password'].encode('UTF-8')
username_b64 = data_b64['username'].encode('UTF-8')
except (KeyError, TypeError):
# CK authn secrets will have populated 'data', but not all secrets do
continue
password = b64decode(password_b64).decode('UTF-8')
secret_user = b64decode(username_b64).decode('UTF-8')
if username == secret_user:
return password
return None
try:
ipaddress.IPv4Network.subnet_of
except AttributeError:
# Returns True if a is subnet of b
# This method is copied from cpython as it is available only from
# python 3.7
# https://github.com/python/cpython/blob/3.7/Lib/ipaddress.py#L1000
def _is_subnet_of(a, b):
try:
# Always false if one is v4 and the other is v6.
if a._version != b._version:
raise TypeError("{} and {} are not of the same version".format(
a, b))
return (b.network_address <= a.network_address and
b.broadcast_address >= a.broadcast_address)
except AttributeError:
raise TypeError("Unable to test subnet containment "
"between {} and {}".format(a, b))
ipaddress.IPv4Network.subnet_of = _is_subnet_of
ipaddress.IPv6Network.subnet_of = _is_subnet_of
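# Example of the subnet_of check relied on below (built in on Python 3.7+,
# provided by the backport above otherwise):
# >>> ipaddress.ip_network('10.152.183.0/24').subnet_of(
# ...     ipaddress.ip_network('10.152.0.0/16'))
# True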
def is_service_cidr_expansion():
service_cidr_from_db = db.get('kubernetes-master.service-cidr')
service_cidr_from_config = hookenv.config('service-cidr')
if not service_cidr_from_db:
return False
# Do not consider as expansion if both old and new service cidr are same
if service_cidr_from_db == service_cidr_from_config:
return False
current_networks = kubernetes_common.get_networks(service_cidr_from_db)
new_networks = kubernetes_common.get_networks(service_cidr_from_config)
if len(current_networks) != len(new_networks) or \
not all(cur.subnet_of(new) for cur, new in zip(current_networks,
new_networks)):
hookenv.log("WARN: New k8s service cidr not superset of old one")
return False
return True
def service_cidr():
''' Return the charm's service-cidr config'''
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
frozen_service_cidr = db.get('kubernetes-master.service-cidr')
if not frozen_service_cidr or is_service_cidr_expansion():
db.set('kubernetes-master.service-cidr', hookenv.config(
'service-cidr'))
def get_preferred_service_network(service_cidrs):
'''Get the network preferred for cluster service, preferring IPv4'''
net_ipv4 = kubernetes_common.get_ipv4_network(service_cidrs)
net_ipv6 = kubernetes_common.get_ipv6_network(service_cidrs)
return net_ipv4 or net_ipv6
def get_dns_ip():
return kubernetes_common.get_service_ip('kube-dns',
namespace='kube-system')
def get_kubernetes_service_ips():
'''Get the IP address(es) for the kubernetes service based on the cidr.'''
return [next(network.hosts()).exploded
for network in kubernetes_common.get_networks(service_cidr())]
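# Illustration (assumes kubernetes_common.get_networks wraps the standard
# ipaddress module): with a service-cidr of '10.152.183.0/24' the first host,
# and hence the kubernetes service IP, is
# >>> next(ipaddress.ip_network('10.152.183.0/24').hosts()).exploded
# '10.152.183.1'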
def get_snap_revs(snaps):
'''Get a dict of snap revisions for a given list of snaps.'''
channel = hookenv.config('channel')
rev_info = {}
for s in sorted(snaps):
try:
            # valid info should look like:
# ...
# channels:
# latest/stable: 1.18.8 2020-08-27 (1595) 22MB classic
# latest/candidate: 1.18.8 2020-08-27 (1595) 22MB classic
# ...
info = check_output(['snap', 'info', s]).decode('utf8', errors='ignore')
except CalledProcessError:
# If 'snap info' fails for whatever reason, just empty the info
info = ''
snap_rev = None
yaml_data = safe_load(info)
if yaml_data and 'channels' in yaml_data:
try:
# valid data should look like:
# ['1.18.8', '2020-08-27', '(1604)', '21MB', 'classic']
d = yaml_data['channels'][channel].split()
snap_rev = d[2].strip("()")
except (KeyError, IndexError):
hookenv.log('Could not determine revision for snap: {}'.format(s),
level=hookenv.WARNING)
rev_info[s] = snap_rev
return rev_info
| 35.80303 | 96 | 0.63394 | [
"ECL-2.0",
"Apache-2.0"
] | hemanthnakkina/charm-kubernetes-master | lib/charms/layer/kubernetes_master.py | 16,541 | Python |
# -*- coding:utf-8 -*-
"""
some constants and global queues
Author: QiaoXiaofeng
Date: 2020/01/11
Email: [email protected]
"""
from collections import deque
# Version
VERSION = "1.1.3_201123_alpha"
# Exchange Names
HUOBI_SWAP = "huobi_swap" # Huobi Swap https://huobiapi.github.io/docs/coin_margined_swap/v1/cn/
HUOBI_FUTURE = "huobi_future" # Huobi Future https://huobiapi.github.io/docs/dm/v1/cn/#5ea2e0cde2
HUOBI_OPTION = "huobi_option" # Huobi Option
HUOBI_USDT_SWAP = "huobi_usdt_swap" # Huobi Usdt Swap
# Market Types
MARKET_TYPE_TRADE = "trade"
MARKET_TYPE_ORDERBOOK = "orderbook"
MARKET_TYPE_KLINE = "kline"
# REQUEST AGENT
USER_AGENT = "AlphaQuant" + VERSION
| 23.655172 | 98 | 0.749271 | [
"MIT"
] | Jscorpio1611/huobi_futures_Python | alpha/const.py | 686 | Python |
#!/usr/bin/python
import requests
import json
import cv2
import time
import logging
import random
ledServerURL = 'http://192.168.1.51'
nbPixels = 30
def getServerPath(url):
return "%s%s" % (ledServerURL, url)
def getImage():
retval, im = camera.read()
return im
def getWarmUpImage():
# Warmup
for i in range(5):
temp = getImage()
# Save result
#file = "/notebooks/test-capture-hot-point/image%d.png" % (i)
#cv2.imwrite(file, temp)
return getImage()
def rgb2hex(r, g, b):
return '0x{:02x}{:02x}{:02x}'.format(r, g, b)
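# Example: the hex colour string POSTed as a pixel colour further below, e.g.
# >>> rgb2hex(255, 255, 255)
# '0xffffff'
# >>> rgb2hex(0, 128, 255)
# '0x0080ff'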
# logging
logger = logging.getLogger('esp8266')
stderr_log_handler = logging.StreamHandler()
logger.addHandler(stderr_log_handler)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stderr_log_handler.setFormatter(formatter)
logger.setLevel('INFO')
# Open Camera
camera = cv2.VideoCapture(0)
# Set definition
camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
time.sleep(2)
# camera.set(15, -8.0)
# Image to update
base_image = getWarmUpImage()
r = requests.get(getServerPath('/status'))
r = requests.post(getServerPath('/settings?animation-mode=paint'))
colormap = [(0, 0)]*nbPixels
for i in range(0,nbPixels):
# time.sleep(3)
print("Setting pixel... %d" % (i))
r = requests.post(getServerPath('/pixels/reset'), data = {})
pixels = [{ 'index':i, 'color': rgb2hex(255, 255, 255) }]
payload = { 'pixels': pixels }
# logger.info("Pixels %s" % (json.dumps(payload)))
r = requests.post(getServerPath('/pixels/set'), data = json.dumps(payload) )
print("Capturing image... %d" % (i))
capture = getWarmUpImage()
# Convert and process
gray = cv2.cvtColor(capture, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (25,25),0)
# Find spot
(minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(gray)
# Materialize spot
# cv2.circle(base_image,(maxLoc),2,(0,255,0),-1)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(base_image, "%d" % (i), (maxLoc), font, 0.5, (255,0,0), 1, cv2.LINE_AA)
# Record pixel
colormap[i] = (maxLoc)
#file = "/notebooks/image_%d.png" % (i)
file = "/notebooks/image_tmp.png"
cv2.imwrite(file, gray)
#time.sleep(0.3)
# Save result
file = "/notebooks/image.png"
cv2.imwrite(file, base_image)
# Cleanup
camera.release()
| 24.159574 | 85 | 0.690004 | [
"MIT"
] | kalemena/no-hell-lights | src/opencv-match-pixels.py | 2,271 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class WindowDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
def _structuredDataset(self, structure, shape, dtype):
if structure is None:
return dataset_ops.Dataset.from_tensors(
array_ops.zeros(shape, dtype=dtype))
else:
return dataset_ops.Dataset.zip(
tuple([
self._structuredDataset(substructure, shape, dtype)
for substructure in structure
]))
def _structuredElement(self, structure, shape, dtype):
if structure is None:
return array_ops.zeros(shape, dtype=dtype)
else:
return tuple([
self._structuredElement(substructure, shape, dtype)
for substructure in structure
])
def _assertEqual(self, xs, ys):
self.assertEqual(type(xs), type(ys))
if isinstance(xs, tuple) and isinstance(ys, tuple):
self.assertEqual(len(xs), len(ys))
for x, y in zip(xs, ys):
self._assertEqual(x, y)
elif isinstance(xs, np.ndarray) and isinstance(ys, np.ndarray):
self.assertAllEqual(xs, ys)
else:
self.assertEqual(xs, ys)
@parameterized.named_parameters(
("1", None, np.int32([]), dtypes.bool),
("2", None, np.int32([]), dtypes.int32),
("3", None, np.int32([]), dtypes.float32),
("4", None, np.int32([]), dtypes.string),
("5", None, np.int32([2]), dtypes.int32),
("6", None, np.int32([2, 2]), dtypes.int32),
("7", (None, None, None), np.int32([]), dtypes.int32),
("8", (None, (None, None)), np.int32([]), dtypes.int32),
)
def testWindowDatasetFlatMap(self, structure, shape, dtype):
"""Tests windowing by chaining it with flat map.
Args:
structure: the input structure
shape: the input shape
dtype: the input data type
"""
def fn(*args):
if len(args) == 1 and not isinstance(args[0], tuple):
return args[0]
return dataset_ops.Dataset.zip(
tuple([fn(*arg) if isinstance(arg, tuple) else arg for arg in args]))
dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(
grouping.window_dataset(5)).flat_map(fn)
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(self._structuredElement(structure, shape, dtype))
for _ in range(5):
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", None, np.int32([]), dtypes.bool),
("2", None, np.int32([]), dtypes.int32),
("3", None, np.int32([]), dtypes.float32),
("4", None, np.int32([]), dtypes.string),
("5", None, np.int32([2]), dtypes.int32),
("6", None, np.int32([2, 2]), dtypes.int32),
("7", (None, None, None), np.int32([]), dtypes.int32),
("8", (None, (None, None)), np.int32([]), dtypes.int32),
)
def testWindowDatasetBatchDense(self, structure, shape, dtype):
"""Tests batching of dense tensor windows.
Args:
structure: the input structure
shape: the input shape
dtype: the input data type
"""
def fn(*args):
if len(args) == 1 and not isinstance(args[0], tuple):
return batching.batch_window(args[0])
return tuple([
fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)
for arg in args
])
dataset = self._structuredDataset(structure, shape, dtype).repeat(5).apply(
grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(
self._structuredElement(structure, np.concatenate(
([5], shape), axis=0), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", np.int32([])),
("2", np.int32([1])),
("3", np.int32([1, 2, 3])),
)
def testWindowDatasetBatchDenseDynamicShape(self, shape):
"""Tests batching of dynamically shaped dense tensor windows.
Args:
shape: the input shape
"""
shape_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensors(
array_ops.zeros(shape_t)).repeat(5).apply(
grouping.window_dataset(5)).apply(
grouping._map_x_dataset(batching.batch_window))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shape_t: shape})
expected = sess.run(
self._structuredElement(None, np.concatenate(([5], shape), axis=0),
dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
def _make_dense_to_sparse_fn(self, is_scalar):
def dense_to_sparse_scalar(tensor):
indices = [[]]
values = array_ops.expand_dims(tensor, 0)
shape = []
return sparse_tensor.SparseTensorValue(indices, values, shape)
def dense_to_sparse_non_scalar(tensor):
indices = array_ops.where(array_ops.ones_like(tensor, dtype=dtypes.bool))
values = array_ops.gather_nd(tensor, indices)
shape = array_ops.shape(tensor, out_type=dtypes.int64)
return sparse_tensor.SparseTensorValue(indices, values, shape)
if is_scalar:
return dense_to_sparse_scalar
return dense_to_sparse_non_scalar
def _structuredSparseDataset(self, structure, shape, dtype):
dense_to_sparse = self._make_dense_to_sparse_fn(len(shape) == 0) # pylint: disable=g-explicit-length-test
if structure is None:
return dataset_ops.Dataset.from_tensors(
dense_to_sparse(array_ops.zeros(shape, dtype=dtype)))
else:
return dataset_ops.Dataset.zip(
tuple([
self._structuredSparseDataset(substructure, shape, dtype)
for substructure in structure
]))
def _structuredSparseElement(self, structure, shape, dtype):
dense_to_sparse = self._make_dense_to_sparse_fn(len(shape) == 0) # pylint: disable=g-explicit-length-test
if structure is None:
return dense_to_sparse(array_ops.zeros(shape, dtype=dtype))
else:
return tuple([
self._structuredSparseElement(substructure, shape, dtype)
for substructure in structure
])
@parameterized.named_parameters(
("1", None, np.int32([]), dtypes.bool),
("2", None, np.int32([]), dtypes.int32),
("3", None, np.int32([]), dtypes.float32),
("4", None, np.int32([]), dtypes.string),
("5", None, np.int32([2]), dtypes.int32),
("6", None, np.int32([2, 2]), dtypes.int32),
("7", (None, None, None), np.int32([]), dtypes.int32),
("8", (None, (None, None)), np.int32([]), dtypes.int32),
)
def testWindowDatasetBatchSparse(self, structure, shape, dtype):
"""Tests batching of sparse tensor windows.
Args:
structure: the input structure
shape: the input shape
dtype: the input data type
"""
def fn(*args):
if len(args) == 1 and not isinstance(args[0], tuple):
return batching.batch_window(args[0])
return tuple([
fn(*arg) if isinstance(arg, tuple) else batching.batch_window(arg)
for arg in args
])
dataset = self._structuredSparseDataset(
structure, shape, dtype).repeat(5).apply(
grouping.window_dataset(5)).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(
self._structuredSparseElement(structure,
np.concatenate(([5], shape), axis=0),
dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", np.int32([])),
("2", np.int32([1])),
("3", np.int32([1, 2, 3])),
)
def testWindowDatasetBatchSparseDynamicShape(self, shape):
"""Tests batching of dynamically shaped sparse tensor windows.
Args:
shape: the input shape
"""
shape_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensors(array_ops.zeros(shape_t)).map(
self._make_dense_to_sparse_fn(len(shape) == 0)).repeat(5).apply( # pylint: disable=g-explicit-length-test
grouping.window_dataset(5)).apply(
grouping._map_x_dataset(batching.batch_window))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shape_t: shape})
expected = sess.run(
self._structuredSparseElement(None,
np.concatenate(([5], shape), axis=0),
dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
def _structuredRaggedDataset(self, structure, shapes, dtype):
if structure is None:
return dataset_ops.Dataset.from_tensor_slices(shapes).map(
lambda shape: array_ops.zeros(shape, dtype=dtype))
else:
return dataset_ops.Dataset.zip(
tuple([
self._structuredRaggedDataset(substructure, shapes, dtype)
for substructure in structure
]))
@parameterized.named_parameters(
("1", None, np.int32([[1], [2], [3]]), dtypes.bool, [-1]),
("2", None, np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
("3", None, np.int32([[1], [2], [3]]), dtypes.float32, [-1]),
("4", None, np.int32([[1], [2], [3]]), dtypes.string, [-1]),
("5", None, np.int32([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [-1, -1]),
("6", None, np.int32([[3, 1, 3], [1, 3, 1]]), dtypes.int32, [-1, -1, -1]),
("7", (None, None, None), np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
("8", (None,
(None, None)), np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
("9", None, np.int32([[1], [2], [3]]), dtypes.int32, [-1]),
("10", None, np.int32([[1], [2], [3]]), dtypes.int32, np.int32([10])),
)
def testWindowDatasetPaddedBatchDense(self, structure, shapes, dtype,
padded_shape):
"""Tests padded batching of dense tensor windows.
Args:
structure: the input structure
shapes: the input shapes
dtype: the input data type
padded_shape: the shape to pad the output to
"""
def fn(*args):
if len(args) == 1 and not isinstance(args[0], tuple):
return batching.padded_batch_window(args[0], padded_shape)
return tuple([
fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(
arg, padded_shape) for arg in args
])
dataset = self._structuredRaggedDataset(structure, shapes, dtype).apply(
grouping.window_dataset(len(shapes))).apply(
grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
expected = sess.run(
self._structuredElement(
structure,
np.concatenate((np.int32([len(shapes)]), expected_shape)), dtype))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", np.int32([[1], [2], [3]]), [-1]),
("2", np.int32([[1, 3], [2, 2], [3, 1]]), [-1, -1]),
("3", np.int32([[3, 1, 3], [1, 3, 1]]), [-1, -1, -1]),
)
def testWindowDatasetPaddedBatchDenseDynamicShape(self, shapes, padded_shape):
"""Tests padded batching of dynamically shaped dense tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to
"""
shapes_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map(
lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).apply(
grouping.window_dataset(len(shapes))).apply(
grouping._map_x_dataset(
lambda x: batching.padded_batch_window(x, padded_shape)))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shapes_t: shapes})
expected_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
expected = sess.run(
self._structuredElement(
None, np.concatenate((np.int32([len(shapes)]), expected_shape)),
dtypes.int32))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", np.int32([[1]]), np.int32([0])),
("2", np.int32([[10], [20]]), np.int32([15])),
)
def testWindowDatasetPaddedBatchDenseInvalid(self, shapes, padded_shape):
"""Tests invalid padded batching of dense tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to
"""
dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map(
lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).apply(
grouping.window_dataset(len(shapes))).apply(
grouping._map_x_dataset(
lambda x: batching.padded_batch_window(x, padded_shape)))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
def _structuredRaggedSparseDataset(self, structure, shapes, dtype):
def map_fn(shape):
dense_to_sparse = self._make_dense_to_sparse_fn(False)
return dense_to_sparse(array_ops.zeros(shape, dtype=dtype))
if structure is None:
return dataset_ops.Dataset.from_tensor_slices(shapes).map(map_fn)
else:
return dataset_ops.Dataset.zip(
tuple([
self._structuredRaggedSparseDataset(substructure, shapes, dtype)
for substructure in structure
]))
def _structuredRaggedSparseElement(self, structure, shapes, dtype,
padded_shape):
if structure is None:
dense_shape = np.maximum(np.amax(shapes, axis=0), padded_shape)
values = []
for shape in shapes:
dense_to_sparse = self._make_dense_to_sparse_fn(len(shape) == 0) # pylint: disable=g-explicit-length-test
sparse = dense_to_sparse(array_ops.zeros(shape, dtype=dtype))
padded_sparse = sparse_tensor.SparseTensor(sparse.indices,
sparse.values, dense_shape)
reshaped_sparse = sparse_ops.sparse_reshape(
padded_sparse,
array_ops.concat([np.array([1], dtype=np.int64), dense_shape], 0))
values.append(reshaped_sparse)
return sparse_ops.sparse_concat(0, values)
else:
return tuple([
self._structuredRaggedSparseElement(substructure, shapes, dtype,
padded_shape)
for substructure in structure
])
@parameterized.named_parameters(
("1", None, np.int64([[1], [2], [3]]), dtypes.bool, [-1]),
("2", None, np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
("3", None, np.int64([[1], [2], [3]]), dtypes.float32, [-1]),
("4", None, np.int64([[1], [2], [3]]), dtypes.string, [-1]),
("5", None, np.int64([[1, 3], [2, 2], [3, 1]]), dtypes.int32, [-1, -1]),
("6", None, np.int64([[1, 3, 1], [3, 1, 3]]), dtypes.int32, [-1, -1, -1]),
("7", (None, None, None), np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
("8", (None,
(None, None)), np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
("9", None, np.int64([[1], [2], [3]]), dtypes.int32, [-1]),
("10", None, np.int64([[1], [2], [3]]), dtypes.int32, np.int64([10])),
)
def testWindowDatasetPaddedBatchSparse(self, structure, shapes, dtype,
padded_shape):
"""Tests padded batching of sparse tensor windows.
Args:
structure: the input structure
shapes: the input shapes
dtype: the input data type
padded_shape: the shape to pad the output to
"""
def fn(*args):
if len(args) == 1 and not isinstance(args[0], tuple):
return batching.padded_batch_window(args[0], padded_shape)
return tuple([
fn(*arg) if isinstance(arg, tuple) else batching.padded_batch_window(
arg, padded_shape) for arg in args
])
dataset = self._structuredRaggedSparseDataset(
structure, shapes, dtype).apply(grouping.window_dataset(
len(shapes))).apply(grouping._map_x_dataset(fn))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
expected = sess.run(
self._structuredRaggedSparseElement(structure, shapes, dtype,
padded_shape))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", np.int64([[1], [2], [3]]), [-1]),
("2", np.int64([[1, 3], [2, 2], [3, 1]]), [-1, -1]),
("3", np.int64([[3, 1, 3], [1, 3, 1]]), [-1, -1, -1]),
)
def testWindowDatasetPaddedBatchSparseDynamicShape(self, shapes,
padded_shape):
"""Tests padded batching of dynamically shaped sparse tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to
"""
shapes_t = array_ops.placeholder(dtypes.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(shapes_t).map(
lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).map(
self._make_dense_to_sparse_fn(False)
).apply(grouping.window_dataset(len(shapes))).apply(
grouping._map_x_dataset(
lambda x: batching.padded_batch_window(x, padded_shape)))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {shapes_t: shapes})
expected = sess.run(
self._structuredRaggedSparseElement(None, shapes, dtypes.int32,
padded_shape))
actual = sess.run(get_next)
self._assertEqual(expected, actual)
@parameterized.named_parameters(
("1", np.int64([[1]]), [0]),
("2", np.int64([[10], [20]]), [15]),
)
def testWindowDatasetPaddedBatchSparseInvalid(self, shapes, padded_shape):
"""Tests invalid padded batching of sparse tensor windows.
Args:
shapes: the input shapes
padded_shape: the shape to pad the output to
"""
dataset = dataset_ops.Dataset.from_tensor_slices(shapes).map(
lambda shape: array_ops.zeros(shape, dtype=dtypes.int32)).map(
self._make_dense_to_sparse_fn(False)
).apply(grouping.window_dataset(len(shapes))).apply(
grouping._map_x_dataset(
lambda x: batching.padded_batch_window(x, padded_shape)))
get_next = dataset.make_one_shot_iterator().get_next()
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| 39.742424 | 114 | 0.62662 | [
"Apache-2.0"
] | Esail/tensorflow | tensorflow/contrib/data/python/kernel_tests/window_dataset_op_test.py | 20,984 | Python |
"""
Data Structures shared by both the detectors and the server
"""
import datetime
import sys
import traceback
from typing import *
from dataclasses import dataclass, field
from dataclasses_jsonschema import JsonSchemaMixin
@dataclass
class ConfigMessage(JsonSchemaMixin):
cat_identifiers: Dict[str, str] # service_id -> cat_name
sampling_period: int = 15 # How often to sample, in seconds
api_uri: str = r"http://tesla:5058/kitbit/api"
@dataclass
class ScanObservationMessage(JsonSchemaMixin):
detector_uuid: str
cat_rssi: Dict[str, float]
@dataclass
class ErrorMessage(JsonSchemaMixin):
traceback: str
exception: str
timestamp: datetime.datetime = field(default_factory=datetime.datetime.now)
@staticmethod
def from_last_exception():
return ErrorMessage(
traceback=traceback.format_exc(),
exception=str(sys.exc_info()[0])
)
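# Example round-trip (sketch): JsonSchemaMixin from dataclasses_jsonschema is
# expected to provide to_dict()/from_dict() for these messages; the detector id
# and RSSI values below are made up.
# >>> msg = ScanObservationMessage(detector_uuid="d-01", cat_rssi={"cat-a": -61.5})
# >>> payload = msg.to_dict()
# >>> ScanObservationMessage.from_dict(payload).cat_rssi["cat-a"]
# -61.5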
| 23.487179 | 79 | 0.730349 | [
"MIT"
] | jsexauer/kitbit | src/kitbit/protocol.py | 916 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utils for generating stats from torch tensors.
"""
from typing import Iterator, List, Tuple, Union
import numpy as np
import torch
from torch.functional import F
def calc_sample_norms(
named_params: Iterator[Tuple[str, torch.Tensor]], flat: bool = True
) -> List[torch.Tensor]:
r"""
Calculates the norm of the given tensors for each sample.
This function calculates the overall norm of the given tensors for each sample,
    assuming that dimension zero of each tensor is the batch dimension.
Args:
named_params: An iterator of tuples <name, param> with name being a
string and param being a tensor of shape ``[B, ...]`` where ``B``
is the size of the batch and is the 0th dimension.
flat: A flag, when set to `True` returns a flat norm over all
layers norms
Example:
>>> t1 = torch.rand((2, 5))
>>> t2 = torch.rand((2, 5))
>>> calc_sample_norms([("1", t1), ("2", t2)])
[tensor([1.5117, 1.0618])]
Returns:
A list of tensor norms where length of the list is the number of layers
"""
norms = [param.view(len(param), -1).norm(2, dim=-1) for name, param in named_params]
# calc norm over all layer norms if flat = True
if flat:
norms = [torch.stack(norms, dim=0).norm(2, dim=0)]
return norms
def sum_over_all_but_batch_and_last_n(
tensor: torch.Tensor, n_dims: int
) -> torch.Tensor:
r"""
Calculates the sum over all dimensions, except the first
(batch dimension), and excluding the last n_dims.
This function will ignore the first dimension and it will
not aggregate over the last n_dims dimensions.
Args:
tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``.
n_dims: Number of dimensions to keep.
Example:
>>> tensor = torch.ones(1, 2, 3, 4, 5)
>>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape
torch.Size([1, 4, 5])
Returns:
A tensor of shape ``(B, ..., X[n_dims-1])``
"""
if tensor.dim() == n_dims + 1:
return tensor
else:
dims = list(range(1, tensor.dim() - n_dims))
return tensor.sum(dim=dims)
def unfold3d(
tensor: torch.Tensor,
kernel_size: Union[int, Tuple[int, int, int]],
padding: Union[int, Tuple[int, int, int]] = 0,
stride: Union[int, Tuple[int, int, int]] = 1,
dilation: Union[int, Tuple[int, int, int]] = 1,
):
r"""
Extracts sliding local blocks from an batched input tensor.
:class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).
This method implements the same action for 5D inputs
Args:
tensor: An input tensor of shape ``(B, C, D, H, W)``.
kernel_size: the size of the sliding blocks
padding: implicit zero padding to be added on both sides of input
stride: the stride of the sliding blocks in the input spatial dimensions
dilation: the spacing between the kernel points.
Example:
>>> B, C, D, H, W = 3, 4, 5, 6, 7
>>> tensor = torch.arange(1,B*C*D*H*W+1.).view(B,C,D,H,W)
>>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape
torch.Size([3, 32, 120])
Returns:
        A tensor of shape ``(B, C * np.product(kernel_size), L)``, where L is the product of the output spatial dimensions.
See :class:`torch.nn.Unfold` for more details
"""
if len(tensor.shape) != 5:
raise ValueError(
f"Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}"
)
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size, kernel_size)
if isinstance(padding, int):
padding = (padding, padding, padding)
if isinstance(stride, int):
stride = (stride, stride, stride)
if isinstance(dilation, int):
dilation = (dilation, dilation, dilation)
if dilation != (1, 1, 1):
raise NotImplementedError(f"dilation={dilation} not supported. We'd love a PR!")
batch_size, channels, _, _, _ = tensor.shape
# Input shape: (B, C, D, H, W)
tensor = F.pad(
tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0])
)
# Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0])
tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])
tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])
tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])
# Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2])
# For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold`
tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)
# Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2])
tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose(
1, 2
)
    # Output shape after the transpose: (B, C * kernel_size[0] * kernel_size[1] * kernel_size[2], D_out * H_out * W_out)
return tensor
| 34.709459 | 103 | 0.625268 | [
"MIT"
] | DaveBrind/SynthVAE | opacus/utils/tensor_utils.py | 5,137 | Python |
import dynamo as dyn
import numpy as np
import scipy.io
from scipy import optimize
# def VecFnc(
# input,
# n=4,
# a1=10.0,
# a2=10.0,
# Kdxx=4,
# Kdyx=4,
# Kdyy=4,
# Kdxy=4,
# b1=10.0,
# b2=10.0,
# k1=1.0,
# k2=1.0,
# c1=0,
# ):
# x, y = input
# dxdt = (
# c1
# + a1 * (x ** n) / (Kdxx ** n + (x ** n))
# + (b1 * (Kdyx ** n)) / (Kdyx ** n + (y ** n))
# - (x * k1)
# )
# dydt = (
# c1
# + a2 * (y ** n) / (Kdyy ** n + (y ** n))
# + (b2 * (Kdxy ** n)) / (Kdxy ** n + (x ** n))
# - (y * k2)
# )
#
# return [dxdt, dydt]
#
#
# def test_Bhattacharya(adata=None):
# """ Test the test_Bhattacharya method for mapping quasi-potential landscape.
# The original system (VecFnc) from the Bhattacharya paper and the reconstructed vector field function in the neuron
# datasets are used for testing.
#
# Reference: A deterministic map of Waddington’s epigenetic landscape for cell fate specification
# Sudin Bhattacharya, Qiang Zhang and Melvin E. Andersen
#
# Returns
# -------
# a matplotlib plot
# """
#
# # simulation model from the original study
# (
# attractors_num_X_Y,
# sepx_old_new_pathNum,
# numPaths_att,
# num_attractors,
# numPaths,
# numTimeSteps,
# pot_path,
# path_tag,
# attractors_pot,
# x_path,
# y_path,
# ) = dyn.tl.path_integral(
# VecFnc,
# x_lim=[0, 40],
# y_lim=[0, 40],
# xyGridSpacing=2,
# dt=1e-2,
# tol=1e-2,
# numTimeSteps=1400,
# )
# Xgrid, Ygrid, Zgrid = dyn.tl.alignment(
# numPaths, numTimeSteps, pot_path, path_tag, attractors_pot, x_path, y_path
# )
#
# dyn.pl.show_landscape(adata, Xgrid, Ygrid, Zgrid) ### update
#
# # neuron model
# VecFld = scipy.io.loadmat(
# "/Volumes/xqiu/proj/dynamo/data/VecFld.mat"
# ) # file is downloadable here: https://www.dropbox.com/s/02xwwfo5v33tj70/VecFld.mat?dl=1
#
# def vector_field_function(x, VecFld=VecFld):
# """Learn an analytical function of vector field from sparse single cell samples on the entire space robustly.
#
# Reference: Regularized vector field learning with sparse approximation for mismatch removal, Ma, Jiayi, etc. al, Pattern Recognition
# """
#
# x = np.array(x).reshape((1, -1))
# if np.size(x) == 1:
# x = x[None, :]
# K = dyn.tl.con_K(x, VecFld["X"], VecFld["beta"])
# K = K.dot(VecFld["C"])
# return K.T
#
# (
# attractors_num_X_Y,
# sepx_old_new_pathNum,
# numPaths_att,
# num_attractors,
# numPaths,
# numTimeSteps,
# pot_path,
# path_tag,
# attractors_pot,
# x_path,
# y_path,
# ) = dyn.tl.path_integral(
# vector_field_function,
# x_lim=[-30, 30],
# y_lim=[-30, 30],
# xyGridSpacing=0.5,
# dt=1e-2,
# tol=1e-2,
# numTimeSteps=2000,
# )
# Xgrid, Ygrid, Zgrid = dyn.tl.alignment(
# numPaths, numTimeSteps, pot_path, path_tag, attractors_pot, x_path, y_path
# )
#
# dyn.pl.show_landscape(Xgrid, Ygrid, Zgrid)
#
#
# # test Wang's LAP method
# def F(X, a_s=1.5, n=4, S=0.5, b=1, k=1):
# x1, x2 = X
#
# F_1 = (
# (a_s * (x1 ** n) / ((S ** n) + (x1 ** n)))
# + (b * (S ** n) / ((S ** n) + (x2 ** n)))
# - (k * x1)
# )
# F_2 = (
# (a_s * (x2 ** n) / ((S ** n) + (x2 ** n)))
# + (b * (S ** n) / ((S ** n) + (x1 ** n)))
# - (k * x2)
# )
#
# return np.r_[F_1, F_2]
#
#
# def test_Wang_LAP():
# """Test the least action path method from Jin Wang and colleagues (http://www.pnas.org/cgi/doi/10.1073/pnas.1017017108)
#
# Returns
# -------
#
# """
# x1_end = 1
# x2_end = 0
# x2_init = 1.5
# x1_init = 1.5
# N = 20
#
# x1_input = np.arange(
# x1_init, x1_end + (x1_end - x1_init) / N, (x1_end - x1_init) / N
# )
# x2_input = np.arange(
# x2_init, x2_end + (x2_end - x2_init) / N, (x2_end - x2_init) / N
# )
# X_input = np.vstack((x1_input, x2_input))
#
# dyn.tl.Wang_action(X_input, F=F, D=0.1, N=20, dim=2, lamada_=1)
# res = optimize.basinhopping(
# dyn.tl.Wang_action, x0=X_input, minimizer_kwargs={"args": (2, F, 0.1, 20, 1)}
# )
# res
#
#
# def two_gene_model(X, a=1, b=1, k=1, S=0.5, n=4):
# """Two gene network motif used in `From understanding the development landscape of the canonical fate-switch pair to
# constructing a dynamic landscape for two-step neural differentiation`, Xiaojie Qiu, Shanshan Ding, Tieliu Shi, Plos one
# 2011.
#
# Parameters
# ----------
# X: `numpy.array` (dimension: 2 x 1)
# Concentration of two genes.
# a: `float`
# Parameter a in the two gene model.
# b: `float`
# Parameter b in the two gene model.
# k: `float`
# Parameter k in the two gene model.
# S: `float`
# Parameter S in the two gene model.
# n: `float`
# Parameter n in the two gene model.
#
# Returns
# -------
# F: `numpy.ndarray`
# matrix (1 x 2) of velocity values at X.
# """
#
# x1, x2 = X[0], X[1]
# F1 = (
# (a * (x1 ** n) / ((S ** n) + (x1 ** n)))
# + (b * (S ** n) / ((S ** n) + (x2 ** n)))
# - (k * x1)
# )
# F2 = (
# (a * (x2 ** n) / ((S ** n) + (x2 ** n)))
# + (b * (S ** n) / ((S ** n) + (x1 ** n)))
# - (k * x2)
# )
#
# F = np.array([[F1], [F2]]).T
# return F
#
#
# def test_Ao_LAP():
# import sympy as sp
#
# a = 1
# b = 1
# k = 1
# S = 0.5
# n = 4
# D = 0.1 * np.eye(2)
#
# N = 50
# space = 5 / N
#
# x1 = sp.Symbol("x1")
# x2 = sp.Symbol("x2")
# X = sp.Matrix([x1, x2])
# F1 = (
# (a * (x1 ** n) / ((S ** n) + (x1 ** n)))
# + (b * (S ** n) / ((S ** n) + (x2 ** n)))
# - (k * x1)
# )
# F2 = (
# (a * (x2 ** n) / ((S ** n) + (x2 ** n)))
# + (b * (S ** n) / ((S ** n) + (x1 ** n)))
# - (k * x2)
# )
# F = sp.Matrix([F1, F2])
# J = F.jacobian(X)
# U = np.zeros((N, N))
#
# for i in range(N):
# for j in range(N):
# X_s = np.array([i * space, j * space])
# # F = J.subs(X, X_s)
# F = J.subs(x1, X_s[0])
# F = np.array(F.subs(x2, X_s[1]), dtype=float)
# Q, _ = dyn.tl.solveQ(D, F)
# H = np.linalg.inv(D + Q).dot(F)
# U[i, j] = -0.5 * X_s @ H @ X_s
# test calculating jacobian below:
# import dynamo as dyn
# import numpy as np
#
# adata = dyn.sim.Simulator(motif="twogenes")
# adata.obsm['X_umap'], adata.obsm['velocity_umap'] = adata.X, adata.layers['velocity']
# dyn.vf.VectorField(adata, basis='umap')
#
# # plot potential and topography
# dyn.ext.ddhodge(adata, basis='umap')
# dyn.pl.topography(adata, color='umap_ddhodge_potential')
#
# adata.var['use_for_dynamics'] = True
# a = np.zeros((2, 2), int)
# np.fill_diagonal(a, 1)
#
# adata.uns['PCs'] = a
# dyn.vf.jacobian(adata, basis='umap', regulators=['Pu.1', 'Gata.1'],
# effectors=['Pu.1', 'Gata.1'], store_in_adata=True)
#
# # plot the recovered jacobian
# dyn.pl.jacobian(adata)
#
# #plot jacobian kinetics and heatmap
# dyn.pl.jacobian_kinetics(adata, basis='umap', tkey='umap_ddhodge_potential')
# dyn.pl.jacobian_heatmap(adata, cell_idx=[0], basis='umap')
#
# def jacobian(x1, x2):
# J = np.array([[0.25 * x1**3 / (0.0625 + x1**4)**2 - 1, -0.25 * x2**3 / (0.0625 + x2**4)**2],
# [- 0.25 * x1**3 / (0.0625 + x1**4)**2, 0.25 * x2**3 / (0.0625 + x2**4)**2 - 1]])
# return J
# # plot the true jacobian
# J_dict = adata.uns['jacobian_umap'].copy()
#
# J = np.zeros_like(J_dict['jacobian'])
# for ind, i in enumerate(adata.X):
# J[:, :, ind] = dyn.sim.two_genes_motif_jacobian(i[0], i[1])
#
# J_dict['jacobian'] = J
# adata.uns['jacobian_true'] = J_dict
# adata.obsm['X_true'] = adata.obsm['X_umap']
#
# dyn.pl.jacobian(adata, basis='true')
#
| 27.744966 | 136 | 0.497823 | [
"BSD-3-Clause"
] | aanaseer/dynamo-release | tests/tests.py | 8,270 | Python |
import tensorflow as tf
from dataflow.generator_cls import GeneratorCLS
import json
import logging
from easydict import EasyDict
from dataflow.utils import fix_rng_seed
from model.util import batched_gather
import os
import pickle
def get_lr_schedule(lr_schedule):
boundaries = lr_schedule.boundaries
values = lr_schedule.values
return tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)
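# Example (hypothetical schedule values; `lr_schedule` normally comes from the
# EasyDict JSON config). With TF2 eager execution the returned schedule can be
# called with a step number:
# >>> schedule = get_lr_schedule(EasyDict(boundaries=[1000, 2000],
# ...                                     values=[1e-3, 1e-4, 1e-5]))
# >>> float(schedule(0)), float(schedule(1500))
# (0.001, 0.0001)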
def corloc(top_subproblem, labels, corloc_list):
ntotal = tf.cast(tf.shape(top_subproblem)[-1], tf.float32)
for i in range(tf.shape(labels)[0]):
res = batched_gather(top_subproblem[i, ..., tf.newaxis],
labels[i])
corloc_list.append(tf.reduce_sum(res)/ntotal)
def get_best_acc(dir_path):
top_pkl_file = os.path.join(dir_path, 'top.pkl')
if os.path.isfile(top_pkl_file):
with open(top_pkl_file, 'rb') as f:
top = pickle.load(f)
return top['best']
return 0.0
def save_best_acc(dir_path, best_acc, iteration):
top_pkl_file = os.path.join(dir_path, 'top.pkl')
top = {'best': best_acc, 'iteration': iteration}
with open(top_pkl_file, 'wb') as f:
pickle.dump(top, f)
def get_dataset(config, training):
if config.shuffle is False:
fix_rng_seed(config.seed)
num_negative_bags = config.negative_bag_size // config.bag_size
gen = GeneratorCLS(is_training=config.is_training, shuffle=config.shuffle,
                       add_gt_list=False, k_shot=config.k_shot,  # TODO: cannot return the list for now
bag_size=config.bag_size, num_negative_bags=num_negative_bags,
split=config.split, num_sample_classes=config.num_sample_classes,
num_sample_classes_min=config.num_sample_classes_min, use_features=config.use_features,
dataset_name=config.dataset_name, one_example_per_class=config.one_example_per_class,
has_single_target=config.has_single_target)
gen.reset_state()
dataset = tf.data.Dataset.from_generator(gen.get_data,
(tf.float32, tf.float32, tf.float32,
tf.float32, tf.float32, tf.int32)).prefetch(
config.prefetch_buffer_size)
if training:
return dataset.batch(config.meta_batch_size).repeat()
else:
return dataset.batch(1) #NOTE: we can repeat since new problems will be different
def parse_dt(dt, config):
fea, _, _, classes, _, target_class = dt
pos_fea = fea[:, :config.k_shot, :, 0, 0]
neg_fea = fea[:, config.k_shot:, :, 0, 0]
neg_shape = tf.shape(neg_fea)
## [MBS, N_NEG_BAGS, BAG_SIZE, D] ==> [MBS, N_NEG_BAGS*BAG_SIZE, D]
neg_fea = tf.reshape(neg_fea, [neg_shape[0], -1, neg_shape[-1]])
pos_classes = classes[:, :config.k_shot]
neg_classes = classes[:, config.k_shot:]
## [MBS, N_NEG_BAGS, BAG_SIZE] ==> [MBS, N_NEG_BAGS*BAG_SIZE]
neg_classes = tf.reshape(neg_classes, [neg_shape[0], -1])
return pos_fea, neg_fea, pos_classes, neg_classes, target_class
def get_config(path):
with open(path,'r') as f:
return EasyDict(json.load(f))
def set_logger(log_path):
"""Set the logger to log info in terminal and file `log_path`.
In general, it is useful to have a logger so that every output to the terminal is saved
in a permanent file. Here we save it to `model_dir/train.log`.
Example:
```
logging.info("Starting training...")
```
Args:
log_path: (string) where to log
"""
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not logger.handlers:
# Logging to a file
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logger.addHandler(file_handler)
# Logging to console
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)
| 38.173077 | 108 | 0.685139 | [
"Apache-2.0"
] | haamoon/finding_common_object | utils.py | 3,970 | Python |
# -*- coding: utf-8 -*-
import datetime
fhVpiKRlyhgsBUpbOQysWaBffABPdeSUqbNQWzpsBmbkqxlCYtAxtWnCcwMifMeIoIiHPsGbUcWRqPYTNEboepPIAKGlrzHEoVTYYeXuBlbUOBrizEBUQHwXbVjOZgnp = 'dmhqfXZEWScpGcCscOLcRkUgRMpmnavpmEzKbcQdDFhUmHjneIBOrLglmFqWAtOtbaswRpZpKSnZlZzlfTbviGHZDbgxVbrqgEEbclXGVksOMdAjGakzUHDiaPRBOHsl'
dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC = 'uJyAakTAcyXezgPUMFEUNKmuTOcfIhJSxDRbsoGpniVminEOXroTUoPJxdTWvMPtQmBihmCFmyIqMhBRpkNKEjjNpyvHqjBWCqFJKKHrsXXcSwuCXsTwOUxKLeLEHWOx'
MTuynKhxbWnGIZIfgbPAdzdypmyLkznmgDJgOaJHEQJEMjbvzSKqUJSkXlfXqHgkaYMWsOzslKxdEzNLnIVHCEaKhunNVswaVJNTcydHJjuwBWXCeTtvUzVuLfNqovvZ = 'sjAKUsNBWYetMFGoNZhhqOfAHcdnIwmlxnpQeeAmIcXnJMJCmXCCMwsbnFvGylMCGKydQdrHKDcnWLaxYURhCAcHfzRoVIWwcLxFlXVxXhmgcybLzajdzHGIsBpzmXeX'
ZaUsqBVlzkJzMafoMxiCkZlKpxomDwCTraXQGXtOaOSHSydRIcPJqwMbIceSakLBTCvSgOXpUsQTmcoVkamvJoTplfcvaBvQGTNEdGQqqJiScyXLjYuXiJVTCxmDlCvp = 'CzxxJEQVQQHVVOZzYHHDhHRPBMjxhuYoloumlAJytWovGniILRMVZEcEKymEbdJVNuIPeiwPTlwnoCekUQOfmoWtQRfPsZdmkGIcaQgxwHVvmVRIIGHvpzTbjEXgbROI'
TENdVhDicdqlKUQeSGHXIdnojUYSTHuQndKkcpGacFBJTTbfUnnACYTclSZNAfgnMFSVwrmpzcdukGBAPQpKPbfsCQJAgcNqSYdqzQMNGMxxMwibVHVXiwGuFaJZAeKq = 'uQtKpebReAxoATmrSDsXyzNzgOnAFkvXMzeNaPWgJRIgiALNDMnThkHlupsDsOYaXAwZLwTOlMnNvjtSVuXGCaogpRUXftLNuPYdpkhTWYdVEkVFDqdWpDrVvQkdVWZr'
if fhVpiKRlyhgsBUpbOQysWaBffABPdeSUqbNQWzpsBmbkqxlCYtAxtWnCcwMifMeIoIiHPsGbUcWRqPYTNEboepPIAKGlrzHEoVTYYeXuBlbUOBrizEBUQHwXbVjOZgnp in dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC:
fhVpiKRlyhgsBUpbOQysWaBffABPdeSUqbNQWzpsBmbkqxlCYtAxtWnCcwMifMeIoIiHPsGbUcWRqPYTNEboepPIAKGlrzHEoVTYYeXuBlbUOBrizEBUQHwXbVjOZgnp = TENdVhDicdqlKUQeSGHXIdnojUYSTHuQndKkcpGacFBJTTbfUnnACYTclSZNAfgnMFSVwrmpzcdukGBAPQpKPbfsCQJAgcNqSYdqzQMNGMxxMwibVHVXiwGuFaJZAeKq
if dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC in MTuynKhxbWnGIZIfgbPAdzdypmyLkznmgDJgOaJHEQJEMjbvzSKqUJSkXlfXqHgkaYMWsOzslKxdEzNLnIVHCEaKhunNVswaVJNTcydHJjuwBWXCeTtvUzVuLfNqovvZ:
dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC = ZaUsqBVlzkJzMafoMxiCkZlKpxomDwCTraXQGXtOaOSHSydRIcPJqwMbIceSakLBTCvSgOXpUsQTmcoVkamvJoTplfcvaBvQGTNEdGQqqJiScyXLjYuXiJVTCxmDlCvp
elif dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC in fhVpiKRlyhgsBUpbOQysWaBffABPdeSUqbNQWzpsBmbkqxlCYtAxtWnCcwMifMeIoIiHPsGbUcWRqPYTNEboepPIAKGlrzHEoVTYYeXuBlbUOBrizEBUQHwXbVjOZgnp:
MTuynKhxbWnGIZIfgbPAdzdypmyLkznmgDJgOaJHEQJEMjbvzSKqUJSkXlfXqHgkaYMWsOzslKxdEzNLnIVHCEaKhunNVswaVJNTcydHJjuwBWXCeTtvUzVuLfNqovvZ = dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC
if MTuynKhxbWnGIZIfgbPAdzdypmyLkznmgDJgOaJHEQJEMjbvzSKqUJSkXlfXqHgkaYMWsOzslKxdEzNLnIVHCEaKhunNVswaVJNTcydHJjuwBWXCeTtvUzVuLfNqovvZ in dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC:
dwBjbvwPMXdXoHFSDBIQKWUDwzAAmnenLxSxjDbnVQfwJVgnCbdGhalUfReeFfHwLGRfCCumkeStLkDuJCeiapMaTjnmcYfdfTMLKxtHaQuaRDwsRnyyivbGLOEhHIfC = TENdVhDicdqlKUQeSGHXIdnojUYSTHuQndKkcpGacFBJTTbfUnnACYTclSZNAfgnMFSVwrmpzcdukGBAPQpKPbfsCQJAgcNqSYdqzQMNGMxxMwibVHVXiwGuFaJZAeKq
import os
UcYBbqycLohUIuEVaBUUfHhKcJkPCRJXMZdboXsvsuggGWFQyxDQCgGTlTPpDyPLkGViFZSMsENRLFzxwTkqIKmOjxqIpzdzQPVKaHJAhojHpaqPggLAeicAhLgfGveH = 'UTyScUqGvVRDEQbFhdBLPEhcqhsYhBsOjzhhbitVvZrOrpiuITCxXSaWFRdZrzQfeHWXwJsyEslNsWBjrhUQWLbHGjmIAoBnyTwriDUzPOuijzrmIdzIhsumZLmJWhYQ'
bZPOQszIDspgPFIvnAkhBmDXheEXZagEvDXqfvvDcZPDeiPpjtpzdApgCHLefEFTCDwDlQtylejPZJYOeLpivFIcbzCGVlgRQTbCgippnilDjfPCrRwfECtEjLeIWFKd = 'ClKOHhVifZZVHnKSwQAwLqEDhQMDSpzAfflQwOhVgCURUFQEiALMtIrqWRDMvPQICJiSwprUGwNOQlMNyZuqVNxhwHKysJAOUfFooKsoqrIxEMnBbOKsSuwQHmRKyBGo'
if UcYBbqycLohUIuEVaBUUfHhKcJkPCRJXMZdboXsvsuggGWFQyxDQCgGTlTPpDyPLkGViFZSMsENRLFzxwTkqIKmOjxqIpzdzQPVKaHJAhojHpaqPggLAeicAhLgfGveH != bZPOQszIDspgPFIvnAkhBmDXheEXZagEvDXqfvvDcZPDeiPpjtpzdApgCHLefEFTCDwDlQtylejPZJYOeLpivFIcbzCGVlgRQTbCgippnilDjfPCrRwfECtEjLeIWFKd:
jKTbnYncoZvxrESEZXjmkjtQhagTqOcWsscHSneffGdfnYMsrNWizeTmxemsNCAKSeVKziQuYTQhovegIITTEkfsMbYmNBryFzVAbxmebfAzcYjXuTwBPeXnHeOFnOpw = 'RAuZLlAesTHEKUbFlmmOmKgteuOzkxTlTqjdGOLYDzHBlYoYYvxXvwzqlVRBzfjHKvKWnlSLqCAIVqagdSybyyiXAzBdCFzLllfHkZxqbltZwisreXENxDMaHsDaiJkL'
esvlmCWnLnmkAXKamGxhwYtHAVdXnRVpjnaMbwIcOimcUWsGxDIacqRtJfwgnubjiVVlgsoVVZTUgASTRpiknfJMcjKnwqerHiwFUXVFZsauYvdcnLdLrKJFerALHFIO = 'kSwofyuJbfOaUCqacmBbYXmVHXfbtEBIFbniFcjHYnmLyxbjZhbNciknvVEHeDZtUlOPFfAcSPMAVPJMebYnKOErktppWmbydWfqgojVtWMTmxjIqUrcysanOTiKMoyP'
esvlmCWnLnmkAXKamGxhwYtHAVdXnRVpjnaMbwIcOimcUWsGxDIacqRtJfwgnubjiVVlgsoVVZTUgASTRpiknfJMcjKnwqerHiwFUXVFZsauYvdcnLdLrKJFerALHFIO = jKTbnYncoZvxrESEZXjmkjtQhagTqOcWsscHSneffGdfnYMsrNWizeTmxemsNCAKSeVKziQuYTQhovegIITTEkfsMbYmNBryFzVAbxmebfAzcYjXuTwBPeXnHeOFnOpw
import urllib
OeeyolHzmnQYDZNiLcDBBHUfPRSUBSGpBsjBhQkOeiSWQltVJIndrsPkcqkGNGsvhjNyhOkzYPjjtquLoJZwSTyHrZKzudgbZcvsyLThjkaTyksmKbrjgayaCJQjPjxB = 'AhUjcWjrppZHtDokNpwwqvrGGJIKGATTkcgrZsleNuhENAQiTVthiFOPpEYizSxnooctGTSoIjIdfJinjowvDIZPLKKsrMCFDCVeSRRXseehAGByEDVqhNtYQniulymM'
PWWkdIVOgKTimDhbzlgbuVSisvUngxYfbmndAaHPtRKutsEQkGLLFtltWkyNqRJxojfOkYbFhxSfdwxOpwhBjLxXubcRwhgFnuYANXZwrVKfMNfzmzkCoqdJoQxiAoxw = 'xrhvGIJVMXCiVEwmhiuyJWoDZDuseiflumpsVueiTwLqQmrlTYTRLExVLWUHkSaFinylfjQGGeGKAzaMnXPADRBTnNfXpDvQJMUsBnNMftPvqoLKMzSMBDvtWuAcMCFn'
iNHsDPxoVkbgGAZcsvDhoRIlnpGpYKlhJhAVpYNlIdybDYevmAMcNRJLFBzPszPJCYEWQODIaiLpCYBjFPujTDsECoAMMUbgfLiXKuquCmJFOIKQcnoFbojLeJXqYIIi = 'hcHDZnqyMBQDRypahocigSsCLRjRzLoWCxnnYRmVKCpeSyBNUtxpdaYFbfpzGUvFKtDnKcTRSOeBsRXDNKoRMHRoLBJfOqoeRPyVaNvxxZCbodFgnYYGYYCPmAZfWCRt'
QRLnEfCrUEtnuIcjotRPTZEMJGrAxgSKRIrtaBcZfpDFIxeNFxYNIeJfngrPOclqEgtxlfwUznqSCCaxIqfaeHjRpDewEttZIMWSpetIrYWpOLcPZdEFkrlWnFHsooQj = 'VgDJUIYrYSDNKZpIVGMLdhhQmvKkYZjhEorlmfNuoQgPHNZIWkUUuyJbzkmxNqVJgXzYbymEpmpKkTGqvrqYNEsuUlqPITAcSRPgMcyTDRClhKScnEkmvOeHFoCLppju'
qVWTguhJfFRxLMbFJOQychXTgxOKcZanmGjvfcKujrXFCZicVCqEOVmIdhMnEBGfylhPRgFLgXIAvzDqrvIngSEYdxYMyqNHUjJAsJlfOJYyJvroRToMSylVEZvleisI = 'InQVCpJYQuLOoRZjDTPmsHkJBLBpXIgwbzEiFnWcXAgBCJHbVabTyoEZMmbTdUBoNqsnYCarbpSkOgWpZIBwuBroWXhmtPikHnPzrOLgFMNvHGMQdEBtfKsXqtspynpv'
GnxbhWFQTJibNCPlbsEFbdpMjeDjQraXUYGGzrIkuTuPZhKZYcpHaPWIyDHlFPlQdMQufvXlmvKFiOifpbUGgQXxJoLRkjLlSgyPvFvilJitiZmQyTrkXpVpdYUqYeKU = 'HgelPHtrFdGBwsFMveCJeZKnPkOCTIdxPWTeJdDxopDWojbWYbCFbzrYVaBnvdFujNqMLXTrKsFZKzxhWeRAouEVnzVkJacqklhHqWTAqAPfsAaZBFjMpFNwXCwVwzyt'
if iNHsDPxoVkbgGAZcsvDhoRIlnpGpYKlhJhAVpYNlIdybDYevmAMcNRJLFBzPszPJCYEWQODIaiLpCYBjFPujTDsECoAMMUbgfLiXKuquCmJFOIKQcnoFbojLeJXqYIIi == QRLnEfCrUEtnuIcjotRPTZEMJGrAxgSKRIrtaBcZfpDFIxeNFxYNIeJfngrPOclqEgtxlfwUznqSCCaxIqfaeHjRpDewEttZIMWSpetIrYWpOLcPZdEFkrlWnFHsooQj:
for GnxbhWFQTJibNCPlbsEFbdpMjeDjQraXUYGGzrIkuTuPZhKZYcpHaPWIyDHlFPlQdMQufvXlmvKFiOifpbUGgQXxJoLRkjLlSgyPvFvilJitiZmQyTrkXpVpdYUqYeKU in qVWTguhJfFRxLMbFJOQychXTgxOKcZanmGjvfcKujrXFCZicVCqEOVmIdhMnEBGfylhPRgFLgXIAvzDqrvIngSEYdxYMyqNHUjJAsJlfOJYyJvroRToMSylVEZvleisI:
if GnxbhWFQTJibNCPlbsEFbdpMjeDjQraXUYGGzrIkuTuPZhKZYcpHaPWIyDHlFPlQdMQufvXlmvKFiOifpbUGgQXxJoLRkjLlSgyPvFvilJitiZmQyTrkXpVpdYUqYeKU == QRLnEfCrUEtnuIcjotRPTZEMJGrAxgSKRIrtaBcZfpDFIxeNFxYNIeJfngrPOclqEgtxlfwUznqSCCaxIqfaeHjRpDewEttZIMWSpetIrYWpOLcPZdEFkrlWnFHsooQj:
qVWTguhJfFRxLMbFJOQychXTgxOKcZanmGjvfcKujrXFCZicVCqEOVmIdhMnEBGfylhPRgFLgXIAvzDqrvIngSEYdxYMyqNHUjJAsJlfOJYyJvroRToMSylVEZvleisI = OeeyolHzmnQYDZNiLcDBBHUfPRSUBSGpBsjBhQkOeiSWQltVJIndrsPkcqkGNGsvhjNyhOkzYPjjtquLoJZwSTyHrZKzudgbZcvsyLThjkaTyksmKbrjgayaCJQjPjxB
else:
QRLnEfCrUEtnuIcjotRPTZEMJGrAxgSKRIrtaBcZfpDFIxeNFxYNIeJfngrPOclqEgtxlfwUznqSCCaxIqfaeHjRpDewEttZIMWSpetIrYWpOLcPZdEFkrlWnFHsooQj = PWWkdIVOgKTimDhbzlgbuVSisvUngxYfbmndAaHPtRKutsEQkGLLFtltWkyNqRJxojfOkYbFhxSfdwxOpwhBjLxXubcRwhgFnuYANXZwrVKfMNfzmzkCoqdJoQxiAoxw
import zipfile
dLXiPZdCHcsudAXMhuuTXmlOQnsUUixnAhHikKpTwbpRtHesJhYTYYvgJYWDNZJNRcLiDXVkCwZblvmZCqlWPcTyzLgYXJLeVnAwSGjmeENNYLyScDZuqlWnqVSxwLov = 'cGhsqjDDDxgsWhGIxYDDqHqhFeusOgMebWYkjbaRuvPiiqgIPehKdIVGbsJDDNJNNvKizonpVdWKNNBKTaLXTUrCAwbVMvrVqJRfNCOJITEsgQUwzRfbKolLwUWLslKd'
suuqBOqcwzjYJzisbFZaYMxuXKUpJZjGkMsiWnHMsiJJEcXGOFpOxwZyRukCXGFEoAbjBIilrDgbkMDniCqcIMjrQrlvgXtNNoAeHhKvXcBOZrIvpOiSMRTgbeHvpMnQ = 'ohRaVKBbWQHJWaaMBkJNgjBgsgdKasWKPHzMAhorPDtayRjNgrZrNjzVtrOLkmeQocamWTukFRRWFZcRvoUrzDxgtiJNmKsOLjyamasMCCrAXAOUqxruRLyBZOvzwvNr'
kueTOifpCWHiKFULBDRTpzPiSIGrZtmphhpVXVhKCbCaqMrvCofhtvQrwlWXfCRTKjORUHfUfcbDzOsOaDNKAHkLQeCLGUFcsXfboipJuFrtImAwDweROhXYtvDVEFMs = 'jLnCADrwkbqUtilVXOhvwdPjSgZzCKxhCmvntVHaFUQqxSEhsjyWVFQhlTYUeUcGIDzORNUkmTRkEIpbjfYEBFuosJabdoWjqGbZGwklDMKCPYZYBwlDLeCuhJRHMAOm'
zYGvfXupLTONZLbKaUKalPIjLrQuKajHYwrjaHlYVMGcKciwMELRJvNqirzDjgaXVFRBmJETISrLoXeTdvmekCNSaKnrBQcTFkkoLjGxOyBVgprXNjWNuwwXjuqLfegh = 'audUQEnwLMvypmAMnPlosZixvOWaGvoCOxzTptokfBWDkhSpedeUgadrsGdafRbkvhjhfGMjcroSabArfHteMFBqTyaqBGZrvKBUtJPLiSPZfltxaxqkzUuGBtfTrUcc'
rbBxASHslcumOrWGEzUHBuOavkzdhakmRRRqSTqVLEmSuhbqBsPyegJXnHhhHYuPYQwbbsDJnVbVqwNdZMpFhvsJkvDTUYsTHdtnvUpsWRFWSnUtCuCWqBZMQegNmaSb = 'zjOoqnzIRBeqHWsSzdmVSjayZwSZGXCaDegBJpJnBjyitscCAMNgrIYbEmnLPZzWPCzKtOivDaWKrmNhMzRXCCaXLkIzqTqxRyAfSFtNmJuZItamNmJZFayLuPagjCbv'
sNyGeKyACpWkDUjZmMfRifHcIVYhdTguvVaxDVCUSBVZTYoWpzzDZQtvWSYyvgkbVDgzOJCsgHlnMqsfYpvhnCDZoJHHKTzzltECgaBPHkGbsewqrJOxLhmsIAKUNKPv = 'vLePPXRqPAgZoCsrXbyAowaxnjZRsEugszuNupILHRPYeVOmHTLCUiKKfCTHBPHSondJHOVBgPSVNHmQwUZFRudhhAzMhYJsMduRiePTgeuVqtSMjRXOTqjAvRpHxYFv'
if kueTOifpCWHiKFULBDRTpzPiSIGrZtmphhpVXVhKCbCaqMrvCofhtvQrwlWXfCRTKjORUHfUfcbDzOsOaDNKAHkLQeCLGUFcsXfboipJuFrtImAwDweROhXYtvDVEFMs == zYGvfXupLTONZLbKaUKalPIjLrQuKajHYwrjaHlYVMGcKciwMELRJvNqirzDjgaXVFRBmJETISrLoXeTdvmekCNSaKnrBQcTFkkoLjGxOyBVgprXNjWNuwwXjuqLfegh:
for sNyGeKyACpWkDUjZmMfRifHcIVYhdTguvVaxDVCUSBVZTYoWpzzDZQtvWSYyvgkbVDgzOJCsgHlnMqsfYpvhnCDZoJHHKTzzltECgaBPHkGbsewqrJOxLhmsIAKUNKPv in rbBxASHslcumOrWGEzUHBuOavkzdhakmRRRqSTqVLEmSuhbqBsPyegJXnHhhHYuPYQwbbsDJnVbVqwNdZMpFhvsJkvDTUYsTHdtnvUpsWRFWSnUtCuCWqBZMQegNmaSb:
if sNyGeKyACpWkDUjZmMfRifHcIVYhdTguvVaxDVCUSBVZTYoWpzzDZQtvWSYyvgkbVDgzOJCsgHlnMqsfYpvhnCDZoJHHKTzzltECgaBPHkGbsewqrJOxLhmsIAKUNKPv == zYGvfXupLTONZLbKaUKalPIjLrQuKajHYwrjaHlYVMGcKciwMELRJvNqirzDjgaXVFRBmJETISrLoXeTdvmekCNSaKnrBQcTFkkoLjGxOyBVgprXNjWNuwwXjuqLfegh:
rbBxASHslcumOrWGEzUHBuOavkzdhakmRRRqSTqVLEmSuhbqBsPyegJXnHhhHYuPYQwbbsDJnVbVqwNdZMpFhvsJkvDTUYsTHdtnvUpsWRFWSnUtCuCWqBZMQegNmaSb = dLXiPZdCHcsudAXMhuuTXmlOQnsUUixnAhHikKpTwbpRtHesJhYTYYvgJYWDNZJNRcLiDXVkCwZblvmZCqlWPcTyzLgYXJLeVnAwSGjmeENNYLyScDZuqlWnqVSxwLov
else:
zYGvfXupLTONZLbKaUKalPIjLrQuKajHYwrjaHlYVMGcKciwMELRJvNqirzDjgaXVFRBmJETISrLoXeTdvmekCNSaKnrBQcTFkkoLjGxOyBVgprXNjWNuwwXjuqLfegh = suuqBOqcwzjYJzisbFZaYMxuXKUpJZjGkMsiWnHMsiJJEcXGOFpOxwZyRukCXGFEoAbjBIilrDgbkMDniCqcIMjrQrlvgXtNNoAeHhKvXcBOZrIvpOiSMRTgbeHvpMnQ
def TrhlrfFHkiEJHCplnedxuhHIiSdIpGvQAWDIwmcKNKDDFmRrooloMGGuNcrZazWAPNjLUUBwtHjZvahgTAMkFYUWKapcvkgzmBGlfneCcmkmrHiBnKUuomBmWNovmGFQ(f):
HKidGfNXpjiDyFwbYIWcpVxdkFcHRszShvCotKkTBNgBLWRiqcoNHLUTSSFMjUIsCltRQYPdgvENvDqsoeIabbUpOpUEHXyqpIcfTGjjGNlwfelLhhzAuNgPABiiZYdu = 'MQMrklluOiDPUyWSvIHlPOMawziHoFbGylhvoXTTXjVoGSyeNwdEbzrxdjnWBZqKCsvhiYpDlyoqJzxxswwunRLZGofpBViVkSpqCGGajQqqiBDRJgebDcKAixwsPGNt'
RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV = 'kJwQqgVyqeLZMOHdGgNMOiWidQXRMCcuwNTKhziEgGBkrPyLWJECmjzgIaCFcfwGgZMVZdOjPklLqlXmIrbrrzDgrwjFijgRPJjiwLuXtiGJgnzbYhoqNqmymMkfrBfQ'
KAjBsUfefMBwuoBWGcvsxNAOoroeluXeSNBUAWVhHRsnWNmhzDcyWBnEccqtsaABnIlheglkLMmUlmEHxNWWZMttrVZOiLXfThQTZomuyHHGopPqmxrxkvOkKXyOqoZT = 'rgXJObYuqKPsGbUPvxsGYihkUEkTevoNmSeNtRlZkSLpSecNyZVQSyzjaNxfDSoMSPvdEDqaHzaIGNwTohSOqospfyjGrXBauPedtYyyTUupubfyfTYHTUvnjEjpevnP'
mMwowudMlmKqTNSghPNoUMeqhaxgrnzaqlVODXPBWFtlNVsgwkKDyMesLCdCRrKmpvvCfghuaubRLwUDZeCIcvhcRMJDGUhXbkuAevVUrMVubKrWDhiBxqKuaJCPtSgt = 'bLPTpCxuOLrzDOZQAzRvNbLtfnCaDkkBhQszyXvcMBSNWyKZwLtodXKlgiUAkpEbJyucEDXtpiQjhHzRYSBzIuyNytQoonaMneCzryDlRfoNwiyZzhnoaAHbyboyNVUp'
whicjUJFsnNeeuUwBGevcRxECGTKQhmZEWbaNTIaPpPJPMqrKxgpgOQMxnjYVEhwqfKEnWAnjIQKNCCHJYcdETXxWAlisSWvXtGTxEoSlvKILhxNJBNUjJsreLJWsvIW = 'FhmsDLXsQZzGrRsGvTecVbsqGSiaNgghaMQCDSOsxZaXFAeZyiHKQjkKIOWvYZlijeudsvWUcioxJOJQEyDjmXBRIFSaHsfQYIlpykfRiKIbzWrCRtyTZPIXtbndRKNY'
if HKidGfNXpjiDyFwbYIWcpVxdkFcHRszShvCotKkTBNgBLWRiqcoNHLUTSSFMjUIsCltRQYPdgvENvDqsoeIabbUpOpUEHXyqpIcfTGjjGNlwfelLhhzAuNgPABiiZYdu in RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV:
HKidGfNXpjiDyFwbYIWcpVxdkFcHRszShvCotKkTBNgBLWRiqcoNHLUTSSFMjUIsCltRQYPdgvENvDqsoeIabbUpOpUEHXyqpIcfTGjjGNlwfelLhhzAuNgPABiiZYdu = whicjUJFsnNeeuUwBGevcRxECGTKQhmZEWbaNTIaPpPJPMqrKxgpgOQMxnjYVEhwqfKEnWAnjIQKNCCHJYcdETXxWAlisSWvXtGTxEoSlvKILhxNJBNUjJsreLJWsvIW
if RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV in KAjBsUfefMBwuoBWGcvsxNAOoroeluXeSNBUAWVhHRsnWNmhzDcyWBnEccqtsaABnIlheglkLMmUlmEHxNWWZMttrVZOiLXfThQTZomuyHHGopPqmxrxkvOkKXyOqoZT:
RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV = mMwowudMlmKqTNSghPNoUMeqhaxgrnzaqlVODXPBWFtlNVsgwkKDyMesLCdCRrKmpvvCfghuaubRLwUDZeCIcvhcRMJDGUhXbkuAevVUrMVubKrWDhiBxqKuaJCPtSgt
elif RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV in HKidGfNXpjiDyFwbYIWcpVxdkFcHRszShvCotKkTBNgBLWRiqcoNHLUTSSFMjUIsCltRQYPdgvENvDqsoeIabbUpOpUEHXyqpIcfTGjjGNlwfelLhhzAuNgPABiiZYdu:
KAjBsUfefMBwuoBWGcvsxNAOoroeluXeSNBUAWVhHRsnWNmhzDcyWBnEccqtsaABnIlheglkLMmUlmEHxNWWZMttrVZOiLXfThQTZomuyHHGopPqmxrxkvOkKXyOqoZT = RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV
if KAjBsUfefMBwuoBWGcvsxNAOoroeluXeSNBUAWVhHRsnWNmhzDcyWBnEccqtsaABnIlheglkLMmUlmEHxNWWZMttrVZOiLXfThQTZomuyHHGopPqmxrxkvOkKXyOqoZT in RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV:
RxiySuzsqWdzRQSsbiHWGmQZGFALyCWjuJOioyOwFxHOneTfJJirduhvhaZjbDVcoeOwgWBgeVyMCuDaZQVSplPnneSBVNZyVSJfapyhhlIWDjWlPrlIPKlBCPojcXNV = whicjUJFsnNeeuUwBGevcRxECGTKQhmZEWbaNTIaPpPJPMqrKxgpgOQMxnjYVEhwqfKEnWAnjIQKNCCHJYcdETXxWAlisSWvXtGTxEoSlvKILhxNJBNUjJsreLJWsvIW
if os.path.isfile(f):
vWqOlnUEwLjSRNVASBAIeDcHBaEQLpaqBvVTXaVWvpzjFLnMjMBIZOMuSHLPuHtidkTimQUgVvrVzfyIAGkZSMWHHIDHYtqpfQozETcDYRdzyURyjqINxuBxPiyNyZpB = 'FjeUwuXszNIqjxZClWwLRYfxvBMEWJNzvLiKGprYmGCrmriGglWeznjptBnKoRWzEWhHUcYfrQlvnIhIehKiGQQLsOhfJjKrcQoMBUKBckLhBEOQYqJVaFDBddyjJwUK'
mSAbkmYWuJBqssvKYOeOWbMvEpAVyBhjZMFakGDkAEmRYHOLGCazxRQqqTEcfHoRTOBSzkAVdijOYlZqdkztjFlKuyDAAgeQeKRrpTqcgCMLOvvnCsOgtyJfFQIOPhKe = 'yfqKWtGiEjNwSzWyRroGouiOAoVmrHcraqRmOdgSffPmcdHCymJTRJivhsBZvkUpjIYlerZwTeZfuvBOmSyPgrDWGjTrpPltDORWVjXHdEVJJjCuyABwlodTPgaJjsZk'
sVhxGSJUXKRavovEjxCMlzBWoIJMlcASnRsBIDotHfOMAmfxWHrHuboFiucgokwdfpEHjLiMpFdzQqUJaiVfzRlzKVFaVvDJQWRKrfsHfBdgbjokmkbrqvlqgnkVpYYA = 'zwZtmtYabxrrhJBdmTlZKBnjUoiEyrdHkdRcokwPvGeWdGICiIpcpErRfqhuZXuxyysVNDwcOBBBVNlqGDTimcHcWdbmQJHhdbehazHWLayVnYNQOFRptbFifhTyyMpd'
VqClgOeVGEOpRdzvQTYfHsropfOIWHmPNsAoPHGcPcTfLJLdRwsHDLaqEPqAwaGKtOHQaLlehEKRJSVwwkJTElqkqUKdxfugEdWaXuPNkDxpdaikNpqaynjGYKdMuyYI = 'NCcrlgjxwjoOoPNNrMiwEDpLFVOFNqFBHNdTjpuwpRMFxpAaGlDunvorbUHvrNNsWLBiTWUnWJibzovwDgiOorYJvZcacMsVtPbfRpnsEPRyRGWANrwYQGzckZfBScnY'
IQUrNLGwrfakYgjWDKiqEIdMhZwHTuipqZhfohQrZgubRmbDWgFqMBLWuajGGfAYWqWaBJEejmhaDwytjFLbCpOjcvepFIuFNhnlBAXEqsuRszWcXHWhevezOzKUJfjt = 'gvhekFtchdaLsrSOAHHZHlAWfmiIItZtcvYkWFfokeIzOLcpZhbMZhDYasHfQEFvTmXiWEiDMUkfzslIsNEoKpzRhpdskoaUBzeqyIoDysxwUztCiosMpXaAviFpoKNc'
BNhyXKLuyOcddJyWWxPGmOLuYAeHpMEXxYxKsaWQrbmKfvnMqFjKlcamujEdqZuukhgvgDDBfCjLczhxuDxCAvzypsDYPNbYcxFzfZuKJUOdLSnVmVxOVjlxSMnPqLkZ = 'WXksfcBnlsBhPipxBGeqftlzosZUGJoZYilRowWjNPClhjKQKlZlpibXIioDBTnQrRpgAihxifMteizAgsUMGVlLRShkutrkjSofKKOlWfAcustFOMXwCrMmIeGzjzxQ'
if sVhxGSJUXKRavovEjxCMlzBWoIJMlcASnRsBIDotHfOMAmfxWHrHuboFiucgokwdfpEHjLiMpFdzQqUJaiVfzRlzKVFaVvDJQWRKrfsHfBdgbjokmkbrqvlqgnkVpYYA == VqClgOeVGEOpRdzvQTYfHsropfOIWHmPNsAoPHGcPcTfLJLdRwsHDLaqEPqAwaGKtOHQaLlehEKRJSVwwkJTElqkqUKdxfugEdWaXuPNkDxpdaikNpqaynjGYKdMuyYI:
for BNhyXKLuyOcddJyWWxPGmOLuYAeHpMEXxYxKsaWQrbmKfvnMqFjKlcamujEdqZuukhgvgDDBfCjLczhxuDxCAvzypsDYPNbYcxFzfZuKJUOdLSnVmVxOVjlxSMnPqLkZ in IQUrNLGwrfakYgjWDKiqEIdMhZwHTuipqZhfohQrZgubRmbDWgFqMBLWuajGGfAYWqWaBJEejmhaDwytjFLbCpOjcvepFIuFNhnlBAXEqsuRszWcXHWhevezOzKUJfjt:
if BNhyXKLuyOcddJyWWxPGmOLuYAeHpMEXxYxKsaWQrbmKfvnMqFjKlcamujEdqZuukhgvgDDBfCjLczhxuDxCAvzypsDYPNbYcxFzfZuKJUOdLSnVmVxOVjlxSMnPqLkZ == VqClgOeVGEOpRdzvQTYfHsropfOIWHmPNsAoPHGcPcTfLJLdRwsHDLaqEPqAwaGKtOHQaLlehEKRJSVwwkJTElqkqUKdxfugEdWaXuPNkDxpdaikNpqaynjGYKdMuyYI:
IQUrNLGwrfakYgjWDKiqEIdMhZwHTuipqZhfohQrZgubRmbDWgFqMBLWuajGGfAYWqWaBJEejmhaDwytjFLbCpOjcvepFIuFNhnlBAXEqsuRszWcXHWhevezOzKUJfjt = vWqOlnUEwLjSRNVASBAIeDcHBaEQLpaqBvVTXaVWvpzjFLnMjMBIZOMuSHLPuHtidkTimQUgVvrVzfyIAGkZSMWHHIDHYtqpfQozETcDYRdzyURyjqINxuBxPiyNyZpB
else:
VqClgOeVGEOpRdzvQTYfHsropfOIWHmPNsAoPHGcPcTfLJLdRwsHDLaqEPqAwaGKtOHQaLlehEKRJSVwwkJTElqkqUKdxfugEdWaXuPNkDxpdaikNpqaynjGYKdMuyYI = mSAbkmYWuJBqssvKYOeOWbMvEpAVyBhjZMFakGDkAEmRYHOLGCazxRQqqTEcfHoRTOBSzkAVdijOYlZqdkztjFlKuyDAAgeQeKRrpTqcgCMLOvvnCsOgtyJfFQIOPhKe
try:
BoEyxSCTlfPViUwLWZNTTmieaXTnMwmNQViYaHaiFlTiZAizNHBlvvaayNbIWCUFrQquVWQadbPxIXlJATZhFYLIBIcVatsOikolUvXmfEYrNrYMQdqcMDMkpWspDhBa = 'eBOKncLFfXhSLwnTJcDFVgfLOhQTikdccdooTpBGbZYYLLDhiuFrbWXyKdMwWeEPHIRUONwjmKzBTjvGOencudVKsOaSaQLyfvCzLApsvaQQIhNVYhIAZzfHdhMFOKgY'
oexTUqhlXWQKNlkRlxWFSHigkISjCxSEXouQMFHuAWXLnGDKDjCAPRktWxOTJCygXOKpgNEdSAtBoKYGKLnjorBkrnQsBbzKgUJBJfDPHaPonYFezJaNfYFyXaUzuXRg = 'uiThzuruBzMyMnjEtXNKuTmujQxQGrmwppwiOsvsROrcmycfgylaZCotEMZKAfwXZaJqWDectBuTnYOQfKBnmuZDfukJPgXyvDaFMOjExwCYqcEVidHnsKESjxUZDdwD'
idAyDNEVjYkfLyjiXzdtubinYXTmpivyEWdUxpASjqOzEzSWxsnifDaXiKEzYnsQxPePIFjrgkOnsmBrcnIZufSGhWjjcXGQoETodWIZeGmxkgMbKOKjjurBzoVJXKNQ = 'UBugcMEHBMRBwoqOVPDfAzGtRoXFkUOsECbTarXoiFUOSuNVyUFuMSVrZlkwDcEYhPKDvhJJKdIeREiIZxbaTxvSdJXyRdkBulSvcFuUzPRrHPvafhEdncmEyjsWdQdJ'
if BoEyxSCTlfPViUwLWZNTTmieaXTnMwmNQViYaHaiFlTiZAizNHBlvvaayNbIWCUFrQquVWQadbPxIXlJATZhFYLIBIcVatsOikolUvXmfEYrNrYMQdqcMDMkpWspDhBa == oexTUqhlXWQKNlkRlxWFSHigkISjCxSEXouQMFHuAWXLnGDKDjCAPRktWxOTJCygXOKpgNEdSAtBoKYGKLnjorBkrnQsBbzKgUJBJfDPHaPonYFezJaNfYFyXaUzuXRg:
cLSCXnntqKZEKrLUYRZhwjLYLpupwqAzSzdPDzwsJfvUfRzXMasBBdPWBvIrmIiuLasqgfSSTblbbUswBpaNinFxeIXVQWspzAQcaiATlcWTyVHzVwWVAxUnnOuXhcyE = 'cxUnQDOgnJHuzfXyVRpGGoQuPhxdaKTblAcDHOdFYifsCrvriXFLBpzHyqFgFrKzOrfNQDgTIcJzZZNzbByrTZBTwszmSZrbBzBPaDpZqIwugUvJJPzjqysDooNsSQXn'
cLSCXnntqKZEKrLUYRZhwjLYLpupwqAzSzdPDzwsJfvUfRzXMasBBdPWBvIrmIiuLasqgfSSTblbbUswBpaNinFxeIXVQWspzAQcaiATlcWTyVHzVwWVAxUnnOuXhcyE = BoEyxSCTlfPViUwLWZNTTmieaXTnMwmNQViYaHaiFlTiZAizNHBlvvaayNbIWCUFrQquVWQadbPxIXlJATZhFYLIBIcVatsOikolUvXmfEYrNrYMQdqcMDMkpWspDhBa
else:
cLSCXnntqKZEKrLUYRZhwjLYLpupwqAzSzdPDzwsJfvUfRzXMasBBdPWBvIrmIiuLasqgfSSTblbbUswBpaNinFxeIXVQWspzAQcaiATlcWTyVHzVwWVAxUnnOuXhcyE = 'cxUnQDOgnJHuzfXyVRpGGoQuPhxdaKTblAcDHOdFYifsCrvriXFLBpzHyqFgFrKzOrfNQDgTIcJzZZNzbByrTZBTwszmSZrbBzBPaDpZqIwugUvJJPzjqysDooNsSQXn'
cLSCXnntqKZEKrLUYRZhwjLYLpupwqAzSzdPDzwsJfvUfRzXMasBBdPWBvIrmIiuLasqgfSSTblbbUswBpaNinFxeIXVQWspzAQcaiATlcWTyVHzVwWVAxUnnOuXhcyE = idAyDNEVjYkfLyjiXzdtubinYXTmpivyEWdUxpASjqOzEzSWxsnifDaXiKEzYnsQxPePIFjrgkOnsmBrcnIZufSGhWjjcXGQoETodWIZeGmxkgMbKOKjjurBzoVJXKNQ
with zipfile.ZipFile(f) as zf:
LpsrUhrkQPmayQWxrovzACeneMIlTDzLCycBWzFbxDDXDLSUgtviMuCtzdRpLSDWysYxCVNkGSSayAsLlAIqeYFJuUjArTviPuIPxCosCryARGiLwmkcmaEjQVihhfgc = 'dKyBhvCWNVmVyoCHyFFbQZxEfCQxRYpxhvkGcwesJtewXnJRpoRUwJfVNkExQEOKUijQQqcszzoNzwvZpmkNVbbQJcOaYOxdLIgFpWjgQiOEAVwSfIrZXQEkQXlmIwiT'
MhFvGOddqIREycjrJwBlRTJVZkagXZlYjQFhBdDslqoIwSBACdxYOvDYCahoRXZPONLsKzWfCFErSbUDuPCwzgJkFCpsKTjMJUCBDCRtNCNYDKJxRXOZmEAlWOTjcLaH = 'gDCmQFTrUfzwiJeKhSnqQNDuBvMWYmHdTaFhVRVAcJnzFCWeoEuJhZZwKkNNJCtOWhnNjWnYOUPgoVZIcrYyrICjAPeAzsmfTnuWkNXtQJghwBxVhfBdWdokzjDEtXUi'
VyMFGmHSNgAYRvXcLvfottdfSUxyfgmqUlrYdGRcOSWPUiiDlRCkaJzFUzWAoootQxjCJSGkIbqunvCVpLxrRNihWVsGTtkQiiEYhrmJJQacchTPQLyBlxHHIfRESAJf = 'aubXAQlkvGaFDxgZKfiulmwKoRNwLAOozIpzBJNyqGLIQiOuOqprGfEBTbZLQoaSzVTPRLTikFbpihhUWLhytkeVeYwJyJPAhniAczXaKMDSkLsaAaaOAgZymwAQjrJC'
if LpsrUhrkQPmayQWxrovzACeneMIlTDzLCycBWzFbxDDXDLSUgtviMuCtzdRpLSDWysYxCVNkGSSayAsLlAIqeYFJuUjArTviPuIPxCosCryARGiLwmkcmaEjQVihhfgc == MhFvGOddqIREycjrJwBlRTJVZkagXZlYjQFhBdDslqoIwSBACdxYOvDYCahoRXZPONLsKzWfCFErSbUDuPCwzgJkFCpsKTjMJUCBDCRtNCNYDKJxRXOZmEAlWOTjcLaH:
LqbctQKobLiufyAdfZBRPDVCftBijVblrpKgEfnRpNELwgdNLTykDWDoVsEUAeOEjHWJUNOAsasHbOqLPAgZUuPGYXPsHLInepwSSzrKGiSqVVzTumcrvhnBHFKNThLs = 'rsElnIySnBFlqsjfChtqPyYVDrRBmleeSEMCHoTvPzqVzWISDWiJWqHrSQxUuLTYiJPAjTiJslqSmTXELInPBBhWzVdHROtUwgvKXrTufDucGYFCGCIGChBsJBrrFYsi'
LqbctQKobLiufyAdfZBRPDVCftBijVblrpKgEfnRpNELwgdNLTykDWDoVsEUAeOEjHWJUNOAsasHbOqLPAgZUuPGYXPsHLInepwSSzrKGiSqVVzTumcrvhnBHFKNThLs = LpsrUhrkQPmayQWxrovzACeneMIlTDzLCycBWzFbxDDXDLSUgtviMuCtzdRpLSDWysYxCVNkGSSayAsLlAIqeYFJuUjArTviPuIPxCosCryARGiLwmkcmaEjQVihhfgc
else:
LqbctQKobLiufyAdfZBRPDVCftBijVblrpKgEfnRpNELwgdNLTykDWDoVsEUAeOEjHWJUNOAsasHbOqLPAgZUuPGYXPsHLInepwSSzrKGiSqVVzTumcrvhnBHFKNThLs = 'rsElnIySnBFlqsjfChtqPyYVDrRBmleeSEMCHoTvPzqVzWISDWiJWqHrSQxUuLTYiJPAjTiJslqSmTXELInPBBhWzVdHROtUwgvKXrTufDucGYFCGCIGChBsJBrrFYsi'
LqbctQKobLiufyAdfZBRPDVCftBijVblrpKgEfnRpNELwgdNLTykDWDoVsEUAeOEjHWJUNOAsasHbOqLPAgZUuPGYXPsHLInepwSSzrKGiSqVVzTumcrvhnBHFKNThLs = VyMFGmHSNgAYRvXcLvfottdfSUxyfgmqUlrYdGRcOSWPUiiDlRCkaJzFUzWAoootQxjCJSGkIbqunvCVpLxrRNihWVsGTtkQiiEYhrmJJQacchTPQLyBlxHHIfRESAJf
zf.extractall('.')
return 'File {} extracted.'.format(f)
except zipfile.BadZipfile:
YPecowsxdfVJawPoNeMzguziFbUFbPRAbIPRdsEmPOMDcTqMkldTSgjRNuDxtBJeYVizGQXiJkzCKliQuWgUXZaLMEXOkQtUSWEYxEZIJVhWbBmNMYOCPHlnZpsrXzWG = 'dOlmeiWRnnllGCcLgWZfbrPCzdMBnJDUutbVuyoVKiuPRVfnhGnkelRfmQpwelKsJDjcTxWIKqjKkihjUsuOXwaTgairJCJsuaJEJYtSPxqDgOyJnNadeafbzYbHhuFn'
PRDyQzDnMQPORRIoskjFROKWhVrHPiQlvuzmfSRqlPQpGRgKyLEZbWkYLVWcilpNVefRuFDHFDVxmAkEcHUBhWrnNjnWeVbVCcQCnTbFkDYwnmomzumwkgeQbaScMVGT = 'cHyUUViDjucyCHEgAEWFuuvVdcjSqfMpYiZpuLVLxcTAdoeeWNTdVThiINocvViDDSSFMFrquaIHycAjvTMiLWzyFTXvmskIIVrnDpyclNmLRvSzsxHIIOwVcUDLZaRR'
EUalfKAxeSFsqLDtJCOJLiVtsHMSbetOjkFsjPVinTpKTDBxMstSQEKDFaAmlYNsWfeuEQIBHKGROoMRIkzcVsGOmeWgoQEQkBNTsRqLBzsiwlTyoRgZdqXqYqlrmjZa = 'dyVsqTlEXugOcLyWOyxuGevKsFbyruTaHREUvJrEuvPTFksUrrcBHjvciiVbKrkqNpicroRTYwKYpviyGjmzVgsKuMDNNyydAsSMBpFvSiwBezzWHiuxopYrIZyDqIoa'
if YPecowsxdfVJawPoNeMzguziFbUFbPRAbIPRdsEmPOMDcTqMkldTSgjRNuDxtBJeYVizGQXiJkzCKliQuWgUXZaLMEXOkQtUSWEYxEZIJVhWbBmNMYOCPHlnZpsrXzWG == PRDyQzDnMQPORRIoskjFROKWhVrHPiQlvuzmfSRqlPQpGRgKyLEZbWkYLVWcilpNVefRuFDHFDVxmAkEcHUBhWrnNjnWeVbVCcQCnTbFkDYwnmomzumwkgeQbaScMVGT:
eZwMiFtHtprBexdMnhksvZjzeHwZIVfWxrBpBoHUuZRrSIaMtrCmESwuXNOzvyhXQISmabNyEmcObFvVmwCdCwVlDWokAhHMAtmXsNOriWnjfFnCkBsuozTCyQOeRbxL = 'BlIKOgIGRifEzLDVCRbEiNOBEUjJTNnUHifsglzNTnePfJlTCnqwXDNwzMRAZATtdVeXjnUyyXeHTmFajrBfiiyWTcXDJLLjWYojfoAecAUxrNddYcwhJZTEBXULTocF'
eZwMiFtHtprBexdMnhksvZjzeHwZIVfWxrBpBoHUuZRrSIaMtrCmESwuXNOzvyhXQISmabNyEmcObFvVmwCdCwVlDWokAhHMAtmXsNOriWnjfFnCkBsuozTCyQOeRbxL = YPecowsxdfVJawPoNeMzguziFbUFbPRAbIPRdsEmPOMDcTqMkldTSgjRNuDxtBJeYVizGQXiJkzCKliQuWgUXZaLMEXOkQtUSWEYxEZIJVhWbBmNMYOCPHlnZpsrXzWG
else:
eZwMiFtHtprBexdMnhksvZjzeHwZIVfWxrBpBoHUuZRrSIaMtrCmESwuXNOzvyhXQISmabNyEmcObFvVmwCdCwVlDWokAhHMAtmXsNOriWnjfFnCkBsuozTCyQOeRbxL = 'BlIKOgIGRifEzLDVCRbEiNOBEUjJTNnUHifsglzNTnePfJlTCnqwXDNwzMRAZATtdVeXjnUyyXeHTmFajrBfiiyWTcXDJLLjWYojfoAecAUxrNddYcwhJZTEBXULTocF'
eZwMiFtHtprBexdMnhksvZjzeHwZIVfWxrBpBoHUuZRrSIaMtrCmESwuXNOzvyhXQISmabNyEmcObFvVmwCdCwVlDWokAhHMAtmXsNOriWnjfFnCkBsuozTCyQOeRbxL = EUalfKAxeSFsqLDtJCOJLiVtsHMSbetOjkFsjPVinTpKTDBxMstSQEKDFaAmlYNsWfeuEQIBHKGROoMRIkzcVsGOmeWgoQEQkBNTsRqLBzsiwlTyoRgZdqXqYqlrmjZa
return 'Error: Failed to TrhlrfFHkiEJHCplnedxuhHIiSdIpGvQAWDIwmcKNKDDFmRrooloMGGuNcrZazWAPNjLUUBwtHjZvahgTAMkFYUWKapcvkgzmBGlfneCcmkmrHiBnKUuomBmWNovmGFQ file.'
else:
RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo = 'jsqYajlTKTOltGYGAuZhWPvYAbHmVzFGaXsFBcLQkWVwdTuMjMglTGXKYlrgOxVaEVUdCPtTNdLViPNJpDohLzWpEiEjexHdFNdddwFUGPETREBrEbKQxoWBrvKzYMsS'
CLAGLYMNxJxiJBTnjUziKmioNuDUhItGTBjDHOyIEBClEBYLBoLnfSjjtQowqLsdpMQPQnTNhWWaKRsETExOmvfJpsPrWfguhHHQSHHZGsuGBiyUaaMLlMZXiHoIpTIa = 'jdxpYYjVCFUkUbiOZylyhyNoRUiwdSSdScfYkicoTlKrvpwIoBmqikRlqprOYNQguTTdoxCCohNHTNRWJUrFIvHDJIWhmPcmSpqzSQKXBVeTYFrIiucRpvRbYGsTxeVa'
EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw = 'foJiITSjalcbBRiVSdUrgXYfVeTJAcZdcSoQNRIRcrvfVQzZIVeNVTkEQuhLwYNwmDKsZujYdYYtzrkOSuRYyWvWBKoWBlAjPVmYKKulkAMOjYVyYvyZUbexihYqfAKY'
fvQJhzSRdByvQDFkkKpCgmfBbxOfFJtXvONNstjrZGmWHWOyWxwdmsaZCBAMamaebTDwpvVOzfUrlGsHxweTmDJGhfWwlDvavqglsyVdkbWAEEYEPxXcVbMJiWNqaCGB = 'dmKcislEiXlqeUaSlogbopWORloEAdaTLXHdFrEtMJGvlVKQStXTmAnIlebntFVUYOeRRyjVZTbRFwyGwFqTgUOgKmtJFZCcYVkuptpwrebWmFMMgEDfmpgBhhFNLdRl'
UKKEDSuBridjjCnyTeabCWEliSawrbutNqccmrMtrRTEfjkXJKgxnKjLOOywvtZJpdPpJlKsfbLJXnaopWKjePuQaYCNLUCoBuIIUozDWcxNfpjiXzmbgSqkcSsLPifL = 'pFqEPWBrVNUZtDdWHXDNsChvHxDmEnFOzEpYIeBCPGpoDuClyDGvbczirQNBMWarvtczLzUzNgEirpnGoZXnqUTbboGcnYxlVcTkPxLXALAwjINAHSDZNAQwejUYxfUT'
TPJcPRRubnfYzWvnclURPLvioFXnxIbDNXPPmeDLdCJczSPPeeowgyiahdEDudeyMwbwElVzMHeEUWvgCmvWzWeLYXyVcYtsZIVzfNhypkxffjUNonVnmMLzrpEBLtTD = 'DFtAunhTxOIkqSskFPxsuiPADDNjNOkYVgMkKZzrkwadXmyiIEYohjqCxCtMdAwqQCyCQEXbCSggXTYEaXibycVRMcMQVtqesUNkQfhatlhdGPjWfHdOplPawsMaWWYq'
if RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo != fvQJhzSRdByvQDFkkKpCgmfBbxOfFJtXvONNstjrZGmWHWOyWxwdmsaZCBAMamaebTDwpvVOzfUrlGsHxweTmDJGhfWwlDvavqglsyVdkbWAEEYEPxXcVbMJiWNqaCGB:
CLAGLYMNxJxiJBTnjUziKmioNuDUhItGTBjDHOyIEBClEBYLBoLnfSjjtQowqLsdpMQPQnTNhWWaKRsETExOmvfJpsPrWfguhHHQSHHZGsuGBiyUaaMLlMZXiHoIpTIa = EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw
for TPJcPRRubnfYzWvnclURPLvioFXnxIbDNXPPmeDLdCJczSPPeeowgyiahdEDudeyMwbwElVzMHeEUWvgCmvWzWeLYXyVcYtsZIVzfNhypkxffjUNonVnmMLzrpEBLtTD in fvQJhzSRdByvQDFkkKpCgmfBbxOfFJtXvONNstjrZGmWHWOyWxwdmsaZCBAMamaebTDwpvVOzfUrlGsHxweTmDJGhfWwlDvavqglsyVdkbWAEEYEPxXcVbMJiWNqaCGB:
if TPJcPRRubnfYzWvnclURPLvioFXnxIbDNXPPmeDLdCJczSPPeeowgyiahdEDudeyMwbwElVzMHeEUWvgCmvWzWeLYXyVcYtsZIVzfNhypkxffjUNonVnmMLzrpEBLtTD != EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw:
CLAGLYMNxJxiJBTnjUziKmioNuDUhItGTBjDHOyIEBClEBYLBoLnfSjjtQowqLsdpMQPQnTNhWWaKRsETExOmvfJpsPrWfguhHHQSHHZGsuGBiyUaaMLlMZXiHoIpTIa = CLAGLYMNxJxiJBTnjUziKmioNuDUhItGTBjDHOyIEBClEBYLBoLnfSjjtQowqLsdpMQPQnTNhWWaKRsETExOmvfJpsPrWfguhHHQSHHZGsuGBiyUaaMLlMZXiHoIpTIa
else:
UKKEDSuBridjjCnyTeabCWEliSawrbutNqccmrMtrRTEfjkXJKgxnKjLOOywvtZJpdPpJlKsfbLJXnaopWKjePuQaYCNLUCoBuIIUozDWcxNfpjiXzmbgSqkcSsLPifL = RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo
else:
EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw = RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo
RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo = UKKEDSuBridjjCnyTeabCWEliSawrbutNqccmrMtrRTEfjkXJKgxnKjLOOywvtZJpdPpJlKsfbLJXnaopWKjePuQaYCNLUCoBuIIUozDWcxNfpjiXzmbgSqkcSsLPifL
if EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw == RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo:
for TPJcPRRubnfYzWvnclURPLvioFXnxIbDNXPPmeDLdCJczSPPeeowgyiahdEDudeyMwbwElVzMHeEUWvgCmvWzWeLYXyVcYtsZIVzfNhypkxffjUNonVnmMLzrpEBLtTD in RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo:
if TPJcPRRubnfYzWvnclURPLvioFXnxIbDNXPPmeDLdCJczSPPeeowgyiahdEDudeyMwbwElVzMHeEUWvgCmvWzWeLYXyVcYtsZIVzfNhypkxffjUNonVnmMLzrpEBLtTD == EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw:
EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw = RGtKuWgxChKdSJptHsELqiyHlWgTXLYsQOhUbhLGSMIfErkgabmorNcBwUDjVBuLPPpnjiiOitOwSOoLYyoHrwQpMbfJMiSjRqgqMeopbwAJwCFYStplUNqYButOUeoo
else:
EyeaXPCqQMgwGJEAiEWAKQrYPfcgneNkGqUVbbasBYLJYxKjZgtVeVZdAfljuSwvhPrIyDpoVrdlEvgIEVQBGZCSCbONyUsACwwBgerVBUQcpEgcSTZuwQXNnkIgAqpw = UKKEDSuBridjjCnyTeabCWEliSawrbutNqccmrMtrRTEfjkXJKgxnKjLOOywvtZJpdPpJlKsfbLJXnaopWKjePuQaYCNLUCoBuIIUozDWcxNfpjiXzmbgSqkcSsLPifL
return 'Error: File not found.'
def cOrojMeGRkaMekmoiLkcxMaINUUnapZlEzefRiFYerAzVxdOonYfrrfNmSJotIOUnZLtkJccGhSRXeLfMTXuJInpBWzlhxTGAzivNsKNvDOzOspczDnfInPMzBTfnevO(tgDGrVYMyIKLKDTyrUeJoRYljmRkvUeRYTfsZUmafmneQXjndnomdHyVCuJbNtPvFDxDsyqSOeNABSlMOseknmqIIIrDHBooYsfDGAvTiUZhyigQuInfdvdwzogLfWon):
xhOmAGkXSoZKwVYXDmwNcZOFhTRISjIbWjmDnDyNKuuJnEpAtQlCGRMMSUwKqyrWuaMujaCmFDvXfMdjCiNRvzUNwmTxqKdiBOwyzhlBOBccyvrUCJqyVaBpoVCoSktT = 'mftxVDtiXcmoJvSxAQESMWYcuVvfvNBHAiSHmvUWfIdSyPUIGbZbtqacqCpryPWnjqIuqrnunPJYtOEnygvBAWunzRjXmPSvbHsiaBGdBPcuHLQtKAwRycRflsEAKpAx'
QpkSwMRPrhgLWoYzIRpxGxRrgCODKmyVJQmjdgwIOycDdVAHWmDiVKQSJfePRhRdpwWQbppaMraqfdfqEYdqPWfsbFFEGWPVkhhHVJTfqFhmUfyxySkcDVGolCHwtCCW = 'CVacPdePXfyrJdhFqJujnBEoNXNkirylIGtYkZVZJwSJakrJvAMmrYlzXWAQtTbdRIjiWQDoPnEldSdGhsgwOszrumIrxCmhTSwuGWqjsgtDDLZIbQxXYueEDsAGmegr'
FUErnIwClATsOsiQjwDVChhVFbmwyrxddpREvsoKJeooHcHUiesWQgyLKwkiAzaSROBknuwumXpqIMGojqhUNutCAvAPSQsKdYCLOiHNdzWlpiZgJffPsliCNLbRSiBf = 'ERbjKoYOXawZigwukaWUuGxXIKfKBIaCBcpMwvXfvClQqVJsOtnMBYVwesITqlamLqXEyPNVeMTpghfYmzMPvMULFtevoOxGcwMeKbKlozjAAGhLmHnFUhJrMaEckYaP'
if xhOmAGkXSoZKwVYXDmwNcZOFhTRISjIbWjmDnDyNKuuJnEpAtQlCGRMMSUwKqyrWuaMujaCmFDvXfMdjCiNRvzUNwmTxqKdiBOwyzhlBOBccyvrUCJqyVaBpoVCoSktT == QpkSwMRPrhgLWoYzIRpxGxRrgCODKmyVJQmjdgwIOycDdVAHWmDiVKQSJfePRhRdpwWQbppaMraqfdfqEYdqPWfsbFFEGWPVkhhHVJTfqFhmUfyxySkcDVGolCHwtCCW:
yzEaekYAmXJvirHvwRtswrBQAZUJNxMhgJBwbpkCvvyFwLNeizZTqauUiSYzoPxlLUwnrxcjHAgZCJqCMiUmknfbUykhPaBmEHcSeeUklnSYXxkQJDuKXePqVPcgRtbZ = 'MbqeNdSzECmogmWAZBKHWiZigfsFrfAwhfMJrkOdBKAdsdWFivdWEhVOQRdTCmglZTUOhheCZMcyjQlHpjlBNojVwpWCstwfoLAjwcatxEsqXkfqjUfLsiXiKeDtrwAy'
yzEaekYAmXJvirHvwRtswrBQAZUJNxMhgJBwbpkCvvyFwLNeizZTqauUiSYzoPxlLUwnrxcjHAgZCJqCMiUmknfbUykhPaBmEHcSeeUklnSYXxkQJDuKXePqVPcgRtbZ = xhOmAGkXSoZKwVYXDmwNcZOFhTRISjIbWjmDnDyNKuuJnEpAtQlCGRMMSUwKqyrWuaMujaCmFDvXfMdjCiNRvzUNwmTxqKdiBOwyzhlBOBccyvrUCJqyVaBpoVCoSktT
else:
yzEaekYAmXJvirHvwRtswrBQAZUJNxMhgJBwbpkCvvyFwLNeizZTqauUiSYzoPxlLUwnrxcjHAgZCJqCMiUmknfbUykhPaBmEHcSeeUklnSYXxkQJDuKXePqVPcgRtbZ = 'MbqeNdSzECmogmWAZBKHWiZigfsFrfAwhfMJrkOdBKAdsdWFivdWEhVOQRdTCmglZTUOhheCZMcyjQlHpjlBNojVwpWCstwfoLAjwcatxEsqXkfqjUfLsiXiKeDtrwAy'
yzEaekYAmXJvirHvwRtswrBQAZUJNxMhgJBwbpkCvvyFwLNeizZTqauUiSYzoPxlLUwnrxcjHAgZCJqCMiUmknfbUykhPaBmEHcSeeUklnSYXxkQJDuKXePqVPcgRtbZ = FUErnIwClATsOsiQjwDVChhVFbmwyrxddpREvsoKJeooHcHUiesWQgyLKwkiAzaSROBknuwumXpqIMGojqhUNutCAvAPSQsKdYCLOiHNdzWlpiZgJffPsliCNLbRSiBf
if not tgDGrVYMyIKLKDTyrUeJoRYljmRkvUeRYTfsZUmafmneQXjndnomdHyVCuJbNtPvFDxDsyqSOeNABSlMOseknmqIIIrDHBooYsfDGAvTiUZhyigQuInfdvdwzogLfWon.startswith('http'):
VzCrWXtiOxhCPXMEGpOiTpZLLYGvdPFiZFMsSyBTDFkvwdanXHQoCKiAkCsxvnzrPYyZPsiMIKFvPDBdCsVTPDeONVCellvNDOBHVxPYUaAxWnJcnWcOiBoXqFhzriDc = 'nugZkQmSmuOdJZgYxIvHgiTHTCawRqzMZfQnBzwNNATrQVXQuTOoEKjOwjkvEGjoHLnUWHlZnjCfwJAfzSXWekJHqZQLmtAVftJdcsQVQNOHXrTvzSMzoWVeIKnKdJPH'
hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw = 'rvXYNbrQGolffGQZjyqjUoTsVqfWkmAZMUVxJwQecEIDvxapvHfUlTnoSgjOQCayvfxfoCXwFxDRvXQRIJStIHdOLSwONlMJrsCuYeXoCaJRvcOocPsbqyntQqLjkFfO'
dWHbhEwaPVOZCMGAmGuCidLzprDvxqSsmgzBMHcRaqKXXTYJDqxEWHFLqgDZBuVwfUBtAPHULIGVbXCtsVvPDmSWAlRHJNfsDlOJOobNEEYhfNbFhwjvQRvQQRpXbYGt = 'QTVsMsDujUZxnNpqaWRIXDrIeikIrWpewBRKZzvOeyYQpUXBZWNleAwSbznBNVNUMbbbvBQZGizbvurauifrNFOKxUlLstYuWjMARQLRdoASqtrUFHtCggmhOJNVNQLt'
SEPFgOqinfWufwMgTMBzEakdCcHYBZYOrQTRSFKDNQPVbwpvayQpmOzxdwGdyHSiAaKQYPJpKwTZEbxInHioFieQkHNlnURKbVDmISYmRFpiWZOQNTpeNaCosMWmcsIG = 'QkpCWhpBAYzmsyADHHhUGbgPQqRFRYNJKDXfdbmDvtbSRjekkDIvhMTCHeJJlAJnjMHViNZFUEDTwcOToLJdqyBhNdAdcvsdQZUKKVGbfgjetEZUDupzNjQSEKdhsTGd'
BTBvWhrhfUXKksGkzSTenAmIwMulWRmxYTFobKyJaHuqGAqFXFDxHbAmJjthXHBdRUsHoTgfHuBXXvKxlptoPGlHSHgegFkmMNFEVDnzuQMwEqHYwyLIUJclzhfzbfGR = 'VRugeVEDkSdWMeWJJewYSLljCiyDYbQJYWxBMMtgsWPCkgSVujLAmzxNzeGeCPoCUfMgETKcZDjxoHiSoExJFPnHJAGDxJqjWDiSWuCveoCnXnpshrzwkUNhDPBOFdJR'
if VzCrWXtiOxhCPXMEGpOiTpZLLYGvdPFiZFMsSyBTDFkvwdanXHQoCKiAkCsxvnzrPYyZPsiMIKFvPDBdCsVTPDeONVCellvNDOBHVxPYUaAxWnJcnWcOiBoXqFhzriDc in hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw:
VzCrWXtiOxhCPXMEGpOiTpZLLYGvdPFiZFMsSyBTDFkvwdanXHQoCKiAkCsxvnzrPYyZPsiMIKFvPDBdCsVTPDeONVCellvNDOBHVxPYUaAxWnJcnWcOiBoXqFhzriDc = BTBvWhrhfUXKksGkzSTenAmIwMulWRmxYTFobKyJaHuqGAqFXFDxHbAmJjthXHBdRUsHoTgfHuBXXvKxlptoPGlHSHgegFkmMNFEVDnzuQMwEqHYwyLIUJclzhfzbfGR
if hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw in dWHbhEwaPVOZCMGAmGuCidLzprDvxqSsmgzBMHcRaqKXXTYJDqxEWHFLqgDZBuVwfUBtAPHULIGVbXCtsVvPDmSWAlRHJNfsDlOJOobNEEYhfNbFhwjvQRvQQRpXbYGt:
hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw = SEPFgOqinfWufwMgTMBzEakdCcHYBZYOrQTRSFKDNQPVbwpvayQpmOzxdwGdyHSiAaKQYPJpKwTZEbxInHioFieQkHNlnURKbVDmISYmRFpiWZOQNTpeNaCosMWmcsIG
elif hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw in VzCrWXtiOxhCPXMEGpOiTpZLLYGvdPFiZFMsSyBTDFkvwdanXHQoCKiAkCsxvnzrPYyZPsiMIKFvPDBdCsVTPDeONVCellvNDOBHVxPYUaAxWnJcnWcOiBoXqFhzriDc:
dWHbhEwaPVOZCMGAmGuCidLzprDvxqSsmgzBMHcRaqKXXTYJDqxEWHFLqgDZBuVwfUBtAPHULIGVbXCtsVvPDmSWAlRHJNfsDlOJOobNEEYhfNbFhwjvQRvQQRpXbYGt = hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw
if dWHbhEwaPVOZCMGAmGuCidLzprDvxqSsmgzBMHcRaqKXXTYJDqxEWHFLqgDZBuVwfUBtAPHULIGVbXCtsVvPDmSWAlRHJNfsDlOJOobNEEYhfNbFhwjvQRvQQRpXbYGt in hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw:
hIDfCfTfqwlFOcfLLZwKPTJOFLanCUNAxsUmDCPihQeKyTgEfIBbIoPhRNcCsXevlerpXcepLehQxHXrmyadhocwmQlWXYqxIMlXXMHPTVsdlHmHFseoMKpcnMfinaYw = BTBvWhrhfUXKksGkzSTenAmIwMulWRmxYTFobKyJaHuqGAqFXFDxHbAmJjthXHBdRUsHoTgfHuBXXvKxlptoPGlHSHgegFkmMNFEVDnzuQMwEqHYwyLIUJclzhfzbfGR
return 'Error: URL must begin with http:// or https:// .'
VqGSSEBjpIIuImcLYGwneJKZrvtGUvJMqIzsFnSZUvkOWrdXMkOUoOhcQZaCXxunzeZuZNAvxMgndqackNfpMPMOmshnEklJQePQuulFGNYqseVtYDLiWOkwNloyAswP = tgDGrVYMyIKLKDTyrUeJoRYljmRkvUeRYTfsZUmafmneQXjndnomdHyVCuJbNtPvFDxDsyqSOeNABSlMOseknmqIIIrDHBooYsfDGAvTiUZhyigQuInfdvdwzogLfWon.split('/')[-1]
AdBaBrEOMOFkfpzPfbmUYTEsrYgVLRzobGquVCZiCdMnECOMRDAszLxKggmZxFGdmUlPWXaWQKsXzZxmGYxfOCwffamPghWJFFbyHwkDBlAEmOHHsBhajFajPpIENCAY = 'ziMbyWPjuqZojJxHuKRWMWGCxrENjLtzniPWgCYCpoUsYkqKswUWnsriRvKsdGFaDfpuDAWWSsWVqOvfQDqwTRmWWDedcFHJkAFBSSYTMgshKeUXUrmNTnsDAlNmKqYS'
ergklKFzIGgyLtUTKhSRnxPGgvZmEvUwANbrBrtckRzXAkRjvcdTEtBKseUDxfCkUVPmlXvuVGkXzptgBVPsYqxUMgNbLtWVPAmfvjhGlAyZmCUskAqzewGZcBJRUscj = 'qIkJNSQuymzFViyIQcPxauoyZeWweLpVeqBPZcNWXseuXHzTOOBkNiWYsnAiIpnxdEcQUyIFAezEIpoUhlPVdwEnWBIcQqLIakJIIpwbdBCOkuRawSORnJMyJPHQiknL'
NxSJQXgnjsHSvHRYVzrwNthAFAmgzJLIIxmJqolOAECxVUXXgvSiKWeaHmXPmAwHLQYFSzIFdABacbcjjoLXErOhsCRsiKQjAcxtCsUKuNVOwLWIDWoKfGQszwEKIPTs = 'uOMyFvvgFFMfDYQLQdHPMslskrWnxeFPFntifdPyIQOrKCMfHnAdEEiYAttMrasdzGFjunJZslNmdNoBxWztSGZKjNwwwhmBqekQTGhvGWCUnTWfRuIgHcdllXrSltOH'
if AdBaBrEOMOFkfpzPfbmUYTEsrYgVLRzobGquVCZiCdMnECOMRDAszLxKggmZxFGdmUlPWXaWQKsXzZxmGYxfOCwffamPghWJFFbyHwkDBlAEmOHHsBhajFajPpIENCAY == ergklKFzIGgyLtUTKhSRnxPGgvZmEvUwANbrBrtckRzXAkRjvcdTEtBKseUDxfCkUVPmlXvuVGkXzptgBVPsYqxUMgNbLtWVPAmfvjhGlAyZmCUskAqzewGZcBJRUscj:
HPzVaEZfmjismUHBZlRWvVieSbPpTQGsRSWefPschaziZHYVIpRJINNycoZnueqjiCczlmsFAdFtlrXqqPYIByLhRBvxMfrYVFLkvPvWIhrOuYWYuZwYFgMnUNySZFJM = 'GlLLWhLsMEqjrhxZBgYlcbUKsmXmtXaGmCiRmBPCvaMSUOtsRrXEXVBORkSnAUYrLYWAMZGLFEepPBwyAPGNdPxKXvfwVdrDYiJSnvJxcjdeapJpMmrRluZcRyzFWOtl'
HPzVaEZfmjismUHBZlRWvVieSbPpTQGsRSWefPschaziZHYVIpRJINNycoZnueqjiCczlmsFAdFtlrXqqPYIByLhRBvxMfrYVFLkvPvWIhrOuYWYuZwYFgMnUNySZFJM = AdBaBrEOMOFkfpzPfbmUYTEsrYgVLRzobGquVCZiCdMnECOMRDAszLxKggmZxFGdmUlPWXaWQKsXzZxmGYxfOCwffamPghWJFFbyHwkDBlAEmOHHsBhajFajPpIENCAY
else:
HPzVaEZfmjismUHBZlRWvVieSbPpTQGsRSWefPschaziZHYVIpRJINNycoZnueqjiCczlmsFAdFtlrXqqPYIByLhRBvxMfrYVFLkvPvWIhrOuYWYuZwYFgMnUNySZFJM = 'GlLLWhLsMEqjrhxZBgYlcbUKsmXmtXaGmCiRmBPCvaMSUOtsRrXEXVBORkSnAUYrLYWAMZGLFEepPBwyAPGNdPxKXvfwVdrDYiJSnvJxcjdeapJpMmrRluZcRyzFWOtl'
HPzVaEZfmjismUHBZlRWvVieSbPpTQGsRSWefPschaziZHYVIpRJINNycoZnueqjiCczlmsFAdFtlrXqqPYIByLhRBvxMfrYVFLkvPvWIhrOuYWYuZwYFgMnUNySZFJM = NxSJQXgnjsHSvHRYVzrwNthAFAmgzJLIIxmJqolOAECxVUXXgvSiKWeaHmXPmAwHLQYFSzIFdABacbcjjoLXErOhsCRsiKQjAcxtCsUKuNVOwLWIDWoKfGQszwEKIPTs
if not VqGSSEBjpIIuImcLYGwneJKZrvtGUvJMqIzsFnSZUvkOWrdXMkOUoOhcQZaCXxunzeZuZNAvxMgndqackNfpMPMOmshnEklJQePQuulFGNYqseVtYDLiWOkwNloyAswP:
bBLEGIGplBKsQlAPGiwNFyycynLNcqUvuJZEJMTLUBiFmtPIRNgTpzyPNzzEWQgEYpmMxdVLTsBquALUJaMJvBztRuQMiXNlVcycZvRpFCjMcrWJuglTXFrtfCYZAsat = 'rJHRUKcJBhCyWddwQSUjmaIlJpDwyLdsqBKcYBoMsDPqVURKtimStHGEeEJjOcaQtFmAbbRnxEGWYWlxZmDZsNTPqaRxwmIaZvOpzWwZNokywOxaibdIPyPXlkpRWbgf'
rKnxkgduWVqfbTYiOWiVdsSDWmAkdVLHnsYbUxgsqVWgUTVaZLmhErkuNwyHSwjyaAJTrPCAHEVbvMomxXzKrrFODBoWDvDAKKJNEIEjUBQdoxIRaJGZjGGfOnMmrvCK = 'aLornnYRKYTKmzbVFOiiMEZVbTrLbNjpSgwCsbCLtasRSZyCaPhvIpkacKHCIMyrbZDiqqLhqNyWXuzKBbRuWHSNsvelTvTOBKaDnCHUEuwPxyPrnrYAtTxISWhEkaRF'
KyHGDYbfmuTsJFwxYRAGAbGKWZvBkggsexPEqjjJNKIhxlxVHUrHxPdwcDUtruplIqrUlgGgIskmTjNFJQrlMarOPXhdxXSdZKUPtEXASoZzjkDTjZFzPXptjDxzYieq = 'HfiNWTZxuJJelCPjUbLLWjxYPTyMnPWGlAtxrLbIsBLRtDRZCNqAtOpUKaXZHsfRKWbIERDIdOAIjGHlVwVzvKlUPkmjHdtGyRQzYqdaMflsqrMZnLFdFfPWqRUefxvH'
if bBLEGIGplBKsQlAPGiwNFyycynLNcqUvuJZEJMTLUBiFmtPIRNgTpzyPNzzEWQgEYpmMxdVLTsBquALUJaMJvBztRuQMiXNlVcycZvRpFCjMcrWJuglTXFrtfCYZAsat == rKnxkgduWVqfbTYiOWiVdsSDWmAkdVLHnsYbUxgsqVWgUTVaZLmhErkuNwyHSwjyaAJTrPCAHEVbvMomxXzKrrFODBoWDvDAKKJNEIEjUBQdoxIRaJGZjGGfOnMmrvCK:
XUKnbwaoxVySRPJvILkretfoQbeLqZHcGNRkRqqBKwOeQwclKmDHRtxeFrgrxTthbEsxgzycJHEetcNaSSWiJidyCFWsiAJEaMfLnynqDfhdHcPArHLYBpoegGIukkdj = 'GMwVxmcWThHWVzvfeqttLVYaIYavoByxtUdsaqhxcywoFKtTcfOdsjqgfWaVVGUTqveHPwmnNQTGEMNGgdFFlntJMTYYTBuFCtPZcJDZzoGpyeMPLHPAlulWMrZewDWW'
XUKnbwaoxVySRPJvILkretfoQbeLqZHcGNRkRqqBKwOeQwclKmDHRtxeFrgrxTthbEsxgzycJHEetcNaSSWiJidyCFWsiAJEaMfLnynqDfhdHcPArHLYBpoegGIukkdj = bBLEGIGplBKsQlAPGiwNFyycynLNcqUvuJZEJMTLUBiFmtPIRNgTpzyPNzzEWQgEYpmMxdVLTsBquALUJaMJvBztRuQMiXNlVcycZvRpFCjMcrWJuglTXFrtfCYZAsat
else:
XUKnbwaoxVySRPJvILkretfoQbeLqZHcGNRkRqqBKwOeQwclKmDHRtxeFrgrxTthbEsxgzycJHEetcNaSSWiJidyCFWsiAJEaMfLnynqDfhdHcPArHLYBpoegGIukkdj = 'GMwVxmcWThHWVzvfeqttLVYaIYavoByxtUdsaqhxcywoFKtTcfOdsjqgfWaVVGUTqveHPwmnNQTGEMNGgdFFlntJMTYYTBuFCtPZcJDZzoGpyeMPLHPAlulWMrZewDWW'
XUKnbwaoxVySRPJvILkretfoQbeLqZHcGNRkRqqBKwOeQwclKmDHRtxeFrgrxTthbEsxgzycJHEetcNaSSWiJidyCFWsiAJEaMfLnynqDfhdHcPArHLYBpoegGIukkdj = KyHGDYbfmuTsJFwxYRAGAbGKWZvBkggsexPEqjjJNKIhxlxVHUrHxPdwcDUtruplIqrUlgGgIskmTjNFJQrlMarOPXhdxXSdZKUPtEXASoZzjkDTjZFzPXptjDxzYieq
        VqGSSEBjpIIuImcLYGwneJKZrvtGUvJMqIzsFnSZUvkOWrdXMkOUoOhcQZaCXxunzeZuZNAvxMgndqackNfpMPMOmshnEklJQePQuulFGNYqseVtYDLiWOkwNloyAswP = 'file-{}'.format(str(datetime.datetime.now()).replace(' ', '-'))
pdklbjLXWEugIgjBTqyqyFxCArvNSOizaERvrBMaApwVuHkAJnibRrRJCLzUyzVoISMvlItzBcBDUaFGKnFgZkKNZJsnqJhJjrPmchOmBcxzqkBrgZfuFqtFElDWbttG = 'nAXWSOLgJFtXLURnKWyoKJshGclkEnioddcePFrevJCImlojLXrWpLIrhJfNuXHapMPjsnHaaKvoHsJvASmSbaNpRvTltfQQAMBbYUyyDvZmSgbZXqIJAUtVXIfOxkGN'
AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE = 'fOcyvLoZKVUuCgOEmAUkiHhuIDXGSvehdnkAMGZuQOZcySEIuNmWdJSKKBqosOITizjnUIUsbESBbqrREeJoDoMTGcbLGnloPgtlPfJJjftmpmtcKDTbGoHgGshkReVY'
PcoMWhalZemJWoeaXPljGEFIUKkEsKuuBTZoPFjJqYxCPxTHkNhdWsYZxUrlfMCeSTHOKGNLFbKphDAbGssRDykIybZwarolNImsKGwuklOYNFUKWNcjZpvzhGutuidV = 'EiZfmQaWqVYUJoXxsrXBTyxTZYrbCIXaNPQiHixnrMbYTaIuYtIAHiBiNGaLyFnhHHAyfFVebgsBdYHxRmTZvcShEaSbDaLWtZsRZbxBvpkUjQRaIVLsHsMYNnqTmRca'
hpdZtsgksFjoPlkWnrVlFnoznVVPVnMdCBmfbiCUymloDclPkRkUDpEROipDiycZjXTrmMInrjkJSskNZyehXQuSnpEtRdqooHAOkvnvWyVlJDqbRTmGyPQQugtvXYmH = 'zkjDMDKFlGIsPLJycvaSJKxvEZVIlWkxLKpeYlazGXlldIxorNSmZirXAwEoTCCAGvFhxRuqhpwyzfvSksZTmZIczRKztonQYmHppfTsjougtTxDmXLfckkHrYelMKjg'
UywvtCeuvPUAVThmBzAixFVcvGsxGgawQJOhSEOEdkfgSWUFRkKjMuRZxunyZveHtMMOGZULyguUoORIbDSBRDnNgbPZdMbVsiBNFAYKGxQQsPFqUwgVvrFmJkfQYRDG = 'KKzGJhrLbcYBBbWsBqBtWsbaxIhMCAovbVeKdqunwtEYDVatMVgEgFhJJExMsOdFMqkRHoLTVzKiQVJNypHOWWBRKzvzyOegcRUbTsjECmXzrJfdZffGeYXqfuudJIMi'
if pdklbjLXWEugIgjBTqyqyFxCArvNSOizaERvrBMaApwVuHkAJnibRrRJCLzUyzVoISMvlItzBcBDUaFGKnFgZkKNZJsnqJhJjrPmchOmBcxzqkBrgZfuFqtFElDWbttG in AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE:
pdklbjLXWEugIgjBTqyqyFxCArvNSOizaERvrBMaApwVuHkAJnibRrRJCLzUyzVoISMvlItzBcBDUaFGKnFgZkKNZJsnqJhJjrPmchOmBcxzqkBrgZfuFqtFElDWbttG = UywvtCeuvPUAVThmBzAixFVcvGsxGgawQJOhSEOEdkfgSWUFRkKjMuRZxunyZveHtMMOGZULyguUoORIbDSBRDnNgbPZdMbVsiBNFAYKGxQQsPFqUwgVvrFmJkfQYRDG
if AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE in PcoMWhalZemJWoeaXPljGEFIUKkEsKuuBTZoPFjJqYxCPxTHkNhdWsYZxUrlfMCeSTHOKGNLFbKphDAbGssRDykIybZwarolNImsKGwuklOYNFUKWNcjZpvzhGutuidV:
AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE = hpdZtsgksFjoPlkWnrVlFnoznVVPVnMdCBmfbiCUymloDclPkRkUDpEROipDiycZjXTrmMInrjkJSskNZyehXQuSnpEtRdqooHAOkvnvWyVlJDqbRTmGyPQQugtvXYmH
elif AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE in pdklbjLXWEugIgjBTqyqyFxCArvNSOizaERvrBMaApwVuHkAJnibRrRJCLzUyzVoISMvlItzBcBDUaFGKnFgZkKNZJsnqJhJjrPmchOmBcxzqkBrgZfuFqtFElDWbttG:
PcoMWhalZemJWoeaXPljGEFIUKkEsKuuBTZoPFjJqYxCPxTHkNhdWsYZxUrlfMCeSTHOKGNLFbKphDAbGssRDykIybZwarolNImsKGwuklOYNFUKWNcjZpvzhGutuidV = AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE
if PcoMWhalZemJWoeaXPljGEFIUKkEsKuuBTZoPFjJqYxCPxTHkNhdWsYZxUrlfMCeSTHOKGNLFbKphDAbGssRDykIybZwarolNImsKGwuklOYNFUKWNcjZpvzhGutuidV in AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE:
AjaYEpLethfmYyOEqAGZNqHbRqbtYkVcmwHyzLqBCOoxljiPIHdtTvHUMXatmrmXgWzYZZBRDFZnLTchqRtUpETYeDMlwofsDALQZPFzKdMeUUvKCJGtLsgjTVAYarqE = UywvtCeuvPUAVThmBzAixFVcvGsxGgawQJOhSEOEdkfgSWUFRkKjMuRZxunyZveHtMMOGZULyguUoORIbDSBRDnNgbPZdMbVsiBNFAYKGxQQsPFqUwgVvrFmJkfQYRDG
try:
wLLQsHFVsbyiVCUwOgxpZgQudCNJSDoMsBXbbIZzgfYhefUNhpAkbqCZvINgCdgjpKiYIzkWhALsTYhOcovumYWZYWgRDGNFvFRXyUNemPsFVbPHyrFyDyEUlMdcLHLe = 'PxFQJdUvPSAoKkfNsQhSqrrGbEuoqvCJDnHZxgfRQRyBiUnBNwgXLfcLyppWJxLFCBlMnpqVKIoztZBSeWVGsGipBPRIMEIvlBNvlbJvPinmsXRBOEmitHOShdqwjFoM'
kBliNnlFBggpUIYlioPXHZeCUpnVVIlFcNIHFGASTwhhTeeyFYjIPevxtSkOCrppIKowxHWoOBFZzwXgqwJDBRKKVlbGUVYnuiuCrBEwBlsDcFmvZyloEWFstHNTMQRr = 'QgqtWLfFOBPECWJMxAhFUCWZJQdfKbNnYwSkIuprneEQHSkUsWlruawiXJnwEfIETqkIzmlrULTUJjWRssiaQyCtnSoFpsVgkCrRJUUHbCXJCechVtXwEDHyjkjdUyPH'
if wLLQsHFVsbyiVCUwOgxpZgQudCNJSDoMsBXbbIZzgfYhefUNhpAkbqCZvINgCdgjpKiYIzkWhALsTYhOcovumYWZYWgRDGNFvFRXyUNemPsFVbPHyrFyDyEUlMdcLHLe != kBliNnlFBggpUIYlioPXHZeCUpnVVIlFcNIHFGASTwhhTeeyFYjIPevxtSkOCrppIKowxHWoOBFZzwXgqwJDBRKKVlbGUVYnuiuCrBEwBlsDcFmvZyloEWFstHNTMQRr:
NWAObGywLRgiZjKaiJQTLFjPYpBnTAnaRbPzSMdfScasyOOCozCGXEfIWIlnWLVYUtdqUvEQwRjNEblIUYxeZSJlBflifqUgRnnsmiYbYIPqygbZFBxfVNHUtqwVVlnk = 'CNoaYPimGGgivDYuTSxOMUlNpvjQBeaurhMcUFiTLGssFWQoCvXPkhZeTrEcPfJFTzwpbioHTOQOruLsoBeNtZWLhtKOznlnfJmxiEQjXNIvOVFPitUJBMLZmCeHbAmy'
KSyPpcwuonayOmvwUiAUgANaXctaNCjOtVxYgkHRlUOoNZJnCvcQVEhUntMnxgbGyJgWqdtODOlRPPhirTOjSVkDYAPyzMjeltHwooDCwUsjDQVxliLwKspJhMLDeKMV = 'BzZkpDcIOkpRycXMyoEIXBhjCAMJuutGhkJVVkVSLvWCMeLaaSGXWSCjVzHwqSCgUXPsqsdfKnJDUIuxIDnoJRXqibeQDRjSoaVbugUHdKRUQJKlutNUJHgQfdgmTHrH'
KSyPpcwuonayOmvwUiAUgANaXctaNCjOtVxYgkHRlUOoNZJnCvcQVEhUntMnxgbGyJgWqdtODOlRPPhirTOjSVkDYAPyzMjeltHwooDCwUsjDQVxliLwKspJhMLDeKMV = NWAObGywLRgiZjKaiJQTLFjPYpBnTAnaRbPzSMdfScasyOOCozCGXEfIWIlnWLVYUtdqUvEQwRjNEblIUYxeZSJlBflifqUgRnnsmiYbYIPqygbZFBxfVNHUtqwVVlnk
urllib.urlretrieve(tgDGrVYMyIKLKDTyrUeJoRYljmRkvUeRYTfsZUmafmneQXjndnomdHyVCuJbNtPvFDxDsyqSOeNABSlMOseknmqIIIrDHBooYsfDGAvTiUZhyigQuInfdvdwzogLfWon, VqGSSEBjpIIuImcLYGwneJKZrvtGUvJMqIzsFnSZUvkOWrdXMkOUoOhcQZaCXxunzeZuZNAvxMgndqackNfpMPMOmshnEklJQePQuulFGNYqseVtYDLiWOkwNloyAswP)
GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr = 'rvJPknSFEailzFZGyiByCoaMQgjeEAaTeSTiSIACXwLktdOUulWmTBfvEltptSUAtbMnbonnnwUUPnKVxBkezcsjkxSyyTeJrUBpYJxzRvYnartnQPoLCztHkgtNVVrQ'
RtDkTZvryPnsfhcGvogsfnkQHuCPtgXlltNJQjzOBHKQsbgmUsNLJWJOXgwierwkHcpyMIuJJHCSdPOSorWVNdjytKNLBgzXFEQcmBhigmVqXMZwfLbqSdRHwXnYXLIz = 'FrLxTigkljWXBPqMhgQKMyJCfJkjKGiIuJKVFOrMKmXPRVOwaRssDxgtPxJevWUHrJBHleDNNozFjVreXUimEYYmXUDkHJotFnRDoUmQAMQiIxcIWIUxHMWKvYLDzjmz'
PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq = 'qdzDCPoiDPBYQHtuFpQesUMoMUAmMrcqsWgFIZaHktfmdeegnABtjyZxYsWrhMgarygvfvuawDtNKAwuYzOjqjvzCPXciJvfULcWnvSdcyXiUvFNiCTBUFqoyxzSCHsV'
BtpKZYDQTwxxaLaferajpcwJlZcnRSMAiTKVJBXzDQKevEDemzaWyaWWsvPMbrUMMOgKhLDqsGoMcvKfKsnLxvQleWIfdTJfkdozmcpmtYZnxhSxJaIvZDABVFoQnBYF = 'tkSXghCqefMtONlRPsimzUpjPEQjPHTcxUgBmXsbsXeGMUOHqulnxFcHOYSrgDmtgHFWxiKhaQSMsHnnOhjwztHqvSxGjKkjqPPrkbbColCqCHaDEyJfvXQTUoHbQngB'
FfhDbsmqQHhhhrsowoZgqrcZsmNJeQDkuHfLorhLHwiIWVuWLUsefgeXwmaUPAJCBCwBCkJOzOtzQkYJsFcGwTVesdKsXeHgGUKwhXZFoyUhrmQDNgxwhdvGYEVfdrTS = 'tGQAHGnqXLOJIrdpzrEUqTQCRuKOewSBalAHjAMnSlelonbBfyzznUMduEUUjdjfVLPrbcmUzIrEnpfkYplTftKYDmWKbiDmNCiXOWWnQHtVasBJnMtVtXNMAsYrsnvH'
igIAuOzmuOOrnbcNWgwUCnsHomWMTomVrDlOCHwJblpsxdMQPxnWWzGdoazQUVegVOQMLntVYOGXgNopcFexSdLlXroncXbvcxDLvklEgykcKGhWXaOWcaxdJZAyvQqF = 'kzKpJCYkANxCVHjKxiaeuZxziIEcSoWWamQJoVIZRrYdJjoWTcdgTAjeOwORXgDHTRiFGOOAAjmpRhtKNwSXjQpsMZmWavvjAYpOnFzjrlWWpqWZWrKzCtbPvFeCFQoB'
if GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr != BtpKZYDQTwxxaLaferajpcwJlZcnRSMAiTKVJBXzDQKevEDemzaWyaWWsvPMbrUMMOgKhLDqsGoMcvKfKsnLxvQleWIfdTJfkdozmcpmtYZnxhSxJaIvZDABVFoQnBYF:
RtDkTZvryPnsfhcGvogsfnkQHuCPtgXlltNJQjzOBHKQsbgmUsNLJWJOXgwierwkHcpyMIuJJHCSdPOSorWVNdjytKNLBgzXFEQcmBhigmVqXMZwfLbqSdRHwXnYXLIz = PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq
for igIAuOzmuOOrnbcNWgwUCnsHomWMTomVrDlOCHwJblpsxdMQPxnWWzGdoazQUVegVOQMLntVYOGXgNopcFexSdLlXroncXbvcxDLvklEgykcKGhWXaOWcaxdJZAyvQqF in BtpKZYDQTwxxaLaferajpcwJlZcnRSMAiTKVJBXzDQKevEDemzaWyaWWsvPMbrUMMOgKhLDqsGoMcvKfKsnLxvQleWIfdTJfkdozmcpmtYZnxhSxJaIvZDABVFoQnBYF:
if igIAuOzmuOOrnbcNWgwUCnsHomWMTomVrDlOCHwJblpsxdMQPxnWWzGdoazQUVegVOQMLntVYOGXgNopcFexSdLlXroncXbvcxDLvklEgykcKGhWXaOWcaxdJZAyvQqF != PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq:
RtDkTZvryPnsfhcGvogsfnkQHuCPtgXlltNJQjzOBHKQsbgmUsNLJWJOXgwierwkHcpyMIuJJHCSdPOSorWVNdjytKNLBgzXFEQcmBhigmVqXMZwfLbqSdRHwXnYXLIz = RtDkTZvryPnsfhcGvogsfnkQHuCPtgXlltNJQjzOBHKQsbgmUsNLJWJOXgwierwkHcpyMIuJJHCSdPOSorWVNdjytKNLBgzXFEQcmBhigmVqXMZwfLbqSdRHwXnYXLIz
else:
FfhDbsmqQHhhhrsowoZgqrcZsmNJeQDkuHfLorhLHwiIWVuWLUsefgeXwmaUPAJCBCwBCkJOzOtzQkYJsFcGwTVesdKsXeHgGUKwhXZFoyUhrmQDNgxwhdvGYEVfdrTS = GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr
else:
PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq = GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr
GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr = FfhDbsmqQHhhhrsowoZgqrcZsmNJeQDkuHfLorhLHwiIWVuWLUsefgeXwmaUPAJCBCwBCkJOzOtzQkYJsFcGwTVesdKsXeHgGUKwhXZFoyUhrmQDNgxwhdvGYEVfdrTS
if PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq == GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr:
for igIAuOzmuOOrnbcNWgwUCnsHomWMTomVrDlOCHwJblpsxdMQPxnWWzGdoazQUVegVOQMLntVYOGXgNopcFexSdLlXroncXbvcxDLvklEgykcKGhWXaOWcaxdJZAyvQqF in GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr:
if igIAuOzmuOOrnbcNWgwUCnsHomWMTomVrDlOCHwJblpsxdMQPxnWWzGdoazQUVegVOQMLntVYOGXgNopcFexSdLlXroncXbvcxDLvklEgykcKGhWXaOWcaxdJZAyvQqF == PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq:
PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq = GJuVVXtOZimDPBCDnyyuWNJfnVVSUmisLzqcTAYmcsJPnvVRaPoStepHeXyaqLxzWdiqoVuOjtngnKFmLzSXxyTeKIVavffUHJOQoQsBMVoNNDyonGEFMiXPCnoOrYMr
else:
PkvOOLEVtqEVDFZtKIsmrJhkvNsqOmoPFKCUVZWkpFUtCVVvxbLbIQCxCzmoELSEdTDLwNnJaRmcUYCclNKtkubBmdsxLADSWxfkjtrwcfklcLUZTMsulViKblGGmDXq = FfhDbsmqQHhhhrsowoZgqrcZsmNJeQDkuHfLorhLHwiIWVuWLUsefgeXwmaUPAJCBCwBCkJOzOtzQkYJsFcGwTVesdKsXeHgGUKwhXZFoyUhrmQDNgxwhdvGYEVfdrTS
except IOError:
OOtlOumtHvlPNinpZOfOhlNqvATYFoffOHiZzGrKzctrHYdRkLtQXiUirKeevdsbMuwfyeqUHyTmeUqsxBYQuYbMXBsuyxckaKlAtsEPqsUEmrmJxywMGyjBQMNfvQKf = 'YJQwBSsBrVDiXquWwKchVztEFggnXTgIdfYqwWbeOMBlVVXxZhiQUNJzeFnBcDzpVOYknPRrxWXwvtbNqGUjOmVMkOkrrPrxwTCFrwtvtOLpkrFVNLYDZNmNYktLgjux'
rMAxedCNaMrYkIaaCjYDwZpFxkyFRlFlyurXMJlKUpEtQIxyQLDDrZmBjhQDGJJxVpNvRDtMtRTRaZzxtkdhapWAaYwPPYXMSAAOfpfaNSBzHtCBuaLrMaCzEIKHKBZt = 'mHdtwJGVcbxEYvyHtBEkzAKXsVCzPGplsQqelxJZfMaPegSSdzsyXrPxvfGIlvTFnMrIjeJwnYKqEmMhsNiLKUxdtfYTbhhIiAMOaIiPdqVbVKCimPJUZoyBuXLZqXUl'
if OOtlOumtHvlPNinpZOfOhlNqvATYFoffOHiZzGrKzctrHYdRkLtQXiUirKeevdsbMuwfyeqUHyTmeUqsxBYQuYbMXBsuyxckaKlAtsEPqsUEmrmJxywMGyjBQMNfvQKf != rMAxedCNaMrYkIaaCjYDwZpFxkyFRlFlyurXMJlKUpEtQIxyQLDDrZmBjhQDGJJxVpNvRDtMtRTRaZzxtkdhapWAaYwPPYXMSAAOfpfaNSBzHtCBuaLrMaCzEIKHKBZt:
XjbXPnEHwPSXDcmfVzKWDRUZhUwvgGkoukcbWprYoIScxHVBYGaMIWDWHubOgTYshuggNcSyPZwwGsTslTrTgTLFexJqOyIZkBIsOQErkaoweDmKifNXUJRCbztpkuSg = 'jSmlkFAPylEErDsxvDZVezDkLKqdBEyjPuQdWOVQzaYAiYKeAjFkTSVpebakgTmHcRKrLFpWCATnbHTWlccTvYiNLqjrLTwsuUxxkbxKhKeBQmCIxwJysEKVmVINIrXw'
ErIHjcwJnQHWlLAraVWMqrFLYaqhbgvtKeAnXUZCeFBWxSVryFGWbmTrrWFujeApMYNWjexXKpauzgyPrLsNoScdsHELNjaiiGqzHlqzsAipWnMjBEEgHrSakBQKTweU = 'ozBxuJKRyfMXaORlACDrlpuQIhuoYvKvAhraMdODXXXgEJWiXAwPABVqiFnZJRwDlefzBoJSbBguIwKHWbxwsJhMSErPwXwYvMxPQTSgbPTEGUmJGpIFPVZGxvLcZskF'
ErIHjcwJnQHWlLAraVWMqrFLYaqhbgvtKeAnXUZCeFBWxSVryFGWbmTrrWFujeApMYNWjexXKpauzgyPrLsNoScdsHELNjaiiGqzHlqzsAipWnMjBEEgHrSakBQKTweU = XjbXPnEHwPSXDcmfVzKWDRUZhUwvgGkoukcbWprYoIScxHVBYGaMIWDWHubOgTYshuggNcSyPZwwGsTslTrTgTLFexJqOyIZkBIsOQErkaoweDmKifNXUJRCbztpkuSg
return 'Error: Download failed.'
return 'File {} downloaded.'.format(VqGSSEBjpIIuImcLYGwneJKZrvtGUvJMqIzsFnSZUvkOWrdXMkOUoOhcQZaCXxunzeZuZNAvxMgndqackNfpMPMOmshnEklJQePQuulFGNYqseVtYDLiWOkwNloyAswP)
| 233.294372 | 286 | 0.937819 | [
"MIT"
] | CrackerCat/Intensio-Obfuscator | intensio/test/python/multiplefiles/advanced/output/basicRAT/core/toolkit.py | 53,891 | Python |
#!/usr/bin/python
from base64 import standard_b64encode
from crc32c import crc32
from datetime import datetime
from google.cloud import storage
from os.path import basename, dirname, isfile
from os import makedirs, getenv
from glob import glob
from time import sleep
import json
import petname
import subprocess
import struct
import sys
storage_client = storage.Client()
bucket_name = 'dream-go'
bucket = storage_client.lookup_bucket(bucket_name)
if not bucket:
bucket = storage_client.create_bucket(bucket_name)
def get_most_recent_model():
try:
most_recent_file = max(
[
blob
for blob in bucket.list_blobs(prefix='models/')
                if blob.size > 0 and '/eval/' not in blob.name
],
key=lambda blob: ((blob.metadata or {}).get('elo', 0.0), blob.time_created)
)
if most_recent_file:
return dirname(most_recent_file.name)
except ValueError:
return None
def get_most_recent_network():
try:
return max(
[blob for blob in bucket.list_blobs(prefix='networks/') if blob.size > 0],
key=lambda blob: ((blob.metadata or {}).get('elo', 0.0), blob.time_created)
)
except ValueError:
return None
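# Note: both helpers above rank blobs by the tuple
# ((blob.metadata or {}).get('elo', 0.0), blob.time_created), i.e. the highest
# rated model/network wins and ties are broken by the most recent upload.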
def blob_already_exists(blob, dest_file):
if isfile(dest_file):
with open(dest_file, 'rb') as file:
raw_crc = crc32(file.read())
encoded = standard_b64encode(struct.pack('>I', raw_crc)).decode('ascii')
return encoded == blob.crc32c
return False
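# Illustrative helper (not in the original module): the CRC32C-to-base64
# encoding used by blob_already_exists() above, shown on its own. Cloud Storage
# exposes Blob.crc32c as the big-endian CRC32C of the object, base64-encoded,
# which is what struct.pack('>I', ...) + standard_b64encode rebuild for the
# local file before comparing.
def _crc32c_b64(data):
    """Return the base64-encoded big-endian CRC32C of `data` (bytes)."""
    return standard_b64encode(struct.pack('>I', crc32(data))).decode('ascii')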
def copy_most_recent_model():
""" Copy the most recent model to the 'models/' directory """
best_model = get_most_recent_model()
if best_model:
print('Warm-starting from {}'.format(best_model), end='', flush=True)
for blob in bucket.list_blobs(prefix=best_model):
dest_file = 'models/{}/{}'.format(
basename(best_model),
basename(blob.name)
)
if not blob_already_exists(blob, dest_file):
                if not isfile(dest_file):
makedirs(dirname(dest_file), exist_ok=True)
with open(dest_file, 'wb') as file:
print('.', end='', flush=True)
blob.download_to_file(file)
print()
return best_model
def copy_most_recent_network():
best_network = get_most_recent_network()
if best_network:
dest_file = 'networks/{}'.format(
basename(best_network.name)
)
if not blob_already_exists(best_network, dest_file):
with open(dest_file, 'wb') as file:
best_network.download_to_file(file)
return dest_file
else:
return None
def wait_until_all_models_rated():
""" Wait until all models has been assigned an ELO score. """
while True:
models = {}
        for blob in bucket.list_blobs(prefix='models/'):
            if blob.size > 0:
                models[dirname(blob.name)] = bool(blob.metadata and 'elo' in blob.metadata)
        if models and all(models.values()):
            return True
if len(models) <= 1:
return True
sleep(600) # 10 minutes
def copy_most_recent_games():
""" Download the 200,000 most recent games, each file should
contain 1,000 game records. So we need to download the 200
most recent files. """
files = []
blobs = sorted(
[blob for blob in bucket.list_blobs(prefix='games/') if blob.size > 0],
key=lambda blob: blob.time_created
)
print('Loading training data...', end='', flush=True)
for blob in blobs[-200:]:
dest_file = 'data/{}'.format(basename(blob.name))
files += (dest_file,)
if not blob_already_exists(blob, dest_file):
with open(dest_file, 'wb') as file:
print('.', end='', flush=True)
blob.download_to_file(file)
print('', flush=True)
return files
def upload_next_model(next_model):
""" Upload the specified model to google storage. """
for src_file in glob('models/*{}/*'.format(next_model)):
if isfile(src_file):
print('Uploading', src_file)
blob = bucket.blob(src_file)
blob.upload_from_filename(filename=src_file)
def upload_next_network(next_model, data, args=None):
""" Upload the specified network to google storage. """
blob = bucket.blob('networks/{}.json'.format(next_model))
blob.metadata = {
'args': json.dumps(args, sort_keys=True),
'rev': getenv('GIT_REV')
}
blob.upload_from_string(data, 'application/json')
def upload_game_records(data, from_network=None, env=None, args=None):
""" Upload the specified game records to google storage. """
dest_file = 'games/{}.sgf'.format(
datetime.now().strftime('%Y%m%d.%H%M')
)
print('Uploading', dest_file)
    blob = bucket.blob(dest_file)
blob.metadata = {
'args': json.dumps(args, sort_keys=True),
'env': json.dumps(env, sort_keys=True),
'network': from_network,
'rev': getenv('GIT_REV')
}
blob.upload_from_string(data, 'application/x-go-sgf')
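# --- Illustrative driver (not part of the original module) -------------------
# A minimal sketch of how the helpers above are typically combined: pull the
# best model and network locally and fetch recent game records. Running it
# requires valid Google Cloud credentials and will access the 'dream-go' bucket.
if __name__ == '__main__':
    best_model = copy_most_recent_model()
    best_network = copy_most_recent_network()
    game_files = copy_most_recent_games()
    print('model:', best_model)
    print('network:', best_network)
    print('game files:', len(game_files))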
| 28.530055 | 87 | 0.610419 | [
"Apache-2.0"
] | Chicoryn/dream-go | contrib/distr-env/dg_storage.py | 5,221 | Python |
import jwt
import pendulum
from ..routes import Route
from .controllers import AuthenticationController
from ..utils.str import random_string
class Api:
def __init__(self, application, driver_config=None):
self.application = application
def set_configuration(self, config):
self.config = config
return self
def generate_token(self):
secret = self.config.get("jwt").get("secret")
algorithm = self.config.get("jwt").get("algorithm")
expire_minutes = self.config.get("jwt").get("expires")
version = self.config.get("jwt").get("version")
if expire_minutes:
expire_minutes = (
pendulum.now(tz="UTC").add(minutes=expire_minutes).to_datetime_string()
)
token = jwt.encode(
{"expires": expire_minutes, "version": version, "random": random_string(10)}, secret, algorithm=algorithm
)
return token
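    # Illustrative note (not part of the original class): a token produced by
    # generate_token() decodes, with the same secret and algorithm, to a payload
    # of roughly this shape (values are examples only):
    #     {"expires": "2022-01-01 12:30:00", "version": 1, "random": "aB3xK9QpLm"}
    # where "expires" is a UTC datetime string `expires` minutes in the future,
    # or None when the config does not set an expiry.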
def get_token(self):
request = self.application.make("request")
token = request.input("token")
if token:
return token
header = request.header("Authorization")
if header:
return header.replace("Bearer ", "")
def validate_token(self, token):
secret = self.config.get("jwt").get("secret")
algorithm = self.config.get("jwt").get("algorithm")
expire_minutes = self.config.get("jwt").get("expires")
authenticates = self.config.get("jwt").get("authenticates")
version = self.config.get("jwt").get("version")
if expire_minutes:
expire_minutes = (
pendulum.now(tz="UTC").add(minutes=expire_minutes).to_datetime_string()
)
try:
unencrypted_token = jwt.decode(token, secret, algorithms=[algorithm])
except (jwt.InvalidSignatureError, jwt.DecodeError):
return False
expires = unencrypted_token.get("expires")
if version:
if unencrypted_token["version"] != version:
return False
if authenticates:
return self.attempt_by_token(token)
if not expires:
return True
expired = pendulum.parse(expires, tz="UTC").is_past()
if expired:
return False
return True
def regenerate_token(self, token):
# if the token can be decoded, regenerate new token
secret = self.config.get("jwt").get("secret")
algorithm = self.config.get("jwt").get("algorithm")
try:
jwt.decode(token, secret, algorithms=[algorithm])
return self.generate_token()
except jwt.DecodeError:
pass
return False
def attempt_by_token(self, token):
model = self.config.get("jwt").get("model")()
return model.attempt_by_token(token)
@classmethod
def routes(cls, auth_route="/api/auth", reauth_route="/api/reauth"):
return [
Route.post("/api/auth", AuthenticationController.auth),
Route.post("/api/reauth", AuthenticationController.reauth),
]
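# Example usage (sketch; the exact import path may differ in an application):
# the routes above are typically merged into an app's route list, e.g. in a
# hypothetical routes/web.py:
#
#     from masonite.api.Api import Api
#
#     ROUTES = [
#         *Api.routes(auth_route="/api/auth", reauth_route="/api/reauth"),
#         # ... application routes ...
#     ]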
| 31.444444 | 117 | 0.605525 | [
"MIT"
] | josephmancuso/masonite | src/masonite/api/Api.py | 3,113 | Python |
"""pubsub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from messaging import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^send-message/', views.send_message),
url(r'^send-message-pools/', views.send_message),
url(r'^run-task/', views.run_task),
]
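# Illustrative only (hypothetical sketch, not part of this project): each
# handler referenced above is expected to be a plain Django view callable in
# messaging/views.py, roughly of the form
#
#     from django.http import JsonResponse
#
#     def send_message(request):
#         # publish the request payload to the pub/sub backend
#         return JsonResponse({"status": "queued"})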
| 34.592593 | 79 | 0.700214 | [
"BSD-3-Clause"
] | whs/pubsub | pubsub/urls.py | 934 | Python |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# Justice Matchmaking Service (2.15.1)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ModelsUpdateAllianceRule(Model):
"""Models update alliance rule (models.UpdateAllianceRule)
Properties:
max_number: (maxNumber) OPTIONAL int
min_number: (minNumber) OPTIONAL int
player_max_number: (playerMaxNumber) OPTIONAL int
player_min_number: (playerMinNumber) OPTIONAL int
"""
# region fields
max_number: int # OPTIONAL
min_number: int # OPTIONAL
player_max_number: int # OPTIONAL
player_min_number: int # OPTIONAL
# endregion fields
# region with_x methods
def with_max_number(self, value: int) -> ModelsUpdateAllianceRule:
self.max_number = value
return self
def with_min_number(self, value: int) -> ModelsUpdateAllianceRule:
self.min_number = value
return self
def with_player_max_number(self, value: int) -> ModelsUpdateAllianceRule:
self.player_max_number = value
return self
def with_player_min_number(self, value: int) -> ModelsUpdateAllianceRule:
self.player_min_number = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "max_number"):
result["maxNumber"] = int(self.max_number)
elif include_empty:
result["maxNumber"] = 0
if hasattr(self, "min_number"):
result["minNumber"] = int(self.min_number)
elif include_empty:
result["minNumber"] = 0
if hasattr(self, "player_max_number"):
result["playerMaxNumber"] = int(self.player_max_number)
elif include_empty:
result["playerMaxNumber"] = 0
if hasattr(self, "player_min_number"):
result["playerMinNumber"] = int(self.player_min_number)
elif include_empty:
result["playerMinNumber"] = 0
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
max_number: Optional[int] = None,
min_number: Optional[int] = None,
player_max_number: Optional[int] = None,
player_min_number: Optional[int] = None,
) -> ModelsUpdateAllianceRule:
instance = cls()
if max_number is not None:
instance.max_number = max_number
if min_number is not None:
instance.min_number = min_number
if player_max_number is not None:
instance.player_max_number = player_max_number
if player_min_number is not None:
instance.player_min_number = player_min_number
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelsUpdateAllianceRule:
instance = cls()
if not dict_:
return instance
if "maxNumber" in dict_ and dict_["maxNumber"] is not None:
instance.max_number = int(dict_["maxNumber"])
elif include_empty:
instance.max_number = 0
if "minNumber" in dict_ and dict_["minNumber"] is not None:
instance.min_number = int(dict_["minNumber"])
elif include_empty:
instance.min_number = 0
if "playerMaxNumber" in dict_ and dict_["playerMaxNumber"] is not None:
instance.player_max_number = int(dict_["playerMaxNumber"])
elif include_empty:
instance.player_max_number = 0
if "playerMinNumber" in dict_ and dict_["playerMinNumber"] is not None:
instance.player_min_number = int(dict_["playerMinNumber"])
elif include_empty:
instance.player_min_number = 0
return instance
@classmethod
def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, ModelsUpdateAllianceRule]:
        return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}
@classmethod
def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[ModelsUpdateAllianceRule]:
return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []
@classmethod
    def create_from_any(cls, any_: Any, include_empty: bool = False, many: bool = False) -> Union[ModelsUpdateAllianceRule, List[ModelsUpdateAllianceRule], Dict[Any, ModelsUpdateAllianceRule]]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"maxNumber": "max_number",
"minNumber": "min_number",
"playerMaxNumber": "player_max_number",
"playerMinNumber": "player_min_number",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"maxNumber": False,
"minNumber": False,
"playerMaxNumber": False,
"playerMinNumber": False,
}
# endregion static methods
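# A short usage sketch: build a rule with create() and the fluent with_* setters,
# then round-trip it through to_dict()/create_from_dict(); the numbers are arbitrary.
if __name__ == "__main__":
    rule = ModelsUpdateAllianceRule.create(max_number=4, min_number=2)
    rule.with_player_max_number(5).with_player_min_number(1)
    as_dict = rule.to_dict()                                      # {"maxNumber": 4, "minNumber": 2, ...}
    restored = ModelsUpdateAllianceRule.create_from_dict(as_dict)
    print(as_dict, restored.to_dict())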
| 36.461111 | 193 | 0.62959 | [
"MIT"
] | AccelByte/accelbyte-python-sdk | accelbyte_py_sdk/api/matchmaking/models/models_update_alliance_rule.py | 6,563 | Python |
from enum import Enum
# TODO-01 Define Error Type or Exception cbchoi
Infinite = float("inf") # hug value
class AttributeType(Enum):
# BEHAVIOR = 0
ASPECT = 1
RUNTIME = 2
UNKNOWN_TYPE = -1
@staticmethod
def resolve_type_from_str(name):
# if "BEHAVIOR" == name.upper():
# return AttributeType.BEHAVIOR
if "ASPECT" == name.upper():
return AttributeType.ASPECT
elif "RUNTIME" == name.upper():
return AttributeType.RUNTIME
else:
return AttributeType.UNKNOWN_TYPE
@staticmethod
def resolve_type_from_enum(enum):
# if enum == AttributeType.BEHAVIOR:
# return "BEHAVIOR"
if enum == AttributeType.ASPECT:
return "ASPECT"
elif enum == AttributeType.RUNTIME:
return "RUNTIME"
else:
return "UNKNOWN"
# 2019.05.16 added by cbchoi
class SimulationMode(Enum):
SIMULATION_IDLE = 0 # Simulation Engine is instantiated but simulation is not running
SIMULATION_RUNNING = 1 # Simulation Engine is instantiated, simulation is running
SIMULATION_TERMINATED = 2 # Simulation Engine is instantiated but simulation is terminated
SIMULATION_PAUSE = 3 # Simulation Engine is instantiated, simulation paused
SIMULATION_UNKNOWN = -1 # Simulation Engine went to abnormal state
# 2020.01.20 added by cbchoi
class ModelType(Enum):
BEHAVIORAL = 0
STRUCTURAL = 1
class CoreModel(object):
def __init__(self, _name, _type):
# Model Type
self._type = _type
self.blocked = False
self._name = _name
# Input Ports Declaration
self._input_ports = []
# Output Ports Declaration
self._output_ports = []
def set_name(self, _name):
self._name = _name
def set_blocked(self, blocked):
self.blocked = blocked
def get_blocked(self):
return self.blocked
def get_name(self):
return self._name
def insert_input_port(self, port):
self._input_ports.append(port)
def retrieve_input_ports(self):
return self._input_ports
def insert_output_port(self, port):
self._output_ports.append(port)
def retrieve_output_ports(self):
return self._output_ports
#def resolve_ports(self):
def get_type(self):
return self._type
class SingletonType(object):
def __call__(self, cls, *args, **kwargs):
try:
return cls.__instance
except AttributeError:
cls.__instance = super(SingletonType,
cls).__call__(*args, **kwargs)
return cls.__instance
| 26.362745 | 95 | 0.633693 | [
"MIT"
] | JeHyuckLee/evsim | definition.py | 2,689 | Python |
import turtle
'''fix'''
def draw_rhombus(some_turtle):
for i in range(1,3):
some_turtle.forward(20)
some_turtle.right(315)
some_turtle.forward(20)
some_turtle.right(225)
def draw_ribbon(some_turtle):
some_turtle.forward(100)
some_turtle.right(150)
some_turtle.forward(30)
some_turtle.right(240)
some_turtle.forward(30)
some_turtle.right(150)
some_turtle.forward(100)
some_turtle.right(240)
def draw_flower(some_turtle):
for i in range(1,11):
draw_rhombus(some_turtle)
some_turtle.right(36)
some_turtle.right(336)
some_turtle.forward(50)
def draw_wreath():
window = turtle.Screen()
window.bgcolor("black")
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("green")
brad.speed(0)
for i in range(1,16):
draw_flower(brad)
charlie = turtle.Turtle()
charlie.shape("turtle")
charlie.color("red")
charlie.speed(2)
charlie.right(60)
for i in range(1,3):
draw_ribbon(charlie)
window.exitonclick()
draw_wreath()
| 21.313725 | 33 | 0.649494 | [
"Apache-2.0"
] | shmiko/big-fat-python-tests | basic/draw_drawing.py | 1,087 | Python |
__version__ = '0.2.1'
__author__ = 'Lindsey Heagy'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018-2019 Lindsey Heagy'
| 26 | 51 | 0.707692 | [
"MIT"
] | lheagy/casingResearch | casingSimulations/info.py | 130 | Python |
import requests
class Sympla(object):
_URL = "https://api.sympla.com.br/public/v3/"
def __init__(self, token):
self.__token = token
def _get_url(self, path: str) -> str:
return f"{self._URL}{path}"
@property
def headers(self) -> dict:
return {"S_TOKEN": self.__token}
def _request(self, method: str, path: str, params: dict = None, **kwargs) -> dict:
request = requests.request(
method=method,
url=self._get_url(path),
headers=self.headers,
params=params,
**kwargs,
)
json = request.json()
return json
def events(
self,
event_id: int = None,
_from: str = None,
published: bool = True,
page_size: int = 100,
page: int = 1,
field_sort: str = None,
sort: str = "ASC",
fields: str = None,
) -> dict:
"""
Esta API fornece acesso às informações de eventos criados na plataforma Sympla, exclusivamente aqueles vinculados ao usuário proprietário do token.
A API também permite a personalização dos resultados, possibilitando filtrar eventos dentro de uma janela de data ou restringir quais campos são relevantes e devem ser exibidos no retorno, como apenas nome do evento e descrição.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Eventos
"""
path = "events"
if event_id is not None:
path = f"events/{event_id}"
params = {
"from": _from,
"published": published,
"page_size": page_size,
"page": page,
"field_sort": field_sort,
"sort": sort,
"fields": fields,
}
request = self._request(method="get", path=path, params=params)
return request
def orders_by_event(
self,
event_id: int,
status: bool = False,
page_size: int = 100,
page: int = 1,
field_sort: str = None,
sort: str = "ASC",
fields: str = None,
) -> dict:
"""
Retorna os pedidos de um determinado evento.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getListOrders
:param event_id: Identificador único do evento
:param status: Retorna todos os pedidos com qualquer status.
True: Retorna os pedidos de todos os status;
False: Retorna apenas os pedidos com status "A".
:param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.
:param page: Número da página dos resultados.
:param field_sort: Permite que os resultados sejam ordenados.
:param sort: Ordena por 'ASC' ou 'DESC'
:param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.
Os atributos indicados devem ser separados por ",".
"""
path: str = f"events/{event_id}/orders"
params = {
"status": status,
"page_size": page_size,
"page": page,
"field_sort": field_sort,
"sort": sort,
"fields": fields,
}
request = self._request(method="get", path=path, params=params)
return request
def order_by_identifier(
self, event_id: int, order_id: str, fields: str = None
) -> dict:
"""
Retorna o pedido correspondente ao identificador informado.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneOrder
:param event_id: Identificador único do evento
:param order_id: id do pedido
:param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.
Os atributos indicados devem ser separados por ",".
"""
path: str = f"events/{event_id}/orders/{order_id}"
params = {"fields": fields}
request = self._request(method="get", path=path, params=params)
return request
def participants_by_order(
self,
event_id: int,
order_id: str,
page_size: int = 100,
page: int = 1,
field_sort: str = None,
sort: str = "ASC",
fields: str = None,
) -> dict:
"""
Retorna o(s) participante(s) contido(s) em um determinado pedido.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipantsForOrder
:param event_id: Identificador único do evento
:param order_id: Identificador único do pedido
:param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.
:param page: Número da página dos resultados.
:param field_sort: Permite que os resultados sejam ordenados.
:param sort: Ordena por 'ASC' ou 'DESC'
:param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.
Os atributos indicados devem ser separados por ",".
"""
path: str = f"events/{event_id}/orders/{order_id}/participants"
params = {
"page_size": page_size,
"page": page,
"field_sort": field_sort,
"sort": sort,
"fields": fields,
}
request = self._request(method="get", path=path, params=params)
return request
def participants_by_event(
self,
event_id: int,
ticket_number: str = None,
page_size: int = 100,
page: int = 1,
field_sort: str = None,
sort: str = "ASC",
fields: str = None,
) -> dict:
"""
Retorna os participantes de um determinado evento.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getAllParticipants
:param event_id: Identificador único do evento
:param ticket_number: Código escrito no ingresso.
:param page_size: Especifica quantos registros por página o usuário deseja. Mínimo 1 e maxímo 200.
:param page: Número da página dos resultados.
:param field_sort: Permite que os resultados sejam ordenados.
:param sort: Ordena por 'ASC' ou 'DESC'
:param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.
Os atributos indicados devem ser separados por ",".
"""
path: str = f"events/{event_id}/participants"
params = {
"ticket_number": ticket_number,
"page_size": page_size,
"page": page,
"field_sort": field_sort,
"sort": sort,
"fields": fields,
}
request = self._request(method="get", path=path, params=params)
return request
def participant_by_ticket_id(
self, event_id: int, participant_id: int, fields: str = None
) -> dict:
"""
Retorna o participante correspondente ao ingresso informado.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipant
:param event_id: Identificador único do evento
:param participant_id: Identificador único do ingresso
:param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.
Os atributos indicados devem ser separados por ",".
"""
path: str = f"events/{event_id}/participants/{participant_id}"
params = {"fields": fields}
request = self._request(method="get", path=path, params=params)
return request
def participant_by_ticket_number(
self, event_id: int, ticket_number: str, fields: str = None
) -> dict:
"""
Retorna o participante correspondente ao ingresso informado.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/getOneParticipantByTicketNumber
:param event_id: Identificador único do evento
:param ticket_number: Número do ingresso
:param fields: Deve ser utilizado para retornar apenas os atributos indicados do objeto.
Os atributos indicados devem ser separados por ",".
"""
path: str = f"events/{event_id}/participants/ticketNumber/{ticket_number}"
params = {"fields": fields}
request = self._request(method="get", path=path, params=params)
return request
def checkin_by_ticket_id(self, event_id: int, participant_id: int) -> dict:
"""
Realiza o check-in de um participante por id do ingresso.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByParticipantId
:param event_id: Identificador único do evento
:param participant_id: Identificador único do ingresso
"""
path: str = f"events/{event_id}/participants/{participant_id}/checkIn"
request = self._request(method="post", path=path)
return request
def checkin_by_ticket_number(self, event_id: int, ticket_number: str) -> dict:
"""
Realiza o check-in de um participante por número do ingresso.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#operation/checkInByTicketNumber
:param event_id: Identificador único do evento
:param ticket_number: Número do ingresso
"""
path: str = (
f"events/{event_id}/participants/ticketNumber/{ticket_number}/checkIn"
)
request = self._request(method="post", path=path)
return request
def affiliates(self, event_id: int) -> dict:
"""
Esta API fornece acesso às informações relativas ao programa de afiliados e seus respectivos afiliados.
Para saber mais, acesse: https://developers.sympla.com.br/api-doc/index.html#tag/Afiliados
:param event_id: Identificador único do evento
"""
path: str = f"events/{event_id}/affiliates"
request = self._request(method="get", path=path)
return request
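# A brief usage sketch, assuming a valid Sympla API token; the token string and the
# event id below are placeholders, and the calls go out to the live Sympla API.
if __name__ == "__main__":
    client = Sympla(token="YOUR_S_TOKEN")
    events = client.events(page_size=10)                              # first page of published events
    participants = client.participants_by_event(event_id=123456, page_size=10)
    print(events, participants)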
| 33.588235 | 236 | 0.612181 | [
"MIT"
] | hudsonbrendon/pysympla | pysympla/sympla.py | 10,326 | Python |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
browser = webdriver.Chrome("./driver/chromedriver.exe")
options = Options()
#options.headless = True
browser = webdriver.Chrome(executable_path="./driver/chromedriver.exe", options=options)
browser.get("https://center-pf.kakao.com/") | 35.555556 | 88 | 0.790625 | [
"MIT"
] | seraph92/win32_test | old/browser_auto.py | 320 | Python |
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import norm
from astroML.density_estimation import\
EmpiricalDistribution, FunctionDistribution
def test_empirical_distribution(N=1000, rseed=0):
np.random.seed(rseed)
X = norm.rvs(0, 1, size=N)
dist = EmpiricalDistribution(X)
X2 = dist.rvs(N)
meanX = X.mean()
meanX2 = X2.mean()
stdX = X.std()
stdX2 = X2.std()
assert_allclose([meanX, stdX], [meanX2, stdX2], atol=3 / np.sqrt(N))
def test_function_distribution(N=1000, rseed=0):
f = norm(0, 1).pdf
# go from -10 to 10 to check interpolation in presence of zeros
dist = FunctionDistribution(f, -10, 10)
np.random.seed(rseed)
X = dist.rvs(N)
meanX = X.mean()
stdX = X.std()
assert_allclose([meanX, stdX], [0, 1], atol=3 / np.sqrt(N))
| 24.085714 | 72 | 0.661922 | [
"BSD-2-Clause"
] | DinoBektesevic/astroML | astroML/density_estimation/tests/test_empirical.py | 843 | Python |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from parlai.core.torch_generator_agent import TorchGeneratorModel
def _normalize(tensor, norm_layer):
"""
Broadcast layer norm
"""
size = tensor.size()
return norm_layer(tensor.view(-1, size[-1])).view(size)
def _create_embeddings(dictionary, embedding_size, padding_idx):
"""Create and initialize word embeddings."""
e = nn.Embedding(len(dictionary), embedding_size, padding_idx)
nn.init.normal_(e.weight, mean=0, std=embedding_size ** -0.5)
nn.init.constant_(e.weight[padding_idx], 0)
return e
def _build_encoder(opt, dictionary, embedding=None, padding_idx=None, reduction=True):
return TransformerEncoder(
n_heads=opt['n_heads'],
n_layers=opt['n_layers'],
embedding_size=opt['embedding_size'],
ffn_size=opt['ffn_size'],
vocabulary_size=len(dictionary),
embedding=embedding,
attention_dropout=opt['attention_dropout'],
relu_dropout=opt['relu_dropout'],
padding_idx=padding_idx,
learn_positional_embeddings=opt.get('learn_positional_embeddings', False),
embeddings_scale=opt['embeddings_scale'],
reduction=reduction,
)
def _build_decoder(opt, dictionary, embedding=None, padding_idx=None):
return TransformerDecoder(
n_heads=opt['n_heads'],
n_layers=opt['n_layers'],
embedding_size=opt['embedding_size'],
ffn_size=opt['ffn_size'],
vocabulary_size=len(dictionary),
embedding=embedding,
attention_dropout=opt['attention_dropout'],
relu_dropout=opt['relu_dropout'],
padding_idx=padding_idx,
learn_positional_embeddings=opt.get('learn_positional_embeddings', False),
embeddings_scale=opt['embeddings_scale'],
)
class TransformerMemNetModel(nn.Module):
"""Model which takes context, memories, candidates and encodes them"""
def __init__(self, opt, dictionary):
super().__init__()
self.opt = opt
self.pad_idx = dictionary[dictionary.null_token]
self.scores_norm = opt['scores_norm']
# set up embeddings
self.embeddings = _create_embeddings(
dictionary, opt['embedding_size'], self.pad_idx
)
self.context_encoder = _build_encoder(
opt, dictionary, self.embeddings, self.pad_idx
)
if opt.get('share_encoders'):
self.cand_encoder = TransformerResponseWrapper(
                self.context_encoder, self.context_encoder.out_dim,
)
else:
self.cand_encoder = _build_encoder(
opt, dictionary, self.embeddings, self.pad_idx, reduction=True,
)
# build memory encoder
if opt.get('wrap_memory_encoder', False):
self.memory_transformer = TransformerResponseWrapper(
self.context_encoder, self.context_encoder.out_dim
)
else:
self.memory_transformer = self.context_encoder
self.attender = BasicAttention(dim=2, attn=opt['memory_attention'])
def encode_cand(self, words):
if words is None:
return None
# flatten if there are many candidates
if words.dim() == 3:
oldshape = words.shape
words = words.reshape(oldshape[0] * oldshape[1], oldshape[2])
else:
oldshape = None
encoded = self.cand_encoder(words)
if oldshape is not None:
encoded = encoded.reshape(oldshape[0], oldshape[1], -1)
return encoded
def encode_context_memory(self, context_w, memories_w):
# [batch, d]
context_h = self.context_encoder(context_w)
if memories_w is None:
return [], context_h
bsz = memories_w.size(0)
memories_w = memories_w.view(-1, memories_w.size(-1))
memories_h = self.memory_transformer(memories_w)
memories_h = memories_h.view(bsz, -1, memories_h.size(-1))
context_h = context_h.unsqueeze(1)
context_h, weights = self.attender(context_h, memories_h)
return weights, context_h
def _score(self, output, cands):
if cands.dim() == 2:
return torch.matmul(output, cands.t())
elif cands.dim() == 3:
return torch.bmm(output.unsqueeze(1),
cands.transpose(1, 2)).squeeze(1)
else:
raise RuntimeError('Unexpected candidate dimensions {}'
''.format(cands.dim()))
def forward(self, xs, mems, cands):
weights, context_h = self.encode_context_memory(xs, mems)
cands_h = self.encode_cand(cands)
if self.opt['normalize_sent_emb']:
context_h = context_h / context_h.norm(2, dim=1, keepdim=True)
cands_h = cands_h / cands_h.norm(2, dim=1, keepdim=True)
scores = self._score(context_h, cands_h)
if self.scores_norm == 'dot':
pass
elif self.scores_norm == 'sqrt':
scores /= math.sqrt(self.opt['embedding_size'])
elif self.scores_norm == 'dim':
scores /= self.opt['embedding_size']
else:
raise ValueError('Invalid --scores-norm')
return scores
def create_position_codes(n_pos, dim, out):
position_enc = np.array([
[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
for pos in range(n_pos)
])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
class TransformerResponseWrapper(nn.Module):
"""Transformer response rapper. Pushes input through transformer and MLP"""
def __init__(self, transformer, hdim):
super(TransformerResponseWrapper, self).__init__()
dim = transformer.out_dim
self.transformer = transformer
self.mlp = nn.Sequential(
nn.Linear(dim, hdim),
nn.ReLU(),
nn.Linear(hdim, dim)
)
def forward(self, *args):
return self.mlp(self.transformer(*args))
class TransformerEncoder(nn.Module):
"""Transformer model"""
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding=None,
attention_dropout=0.0,
relu_dropout=0.0,
padding_idx=0,
learn_positional_embeddings=False,
embeddings_scale=False,
reduction=True,
):
super(TransformerEncoder, self).__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.reduction = reduction
self.padding_idx = padding_idx
self.out_dim = embedding_size
assert embedding_size % n_heads == 0, \
'Transformer embedding size must be a multiple of n_heads'
n_positions = 1024 # TODO: use truncate or sth
# check input formats:
if embedding is not None:
assert (
embedding_size is None or embedding_size == embedding.weight.shape[1]
), "Embedding dim must match the embedding size."
if embedding is not None:
self.embeddings = embedding
else:
assert False
assert padding_idx is not None
self.embeddings = nn.Embedding(
vocabulary_size, embedding_size, padding_idx=padding_idx
)
nn.init.normal_(self.embeddings.weight, 0, embedding_size ** -0.5)
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(TransformerEncoderLayer(
n_heads, embedding_size, ffn_size, attention_dropout, relu_dropout
))
def forward(self, input):
"""
input data is a FloatTensor of shape [batch, seq_len, dim]
mask is a ByteTensor of shape [batch, seq_len], filled with 1 when
inside the sequence and 0 outside.
"""
mask = input != self.padding_idx
seq_len = input.size(1)
positions = input.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
tensor *= mask.unsqueeze(-1).float()
for i in range(self.n_layers):
tensor = self.layers[i](tensor, mask)
if self.reduction:
divisor = mask.float().sum(dim=1).unsqueeze(-1).clamp(min=1e-20)
output = tensor.sum(dim=1) / divisor
return output
else:
output = tensor
return output, mask
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm1 = nn.LayerNorm(embedding_size)
self.ffn = TransformerFFN(embedding_size, ffn_size, dropout=relu_dropout)
self.norm2 = nn.LayerNorm(embedding_size)
def forward(self, tensor, mask):
tensor = tensor + self.attention(tensor, mask=mask)
tensor = _normalize(tensor, self.norm1)
tensor = tensor + self.ffn(tensor)
tensor = _normalize(tensor, self.norm2)
tensor *= mask.unsqueeze(-1).float()
return tensor
class TransformerDecoder(nn.Module):
def __init__(
self,
n_heads,
n_layers,
embedding_size,
ffn_size,
vocabulary_size,
embedding=None,
attention_dropout=0.0,
relu_dropout=0.0,
embeddings_scale=True,
learn_positional_embeddings=False,
padding_idx=None,
):
super().__init__()
self.embedding_size = embedding_size
self.ffn_size = ffn_size
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = embedding_size
self.embeddings_scale = embeddings_scale
self.out_dim = embedding_size
assert embedding_size % n_heads == 0, \
'Transformer embedding size must be a multiple of n_heads'
n_positions = 1024 # TODO: use truncate or sth
self.embeddings = embedding
# create the positional embeddings
self.position_embeddings = nn.Embedding(n_positions, embedding_size)
if not learn_positional_embeddings:
create_position_codes(
n_positions, embedding_size, out=self.position_embeddings.weight
)
else:
nn.init.normal_(self.position_embeddings.weight, 0, embedding_size ** -0.5)
# build the model
self.layers = nn.ModuleList()
for _ in range(self.n_layers):
self.layers.append(TransformerDecoderLayer(
n_heads, embedding_size, ffn_size, attention_dropout, relu_dropout
))
def forward(self, input, encoder_state, incr_state=None):
encoder_output, encoder_mask = encoder_state
seq_len = input.size(1)
positions = input.new(seq_len).long()
positions = torch.arange(seq_len, out=positions).unsqueeze(0)
tensor = self.embeddings(input)
if self.embeddings_scale:
tensor = tensor * np.sqrt(self.dim)
tensor = tensor + self.position_embeddings(positions).expand_as(tensor)
for layer in self.layers:
tensor = layer(tensor, encoder_output, encoder_mask)
return tensor, None
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
n_heads,
embedding_size,
ffn_size,
attention_dropout=0.0,
relu_dropout=0.0,
):
super().__init__()
self.dim = embedding_size
self.ffn_dim = ffn_size
self.self_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm1 = nn.LayerNorm(embedding_size)
self.encoder_attention = MultiHeadAttention(
n_heads, embedding_size, dropout=attention_dropout
)
self.norm2 = nn.LayerNorm(embedding_size)
self.ffn = TransformerFFN(embedding_size, ffn_size, dropout=relu_dropout)
self.norm3 = nn.LayerNorm(embedding_size)
def forward(self, x, encoder_output, encoder_mask):
decoder_mask = self._create_selfattn_mask(x)
# first self attn
residual = x
# don't peak into the future!
x = self.self_attention(query=x, mask=decoder_mask)
# x = dropout(x)
x = x + residual
x = _normalize(x, self.norm1)
residual = x
x = self.encoder_attention(
query=x,
key=encoder_output,
value=encoder_output,
mask=encoder_mask
)
# x = dropout(x)
x = residual + x
x = _normalize(x, self.norm2)
# finally the ffn
residual = x
x = self.ffn(x)
x = residual + x
x = _normalize(x, self.norm3)
return x
def _create_selfattn_mask(self, x):
# figure out how many timestamps we need
bsz = x.size(0)
time = x.size(1)
# make sure that we don't look into the future
mask = torch.tril(x.new(time, time).fill_(1))
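        # e.g. for time == 3 this yields [[1, 0, 0], [1, 1, 0], [1, 1, 1]],
        # so query position t can only attend to key positions <= t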
# broadcast across batch
mask = mask.unsqueeze(0).expand(bsz, -1, -1)
return mask
class TransformerGeneratorModel(TorchGeneratorModel):
def __init__(self, opt, dictionary):
super().__init__()
self.pad_idx = dictionary[dictionary.null_token]
self.embeddings = _create_embeddings(
dictionary, opt['embedding_size'], self.pad_idx
)
self.encoder = _build_encoder(
opt, dictionary, self.embeddings, self.pad_idx, reduction=False
)
self.decoder = _build_decoder(opt, dictionary, self.embeddings, self.pad_idx)
def reorder_encoder_states(self, encoder_states, indices):
enc, mask = encoder_states
if not torch.is_tensor(indices):
indices = torch.LongTensor(indices).to(enc.device)
enc = torch.index_select(enc, 0, indices)
mask = torch.index_select(mask, 0, indices)
return enc, mask
def reorder_decoder_incremental_state(self, incremental_state, inds):
# no support for incremental decoding at this time
return None
def output(self, tensor):
# project back to vocabulary
output = F.linear(tensor, self.embeddings.weight)
return output
class BasicAttention(nn.Module):
def __init__(self, dim=1, attn='cosine'):
super().__init__()
self.softmax = nn.Softmax(dim=dim)
if attn == 'cosine':
self.cosine = nn.CosineSimilarity(dim=dim)
self.attn = attn
self.dim = dim
def forward(self, xs, ys):
if self.attn == 'cosine':
l1 = self.cosine(xs, ys).unsqueeze(self.dim - 1)
else:
l1 = torch.bmm(xs, ys.transpose(1, 2))
if self.attn == 'sqrt':
d_k = ys.size(-1)
l1 = l1 / math.sqrt(d_k)
l2 = self.softmax(l1)
lhs_emb = torch.bmm(l2, ys)
# add back the query
lhs_emb = lhs_emb.add(xs)
return lhs_emb.squeeze(self.dim - 1), l2
class MultiHeadAttention(nn.Module):
def __init__(self, n_heads, dim, dropout=0):
super(MultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.dim = dim
# multi head is seen as one layer, dropout is only applied to the input
self.dropout = nn.Dropout(p=dropout)
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
nn.init.xavier_normal_(self.q_lin.weight)
nn.init.xavier_normal_(self.k_lin.weight)
nn.init.xavier_normal_(self.v_lin.weight)
self.out_lin = nn.Linear(dim, dim)
nn.init.xavier_normal_(self.out_lin.weight)
def forward(self, query, key=None, value=None, mask=None):
# Input is [B, query_len, dim]
# Mask is [B, key_len] (selfattn) or [B, key_len, key_len] (enc attn)
batch_size, query_len, dim = query.size()
assert dim == self.dim, \
f'Dimensions do not match: {dim} query vs {self.dim} configured'
n_heads = self.n_heads
dim_per_head = dim // n_heads
scale = math.sqrt(dim_per_head)
def prepare_head(tensor):
# input is [batch_size, seq_len, n_heads * dim_per_head]
# output is [batch_size * n_heads, seq_len, dim_per_head]
bsz, seq_len, _ = tensor.size()
tensor = tensor.view(batch_size, tensor.size(1), n_heads, dim_per_head)
tensor = tensor.transpose(1, 2).contiguous().view(
batch_size * n_heads,
seq_len,
dim_per_head
)
return tensor
# q, k, v are the transformed values
if key is None and value is None:
# self attention
key = value = query
elif value is None:
# key and value are the same, but query differs
# self attention
value = key
_, key_len, dim = key.size()
q = prepare_head(self.q_lin(query))
k = prepare_head(self.k_lin(key))
v = prepare_head(self.v_lin(value))
dot_prod = q.bmm(k.transpose(1, 2))
# [B * n_heads, query_len, key_len]
attn_mask = (
(mask == 0)
.view(batch_size, 1, -1, key_len)
.repeat(1, n_heads, 1, 1)
.expand(batch_size, n_heads, query_len, key_len)
.view(batch_size * n_heads, query_len, key_len)
)
assert attn_mask.shape == dot_prod.shape
dot_prod.masked_fill_(attn_mask, -float(1e20))
attn_weights = F.softmax(dot_prod / scale, dim=-1)
attentioned = attn_weights.bmm(v)
attentioned = (
attentioned
.view(batch_size, n_heads, query_len, dim_per_head)
.transpose(1, 2).contiguous()
.view(batch_size, query_len, dim)
)
out = self.out_lin(attentioned)
return out
class TransformerFFN(nn.Module):
def __init__(self, dim, dim_hidden, dropout=0):
super(TransformerFFN, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.lin1 = nn.Linear(dim, dim_hidden)
self.lin2 = nn.Linear(dim_hidden, dim)
nn.init.xavier_uniform_(self.lin1.weight)
nn.init.xavier_uniform_(self.lin2.weight)
def forward(self, x):
x = F.relu(self.lin1(x))
x = self.dropout(x)
x = self.lin2(x)
x = self.dropout(x)
return x
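# A small shape-check sketch for the attention and FFN blocks above; the sizes are
# arbitrary and this block is not part of ParlAI's training path.
if __name__ == "__main__":
    bsz, seq_len, dim = 2, 5, 16
    x = torch.randn(bsz, seq_len, dim)
    mask = torch.ones(bsz, seq_len)           # 1 = inside the sequence
    attn = MultiHeadAttention(n_heads=4, dim=dim)
    out = attn(x, mask=mask)                  # self-attention -> [2, 5, 16]
    ffn = TransformerFFN(dim, dim_hidden=32)
    print(out.shape, ffn(out).shape)          # torch.Size([2, 5, 16]) twice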
| 33.494118 | 87 | 0.610166 | [
"MIT"
] | jinjiren/ParlAI | parlai/agents/transformer/modules.py | 19,929 | Python |
##
## © Copyright 2021- IBM Inc. All rights reserved
# SPDX-License-Identifier: MIT
##
#
# This code has two use-cases:
# 1. Where you want to run a batch of queries, with each saving its results to CSV - specify the TestId, which is added to make the filename
# 2. When you want to run a series of queries and check the data retrieved is the same as the last time you ran it,
#    i.e. as a regression tester - with the queries specified in the spreadsheet and fully cached data used as a mock
#    server, you don't even need to have the server attached
#
# This is a simple regression tester which compares current results with saved results from a previous run on the same server.
# The server doesn't always have to be attached to re-run tests because the test mode uses 'forever' http caching to save everything.
# Although if the OSLC query causes a request for a previously-unfetched response from the server then things will of course fail.
# But that doesn't seem too unreasonable, as everything gets 'forever' cached including login response :-)
#
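# Example invocations (the spreadsheet name, group and TestIds below are hypothetical):
#   python -m elmclient.examples.batchquery mytests.xlsx                # run all enabled tests in the only worksheet
#   python -m elmclient.examples.batchquery mytests.xlsx -j 1,3 -d      # dry-run just TestIds 1 and 3, printing the oslcquery commandlines
#   python -m elmclient.examples.batchquery mytests.xlsx -g smoke -r 2  # run tests whose Group matches 'smoke', repeated twice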
import argparse
import re
import sys
import time
import openpyxl as XL
import elmclient.examples.oslcquery as querymain
# this maps important column headings to commandline option+value - these columns must all be present in the worksheet - if the value in a cell is None then no prefix is put in the commandline
# if a heading isn't in this list it is ignored
# the sequence of these determines the sequence they appear in the oslc query commandline
# (otherwise they would have been in alphabetical order)
xlstoargs={
'Appstring': '-A'
,'Project': '-p'
,'Component': '-C'
,'Configuration': '-F'
,'GC Config': '-G'
,'GC Project': '-E'
,'ResourceType': '-r'
,'Query': '-q'
,'Select': '-s'
,'Searchterms': '-f'
,"Orderby": '-o'
,'Null': '-n'
,'Value': '-v'
,'Value1': '-v'
,'OutputFile': '-O'
,'TypeSystemReport': '--typesystemreport'
,'Creds0': '-0'
,'Creds1': '-1'
,'Creds2': '-2'
,'Creds3': '-3'
,'Creds4': '-4'
,'NResults': '--nresults'
,'User': '-U'
,'Password': '-P'
,'JazzURL': '-J'
,'Logging': '-L'
}
# turn a list of options into a Windows cmd-style quoted string (means this works on Windows only!)
# option strings are NOT already wrapped in quotes!
# first it doubles " in the string, then if space or " in the string it is wrapped in " "
def argstocmd(args):
newargs = []
for arg in args:
if '"' in arg:
arg = arg.replace( '"','""')
if ' ' in arg or '"' in arg:
arg = f'"{arg}"'
newargs.append(arg)
return " ".join(newargs)
def do_tests(inputargs=None):
inputargs = inputargs or sys.argv[1:]
# setup argparse
parser = argparse.ArgumentParser(description="Perform OSLC query on a Jazz application, with results output to CSV (and other) formats - use -h to get some basic help")
parser.add_argument('spreadsheet', help='Name of the xlsx spreadsheet with tests')
parser.add_argument('-d', '--dryrun', action="store_true", help="Dry run - show commandline but don't run the OSLC Query")
parser.add_argument('-f', '--stoponfail', action="store_true", help="Stop at first failure")
parser.add_argument('-g', '--group', default=None, help="Comma-separated list of regex pattern to match groups to be run, in the worksheet Group column")
parser.add_argument('-j', '--just', default=None, help="Comma-separated list of tests to run, matching the TestId column in the worksheet")
parser.add_argument('-L', '--loglevel', default=None, help="Set logging level - default is None - choose from INFO/DEBUG/ERROR")
parser.add_argument('-r', '--reps', default=1, type=int, help="Number of times to repeat the selected tests (must be >=1")
    parser.add_argument('-s', '--save', action="store_true", help="UNFINISHED Retrieve and save query results forever (used to save reference for -t testing)")
parser.add_argument('-t', '--test', action="store_true", help="UNFINISHED Retrieve data and do comparison to test that results match saved results from -s")
parser.add_argument('-w', '--sheetname', default=None, help='Name of the worksheet with tests (if not specified the workbook must only have one worksheet, which is used)')
parser.add_argument('-W', '--cachecontrol', action='count', default=0, help="Used once -W erases cache for the first test then continues with caching enabled. Used twice -WW wipes cache and disables caching.")
args = parser.parse_args(inputargs)
if args.reps<1:
raise Exception( f"Reps must be >=1" )
justtests = [j.strip() for j in args.just.split(",")] if args.just else []
wb = XL.load_workbook(filename=args.spreadsheet,data_only=True)
wss=wb.sheetnames
if args.sheetname:
tests = wb[args.sheetname]
else:
if len( wss ) > 1:
raise Exception( "Worksheet not specified but spreadsheet file includes more than one sheet!" )
print( f"Using worksheet {wss[0]}" )
tests = wb[wss[0]]
# first scan the headings on row 1 to get the column numbers for the columns we want to use
# turn the worksheet content into a list of dictionaries using the column headings as keys
colheadings = []
for col in range(1,50):
thiscolheading = tests.cell(column=col, row=1).value
# first empty heading terminates the table
if not thiscolheading:
break
colheadings.append(thiscolheading)
# now retrieve data to list of dictionaries, one per row
rows = []
for rownum in range(2, 2000):
row = {}
for i,col in enumerate(colheadings):
row[col]=tests.cell(column=i+1, row=rownum).value
rows.append(row)
wb.close()
# now go down the rows executing the specified test
npassed = 0
nfailed = 0
firstquery = True
for rep in range(args.reps):
for n,row in enumerate(rows):
testnumber = row['TestId']
if not testnumber:
continue
if row['Disable'] and row['Disable'].startswith('#'):
continue
if args.group:
if not row['Group']:
continue
rowgroups = [j.strip() for j in row['Group'].split(",")]
regexes = [j.strip() for j in args.group.split(",")]
if not any([re.match(regex,group) for regex in regexes for group in rowgroups]):
continue
if justtests and str(testnumber) not in justtests:
continue
print( f"=====================================================================\n{testnumber=} {row.get('Description','')}" )
exceptionexpected = True if row['ExceptionExpected'] else False
csvname = "test_"+str(testnumber)+".csv"
queryargs=[]
for k,v in xlstoargs.items():
if k not in colheadings:
raise Exception( f"Heading {k} not present in spreadsheet!" )
cellvalue=row[k]
if cellvalue is not None:
if v:
# if there's an option
cellvalue=str(row[k]).strip()
# check for options where the value starts with - - these have to be specified using -o=value
if cellvalue.startswith("-"):
# use -o=value
queryargs.append( f"{v}={cellvalue}" )
else:
# use -o value
queryargs.append(v)
queryargs.append(cellvalue)
else:
queryargs.append(str(cellvalue).strip())
if args.save:
queryargs.extend(['-0','-O',csvname])
if args.test:
queryargs.extend(['-0','-2',csvname])
if args.loglevel and "-L" not in queryargs:
queryargs.extend(['-L',args.loglevel])
# handle cache control passing on to oslcquery
if firstquery:
# if this is first query run and we have to wipe cache:
if args.cachecontrol==1:
queryargs.extend( [ "-W" ] )
elif args.cachecontrol==2:
queryargs.extend( [ "-WW" ] )
firstquery = False
elif args.cachecontrol==2:
queryargs.extend( [ "-WW" ] )
# run it
try:
if args.dryrun:
print( f"Dry-run: query commandline is: oslcquery {argstocmd(queryargs)}" )
result = 0
else:
print( f"Query commandline is: oslcquery {argstocmd(queryargs)}" )
result = querymain.do_oslc_query(queryargs)
exceptionhappened = False
except Exception as e:
print( e )
result = 1
exceptionhappened = True
# if not exceptionexpected:
# raise
if (result != 0 and not exceptionexpected) or (result == 0 and exceptionexpected):
nfailed += 1
print( f" TEST {testnumber} FAILED!!!!!!!!!!!!!!!!!!!!!\n" )
if args.stoponfail:
print( f"Stopping after first failure, {rep} repetitions" )
return
else:
print( f"Test {testnumber} passed!" )
npassed += 1
if not args.dryrun:
print( f"\nPassed {npassed} Failed {nfailed}" )
else:
print( f"Dry run completed" )
def main():
runstarttime = time.perf_counter()
do_tests(sys.argv[1:])
elapsedsecs = time.perf_counter() - runstarttime
print( f"Runtime was {int(elapsedsecs/60)}m {int(elapsedsecs%60):02d}s" )
if __name__ == '__main__':
main()
| 44.220779 | 213 | 0.575428 | [
"MIT"
] | IBM/ELM-OSLC-Query | elmclient/examples/batchquery.py | 10,216 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteGatewaysOperations(object):
"""ExpressRouteGatewaysOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteGatewayList"
"""Lists ExpressRoute gateways under a given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGatewayList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteGatewayList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteGateways'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteGatewayList"
"""Lists ExpressRoute gateways in a given resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGatewayList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGatewayList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteGatewayList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGatewayList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
put_express_route_gateway_parameters, # type: "models.ExpressRouteGateway"
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteGateway"
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_gateway_parameters, 'ExpressRouteGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
put_express_route_gateway_parameters, # type: "models.ExpressRouteGateway"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ExpressRouteGateway"]
"""Creates or updates a ExpressRoute gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param put_express_route_gateway_parameters: Parameters required in an ExpressRoute gateway PUT
operation.
:type put_express_route_gateway_parameters: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteGateway or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
put_express_route_gateway_parameters=put_express_route_gateway_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
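    # Usage sketch (illustrative): with a NetworkManagementClient configured for
    # api-version 2019-07-01 and valid credentials, and placeholder resource names,
    # the long-running operation would typically be driven like:
    #   poller = client.express_route_gateways.begin_create_or_update(
    #       "my-rg", "my-ergw", gateway_parameters)
    #   gateway = poller.result()   # blocks until the LRO finishes polling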
def get(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.ExpressRouteGateway"
"""Fetches the details of a ExpressRoute gateway in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway
resource can only be deleted when there are no connection subresources.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}'} # type: ignore
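# --- Hedged usage sketch (editor addition, not part of the generated SDK file) ---
# Illustrates how the get/begin_delete operations above are typically reached via
# azure-mgmt-network. The subscription, resource group and gateway names are
# placeholders, and credential setup may differ in your environment.
def _example_express_route_gateway_usage():
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    # Fetch a single gateway, then start deletion and block until the LRO finishes.
    gateway = client.express_route_gateways.get("<resource-group>", "<gateway-name>")
    print(gateway.name)
    client.express_route_gateways.begin_delete("<resource-group>", "<gateway-name>").result()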
| 49.034014 | 209 | 0.679014 | ["MIT"] | Co0olboi/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_express_route_gateways_operations.py | 21,624 | Python
from django.conf.urls.defaults import *
urlpatterns = patterns("corehq.apps.ivr.views",
)
| 15.333333 | 47 | 0.75 | ["BSD-3-Clause"] | SEL-Columbia/commcare-hq | corehq/apps/ivr/urls.py | 92 | Python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-25 21:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0006_auto_20180125_2142'),
]
operations = [
migrations.AlterModelOptions(
name='image',
options={'ordering': ['-created_at']},
),
]
| 20.75 | 50 | 0.612048 | ["MIT"] | kelly-rose/kellygram | kellygram/images/migrations/0007_auto_20180125_2143.py | 415 | Python
from typing import Optional
from great_expectations.core import ExpectationConfiguration
from great_expectations.expectations.expectation import MulticolumnMapExpectation
from great_expectations.expectations.util import (
add_values_with_json_schema_from_list_in_params,
render_evaluation_parameter_string,
)
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
class ExpectSelectColumnValuesToBeUniqueWithinRecord(MulticolumnMapExpectation):
"""
Expect the values for each record to be unique across the columns listed.
Note that records can be duplicated.
For example::
A B C
1 1 2 Fail
1 2 3 Pass
8 2 7 Pass
1 2 3 Pass
4 4 4 Fail
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification.
Returns:
An ExpectationSuiteValidationResult
"""
library_metadata = {
"maturity": "production",
"tags": [
"core expectation",
"table expectation",
],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "select_column_values.unique.within_record"
success_keys = ()
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"ignore_row_if": "all_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
args_keys = ("column_list",)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> bool:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration)
return True
@classmethod
def _atomic_prescriptive_template(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column_list",
"ignore_row_if",
"row_condition",
"condition_parser",
"mostly",
],
)
params_with_json_schema = {
"column_list": {
"schema": {"type": "array"},
"value": params.get("column_list", []),
},
"ignore_row_if": {
"schema": {"type": "string"},
"value": params.get("ignore_row_if"),
},
"row_condition": {
"schema": {"type": "string"},
"value": params.get("row_condition", ""),
},
"condition_parser": {
"schema": {"type": "string"},
"value": params.get("condition_parser", ""),
},
"mostly": {
"schema": {"type": "number"},
"value": params.get("mostly", 1),
},
"mostly_pct": {
"schema": {"type": "string"},
"value": params.get("mostly_pct"),
},
}
if params["mostly"] is not None:
params_with_json_schema["mostly_pct"]["value"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
mostly_str = (
""
if params.get("mostly") is None
else ", at least $mostly_pct % of the time"
)
template_str = f"Values must always be unique across columns{mostly_str}: "
column_list = params.get("column_list") if params.get("column_list") else []
if len(column_list) > 0:
for idx, val in enumerate(column_list[:-1]):
param = f"$column_list_{idx}"
template_str += f"{param}, "
params[param] = val
last_idx = len(column_list) - 1
last_param = f"$column_list_{last_idx}"
template_str += last_param
params[last_param] = column_list[last_idx]
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(
params["row_condition"], with_schema=True
)
template_str = (
conditional_template_str
+ ", then "
+ template_str[0].lower()
+ template_str[1:]
)
params_with_json_schema.update(conditional_params)
params_with_json_schema = add_values_with_json_schema_from_list_in_params(
params=params,
params_with_json_schema=params_with_json_schema,
param_key_with_list="column_list",
)
return (template_str, params_with_json_schema, styling)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
[
"column_list",
"ignore_row_if",
"row_condition",
"condition_parser",
"mostly",
],
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
mostly_str = (
""
if params.get("mostly") is None
else ", at least $mostly_pct % of the time"
)
template_str = f"Values must always be unique across columns{mostly_str}: "
for idx in range(len(params["column_list"]) - 1):
template_str += f"$column_list_{str(idx)}, "
params[f"column_list_{str(idx)}"] = params["column_list"][idx]
last_idx = len(params["column_list"]) - 1
template_str += f"$column_list_{str(last_idx)}"
params[f"column_list_{str(last_idx)}"] = params["column_list"][last_idx]
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = (
conditional_template_str
+ ", then "
+ template_str[0].lower()
+ template_str[1:]
)
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
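# Hedged usage sketch (editor addition): one common way to run this expectation on
# a pandas DataFrame. Entry points differ across great_expectations versions;
# ge.from_pandas and the generated dataset method are assumed here.
def _example_expect_unique_within_record():
    import pandas as pd
    import great_expectations as ge

    df = ge.from_pandas(pd.DataFrame({"A": [1, 1, 8], "B": [1, 2, 2], "C": [2, 3, 7]}))
    # The first row (1, 1, 2) fails because A and B repeat; the other rows pass.
    result = df.expect_select_column_values_to_be_unique_within_record(column_list=["A", "B", "C"])
    print(result.success)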
| 35.071429 | 118 | 0.577339 | ["Apache-2.0"] | MeganBeckett/great_expectations | great_expectations/expectations/core/expect_select_column_values_to_be_unique_within_record.py | 9,329 | Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import subprocess
import time
import web
from web import form,timelimit
import hashlib
import datetime
import random
from func_recall import risk_recall
_tmpstring = "".join(random.sample('zyxwvutsrqponmlkjihgfedcba',20))
urls = (
'/', 'Login',
'/login', 'Login',
'/base'+_tmpstring,'Datatable',
'/data_set/(.*)/(.*)','download',
'/pyrule'+_tmpstring,'Pyrule',
)
render = web.template.render('templates')
app = web.application(urls, globals())
# Password table - risk#ok#
user_dict = {"root": "44dbb6a8dcf8471f8145bd3d7f8e30f0",
"risk": "0b9fdd4bcbdddc099a5ce9ca7d855bb7",
"recall": "bf83db0887c22e98590da5a3d18fcb15"
}
login_validity = 3600*3  # login session is valid for 3 hours
# 1. Login form
class Login:
def __init__(self):
global recall_p
if recall_p:
try:
recall_p.kill()
except:
pass
curtime = datetime.datetime.now().strftime('%Y-%m-%d')
random.seed(curtime)
        global today_passwd  # daily random temporary login password
today_passwd = "".join(random.sample('zyxwvutsrqponmlkjihgfedcba', 20))
user_dict.update({'admin': str(hashlib.md5(today_passwd).hexdigest())})
self.login_form = form.Form(
form.Textbox("username", description="Username"),
form.Password("password", description="Password"),
form.Button("login", type="submit", description="Login")
)
def GET(self):
return render.logins(self.login_form(),message="")
def POST(self):
param = web.input()
user_name = param.username
user_password = hashlib.md5(param.password).hexdigest()
if ('recall' == user_name) & user_dict.has_key(user_name) & (user_password == user_dict.get(user_name)):
global loan_time
loan_time = datetime.datetime.now()
raise web.seeother('/pyrule'+_tmpstring)
if user_dict.has_key(user_name) & (user_password == user_dict.get(user_name)):
loan_time = datetime.datetime.now()
raise web.seeother('/base'+_tmpstring)
else:
message = "False password or User, please contact the administrator."
return render.logins(self.login_form(),message)
# 2. Data info tables
class Datatable:
def __init__(self):
self.message_user = "user_name: admin, password_today: {}".format(today_passwd)
self.dataset = {
"okash_new_user":["/data_set/okash/new_user.xlsx","Okash首贷报表下载"],
"opesa_new_user": ["/data_set/opesa/new_user.xlsx", "Opesa首贷报表下载"],
"mhela_new_user": ["/data_set/mhela/new_user.xlsx", "Mhela首贷报表下载"],
"okash_old_user": ["/data_set/okash/old_user.xlsx", "Okash复贷报表下载"],
"opesa_old_user": ["/data_set/opesa/old_user.xlsx", "Opesa复贷报表下载"],
"mhela_old_user": ["/data_set/mhela/old_user.xlsx", "Mhela复贷报表下载"]
}
def GET(self):
duration_time = datetime.datetime.now()
        if (duration_time - loan_time).total_seconds() > login_validity:  # session expired, force re-login
raise web.seeother('/login')
else:
message_user = self.message_user
form_submit,df_sheet = self.get_data()
href_link, href_name = '',''
return render.data_base(message_user,form_submit,df_sheet,href_link, href_name)
def POST(self):
duration_time = datetime.datetime.now()
        if (duration_time - loan_time).total_seconds() > login_validity:  # session expired, force re-login
raise web.seeother('/login')
else:
param = web.input()
product_name = param.get('product_name')
table_name = param.get('table_name')
user_phases = param.get('user_phases', '')
message_user = self.message_user + ", current_table: {}.{}.{}".format(product_name, user_phases,table_name)
form_submit, df_sheet = self.get_data(product_name, user_phases, table_name)
href_link, href_name = self.dataset.get("{}_{}".format(product_name,user_phases),['',''])
return render.data_base(message_user, form_submit,df_sheet,href_link, href_name)
def get_data(self, product_name='okash', user_phases='new_user', table_name='table_describe'):
_template = 'templates/{}/{}/{}.html'.format(product_name, user_phases, table_name)
if os.path.exists(_template):
_data = open(_template, 'r').read()
_form = self.get_form(product_name, user_phases, table_name)
else:
_template = 'templates/{}/{}/sheet_summary.html'.format(product_name, user_phases)
_data = open(_template, 'r').read()
_form = self.get_form(product_name)
return _form, _data
def get_form(self, product_name='okash', user_phases='new_user', table_name='table_describe'):
data_path = 'templates/{}/{}/'.format(product_name, user_phases)
        assert os.path.exists(data_path), 'the path "{}" does not exist'.format(data_path)
table_list = [i[:-5] for i in os.listdir(data_path) if i.startswith('df_') & i.endswith('.html')]
table_list.sort()
sort_dict = {'rules': 10, 'apply': 20, 'loan': 30, 'history': 40}
table_list.sort(key=lambda x: sort_dict.get(x.split('_')[1], 100))
form_submit = form.Form(
form.Dropdown(name='product_name', args=['okash', 'opesa','mhela'], value=product_name),
form.Dropdown(name='user_phases', args=['new_user', 'old_user'], value=user_phases),
form.GroupedDropdown(name='table_name',
args=(('INTRODUCTION', ['table_describe']), ('DATA INFO', table_list)),
value=table_name),
form.Button("Submit", value="submit", description="data_table")
)
return form_submit
recall_p = None  # recall subprocess (extracts data in the background)
# 3. Risk-control recall page
class Pyrule:
def __init__(self):
self.message_user = "风控端—用户精准召回"
self.data_path = "./templates/recall/"
self.dump_file = ""
self.start_time = (datetime.datetime.now()+datetime.timedelta(-1)).strftime('%Y%m%d000000')
self.end_time = datetime.datetime.now().strftime('%Y%m%d000000')
self.from_product = 'okash'
self.into_product = 'mhela'
self.risk_rules = 'new'
def GET(self):
duration_time = datetime.datetime.now()
        if (duration_time - loan_time).total_seconds() > login_validity:  # session expired, force re-login
raise web.seeother('/login')
else:
message_user = self.message_user
form_submit = self.get_form()
df_sheet = ''
href_link, href_name = '',''
return render.data_base(message_user,form_submit, df_sheet, href_link, href_name)
def POST(self):
duration_time = datetime.datetime.now()
        if (duration_time - loan_time).total_seconds() > login_validity:  # session expired, force re-login
raise web.seeother('/login')
else:
param = web.input()
self.from_product = param.get('from_product')
self.into_product = param.get('into_product')
self.risk_rules = param.get('risk_rules', '')
self.start_time = param.get('start_time')
self.end_time = param.get('end_time')
self.dump_file = '{}recall_{}'.format(self.data_path,self.from_product)
form_submit = self.get_form()
if param.get('Submit'):
global recall_p
if recall_p:
try:
recall_p.kill()
except:
pass
message_user = self.message_user + ": {} -> {}.{} ({}-{},查询中。。。)".format(
self.from_product, self.into_product, self.risk_rules, self.start_time, self.end_time)
recall_p = risk_recall(self.from_product, self.into_product, self.risk_rules, self.start_time,
                                       self.end_time, self.dump_file)  # refresh the data in the background
return render.data_base(message_user, form_submit, "", "", "")
if param.get('Refresh'):
if recall_p:
message_user = self.message_user + ": {} -> {}.{} ({}-{},查询中。。。)".format(
self.from_product, self.into_product, self.risk_rules, self.start_time, self.end_time)
status = subprocess.Popen.poll(recall_p)
if 0 == status:
df_sheet,len_data = self.get_data()
message_user = self.message_user + ": {} -> {}.{} ({}-{},共计{}条)".format(
self.from_product, self.into_product, self.risk_rules, self.start_time, self.end_time, len_data)
href_link = '/data_set/recall/recall_{}.xlsx'.format(self.from_product)
href_name = "recall_{}下载".format(self.from_product)
return render.data_base(message_user, form_submit, df_sheet,href_link, href_name)
else:
return render.data_base(message_user, form_submit, "", "", "")
return render.data_base(self.message_user, form_submit, "", "", "")
def get_data(self):
_template = '{}.html'.format(self.dump_file)
if os.path.exists(_template):
_data = open(_template, 'r').read()
else:
_data = ''
_text = '{}.txt'.format(self.dump_file)
if os.path.exists(_text):
_txt = open(_text, 'r').read()
else:
_txt = ' '
return _data,_txt
def get_form(self):
form_submit = form.Form(
form.Dropdown(name='from_product', args=['okash', 'opesa', 'mhela'], value=self.from_product),
form.Dropdown(name='into_product', args=['okash', 'opesa', 'mhela'], value=self.into_product),
form.Dropdown(name='risk_rules', args=['new', 'old'], value=self.risk_rules),
form.Input(name='start_time', type="search", min='20170101000000', max='20250101000000', value=self.start_time),
form.Input(name='end_time', type="search", min='20170101000000', max='20250101000000', value=self.end_time),
form.Button("Submit", value="submit", description="data_table"),
form.Button("Refresh", value="submit", description="data_table")
)
return form_submit
BUF_SIZE = 262144
class download:
def GET(self,file_name,phases):
file_path = os.path.join('templates', file_name,phases)
f = None
try:
f = open(file_path, "rb")
web.header('Content-Type','application/octet-stream')
web.header('Content-disposition', 'attachment; filename=%s.xlsx' % file_name)
while True:
c = f.read(BUF_SIZE)
if c:
yield c
else:
break
        except Exception as e:
            print(e)
yield 'Error'
finally:
if f:
f.close()
if __name__ == '__main__':
app.run()
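# Hedged illustration (editor addition): the temporary 'admin' password is
# reproducible because Login.__init__ seeds the random module with today's date
# before sampling. This standalone helper mirrors that logic without running the app.
def _todays_temp_password():
    import datetime
    import random
    random.seed(datetime.datetime.now().strftime('%Y-%m-%d'))
    return "".join(random.sample('zyxwvutsrqponmlkjihgfedcba', 20))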
| 43.712598 | 124 | 0.587319 | ["MIT"] | wqwangchn/novice | pynovice/tmp_classification/web_py/web_moniter.py | 11,357 | Python
"""
Pymodbus Exceptions
--------------------
Custom exceptions to be used in the Modbus code.
"""
class ModbusException(Exception):
""" Base modbus exception """
def __init__(self, string):
""" Initialize the exception
:param string: The message to append to the error
"""
self.string = string
def __str__(self):
return 'Modbus Error: %s' % self.string
def isError(self):
"""Error"""
return True
class ModbusIOException(ModbusException):
""" Error resulting from data i/o """
def __init__(self, string="", function_code=None):
""" Initialize the exception
:param string: The message to append to the error
"""
self.fcode = function_code
self.message = "[Input/Output] %s" % string
ModbusException.__init__(self, self.message)
class ParameterException(ModbusException):
""" Error resulting from invalid parameter """
def __init__(self, string=""):
""" Initialize the exception
:param string: The message to append to the error
"""
message = "[Invalid Parameter] %s" % string
ModbusException.__init__(self, message)
class NoSuchSlaveException(ModbusException):
""" Error resulting from making a request to a slave
that does not exist """
def __init__(self, string=""):
""" Initialize the exception
:param string: The message to append to the error
"""
message = "[No Such Slave] %s" % string
ModbusException.__init__(self, message)
class NotImplementedException(ModbusException):
""" Error resulting from not implemented function """
def __init__(self, string=""):
""" Initialize the exception
:param string: The message to append to the error
"""
message = "[Not Implemented] %s" % string
ModbusException.__init__(self, message)
class ConnectionException(ModbusException):
""" Error resulting from a bad connection """
def __init__(self, string=""):
""" Initialize the exception
:param string: The message to append to the error
"""
message = "[Connection] %s" % string
ModbusException.__init__(self, message)
class InvalidMessageReceivedException(ModbusException):
"""
Error resulting from invalid response received or decoded
"""
def __init__(self, string=""):
""" Initialize the exception
:param string: The message to append to the error
"""
message = "[Invalid Message] %s" % string
ModbusException.__init__(self, message)
class MessageRegisterException(ModbusException):
"""
Error resulting from failing to register a custom message request/response
"""
def __init__(self, string=""):
message = '[Error registering message] %s' % string
ModbusException.__init__(self, message)
# --------------------------------------------------------------------------- #
# Exported symbols
# --------------------------------------------------------------------------- #
__all__ = [
"ModbusException", "ModbusIOException",
"ParameterException", "NotImplementedException",
"ConnectionException", "NoSuchSlaveException",
"InvalidMessageReceivedException",
"MessageRegisterException"
]
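# Hedged usage sketch (editor addition): catching these exceptions around a
# synchronous client call. The client import path varies between pymodbus
# versions (pymodbus.client.sync shown here, as in 2.x); host/port are placeholders.
def _example_handle_modbus_errors():
    from pymodbus.client.sync import ModbusTcpClient

    client = ModbusTcpClient("127.0.0.1", port=5020)
    try:
        if not client.connect():
            raise ConnectionException("unable to reach 127.0.0.1:5020")
        response = client.read_holding_registers(0, 2, unit=1)
        if response.isError():
            print("read failed: %s" % response)
    except (ConnectionException, ModbusIOException) as exc:
        print("modbus error: %s" % exc)
    finally:
        client.close()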
| 28.042017 | 79 | 0.614324 | ["BSD-3-Clause"] | Biondoap/pymodbus | pymodbus/exceptions.py | 3,337 | Python
from typing_extensions import Literal
from pydantic import BaseModel
class GroupIncreaseEvent(BaseModel):
group_id: int
notice_type = "group_increase"
operator_id: int
post_type = "notice"
self_id: int
sub_type: Literal["approve"]
time: int
user_id: int
class Config:
arbitrary_types_allowed = True
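# Hedged usage sketch (editor addition): building the event from a raw payload
# dict as delivered by a OneBot-style adapter; all field values are illustrative.
def _example_parse_group_increase():
    payload = {
        "group_id": 123456,
        "operator_id": 111,
        "self_id": 222,
        "sub_type": "approve",
        "time": 1600000000,
        "user_id": 333,
    }
    event = GroupIncreaseEvent(**payload)
    print(event.group_id, event.user_id)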
| 20.352941 | 38 | 0.708092 | ["MIT"] | SSmJaE/PepperBot | pepperbot/models/events/GroupIncrease.py | 346 | Python
'''
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain 1000 digits?
'''
# Initialize with a = F2 = 1, b = F3 = 2, c = F4 = 3; ind is the index of c
a = 1
b = 2
c = a + b
ind = 4
# Advance the sequence until c is the first term with 1000 digits
while len(str(c)) < 1000:
a = b
b = c
c = a + b
ind += 1
print(ind)
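# Hedged cross-check (editor addition): Binet's formula gives
# digits(F_n) ~= floor(n*log10(phi) - log10(sqrt(5))) + 1, so the answer can also
# be estimated analytically. Defined but not called, to keep the script's output unchanged.
def estimate_first_index_with_digits(digits=1000):
    import math
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((digits - 1 + math.log10(math.sqrt(5))) / math.log10(phi))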
| 16.425 | 85 | 0.601218 | ["MIT"] | malienko/projecteuler_python | problem25.py | 661 | Python
import requests
# Use the OMDb API to look up the movie titled "covid" released in 2021
url = 'http://www.omdbapi.com/?t=covid&y=2021&apikey=d0c69d2c'
r = requests.get(url)
json_data = r.json()
for key, value in json_data.items():
    print(key + ':', value)
| 31.857143 | 62 | 0.717489 | ["MIT"] | clearyb1/COVIDDataViz | analysis/OMDBAPI.py | 223 | Python
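# Hedged follow-up to the OMDb snippet above (editor addition): the same lookup
# wrapped in a reusable function with basic error handling. The API key argument
# is a placeholder; pass your own key.
def lookup_movie(title, year, api_key="<your-omdb-api-key>"):
    import requests
    response = requests.get(
        "http://www.omdbapi.com/",
        params={"t": title, "y": year, "apikey": api_key},
        timeout=10,
    )
    response.raise_for_status()
    return response.json()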
from __future__ import absolute_import
from django.contrib import admin
from . import models
admin.site.register(models.ExampleModel)
admin.site.register(models.ExampleNonUploadModel)
| 20.777778 | 49 | 0.84492 | ["BSD-3-Clause"] | 4nzor/django-ckeditor | ckeditor_demo/demo_application/admin.py | 187 | Python
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_file.py">
# Copyright (c) 2020 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import os
import dateutil.parser
import asposewordscloud.models.requests
from test.base_test_context import BaseTestContext
#
# Example of how to work with files.
#
class TestFile(BaseTestContext):
#
# Test for uploading file.
#
def test_upload_file(self):
remoteDataFolder = self.remote_test_folder + '/Storage'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestUploadFile.docx'
request = asposewordscloud.models.requests.UploadFileRequest(file_content=open(os.path.join(self.local_test_folder, localFile), 'rb'), path=remoteDataFolder + '/' + remoteFileName)
result = self.words_api.upload_file(request)
self.assertIsNotNone(result, 'Error has occurred.')
self.assertIsNotNone(result.uploaded, 'Validate UploadFile response')
self.assertEqual(1, len(result.uploaded))
self.assertEqual('TestUploadFile.docx', result.uploaded[0])
#
# Test for copy file.
#
def test_copy_file(self):
remoteDataFolder = self.remote_test_folder + '/Storage'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestCopyFileSrc.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.CopyFileRequest(dest_path=remoteDataFolder + '/TestCopyFileDest.docx', src_path=remoteDataFolder + '/' + remoteFileName)
self.words_api.copy_file(request)
#
# Test for move file.
#
def test_move_file(self):
remoteDataFolder = self.remote_test_folder + '/Storage'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestMoveFileSrc.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.MoveFileRequest(dest_path=self.remote_test_out + '/TestMoveFileDest_' + self.create_random_guid() + '.docx', src_path=remoteDataFolder + '/' + remoteFileName)
self.words_api.move_file(request)
#
# Test for delete file.
#
def test_delete_file(self):
remoteDataFolder = self.remote_test_folder + '/Storage'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestDeleteFile.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.DeleteFileRequest(path=remoteDataFolder + '/' + remoteFileName)
self.words_api.delete_file(request)
#
# Test for download file.
#
def test_download_file(self):
remoteDataFolder = self.remote_test_folder + '/Storage'
localFile = 'Common/test_multi_pages.docx'
remoteFileName = 'TestDownloadFile.docx'
self.upload_file(remoteDataFolder + '/' + remoteFileName, open(os.path.join(self.local_test_folder, localFile), 'rb'))
request = asposewordscloud.models.requests.DownloadFileRequest(path=remoteDataFolder + '/' + remoteFileName)
result = self.words_api.download_file(request)
self.assertIsNotNone(result, 'Error has occurred.')
| 41.783784 | 209 | 0.693834 | ["MIT"] | rizwanniazigroupdocs/aspose-words-cloud-python | test/api/storage/test_file.py | 4,638 | Python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import MultiapiServiceClientConfiguration
from .operations import MultiapiServiceClientOperationsMixin, OperationGroupOneOperations, OperationGroupTwoOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.rest import HttpRequest, HttpResponse
class MultiapiServiceClient(MultiapiServiceClientOperationsMixin):
"""Service client for multiapi client testing.
:ivar operation_group_one: OperationGroupOneOperations operations
:vartype operation_group_one: azure.multiapi.sample.v3.operations.OperationGroupOneOperations
:ivar operation_group_two: OperationGroupTwoOperations operations
:vartype operation_group_two: azure.multiapi.sample.v3.operations.OperationGroupTwoOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
:keyword api_version: Api Version. The default value is "3.0.0". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
base_url="http://localhost:3000", # type: str
**kwargs # type: Any
):
# type: (...) -> None
self._config = MultiapiServiceClientConfiguration(credential=credential, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.operation_group_one = OperationGroupOneOperations(self._client, self._config, self._serialize, self._deserialize)
self.operation_group_two = OperationGroupTwoOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> MultiapiServiceClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
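# Hedged usage sketch (editor addition): exercising the _send_request helper the
# docstring above describes. DefaultAzureCredential is one possible credential;
# the relative URL resolves against the default base_url http://localhost:3000.
def _example_send_request():
    from azure.identity import DefaultAzureCredential
    from azure.core.rest import HttpRequest

    with MultiapiServiceClient(credential=DefaultAzureCredential()) as client:
        response = client._send_request(HttpRequest("GET", "/"))
        print(response.status_code)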
| 43.272727 | 126 | 0.690943 | ["MIT"] | changlong-liu/autorest.python | docs/samples/specification/multiapi/generated/azure/multiapi/sample/v3/_multiapi_service_client.py | 4,284 | Python
# coding=utf-8
# =============================================================================
# Copyright (c) 2001-2019 FLIR Systems, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ("Confidential Information"). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
# =============================================================================
#
# Trigger.py shows how to trigger the camera. It relies on information
# provided in the Enumeration, Acquisition, and NodeMapInfo examples.
#
# It can also be helpful to familiarize yourself with the ImageFormatControl
# and Exposure examples. As they are somewhat shorter and simpler, either
# provides a strong introduction to camera customization.
#
# This example shows the process of configuring, using, and cleaning up a
# camera for use with both a software and a hardware trigger.
import os
import PySpin
import sys
NUM_IMAGES = 10 # number of images to grab
class TriggerType:
SOFTWARE = 1
HARDWARE = 2
CHOSEN_TRIGGER = TriggerType.SOFTWARE
def configure_trigger(cam):
"""
This function configures the camera to use a trigger. First, trigger mode is
set to off in order to select the trigger source. Once the trigger source
has been selected, trigger mode is then enabled, which has the camera
capture only a single image upon the execution of the chosen trigger.
:param cam: Camera to configure trigger for.
:type cam: CameraPtr
:return: True if successful, False otherwise.
:rtype: bool
"""
result = True
print('*** CONFIGURING TRIGGER ***\n')
print('Note that if the application / user software triggers faster than frame time, the trigger may be dropped / skipped by the camera.\n')
print('If several frames are needed per trigger, a more reliable alternative for such case, is to use the multi-frame mode.\n\n')
if CHOSEN_TRIGGER == TriggerType.SOFTWARE:
print('Software trigger chosen ...')
elif CHOSEN_TRIGGER == TriggerType.HARDWARE:
        print('Hardware trigger chosen ...')
try:
# Ensure trigger mode off
# The trigger must be disabled in order to configure whether the source
# is software or hardware.
nodemap = cam.GetNodeMap()
node_trigger_mode = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerMode'))
if not PySpin.IsAvailable(node_trigger_mode) or not PySpin.IsReadable(node_trigger_mode):
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
node_trigger_mode_off = node_trigger_mode.GetEntryByName('Off')
if not PySpin.IsAvailable(node_trigger_mode_off) or not PySpin.IsReadable(node_trigger_mode_off):
print('Unable to disable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_off.GetValue())
print('Trigger mode disabled...')
# Set TriggerSelector to FrameStart
# For this example, the trigger selector should be set to frame start.
# This is the default for most cameras.
node_trigger_selector= PySpin.CEnumerationPtr(nodemap.GetNode('TriggerSelector'))
if not PySpin.IsAvailable(node_trigger_selector) or not PySpin.IsWritable(node_trigger_selector):
print('Unable to get trigger selector (node retrieval). Aborting...')
return False
node_trigger_selector_framestart = node_trigger_selector.GetEntryByName('FrameStart')
if not PySpin.IsAvailable(node_trigger_selector_framestart) or not PySpin.IsReadable(
node_trigger_selector_framestart):
print('Unable to set trigger selector (enum entry retrieval). Aborting...')
return False
node_trigger_selector.SetIntValue(node_trigger_selector_framestart.GetValue())
print('Trigger selector set to frame start...')
# Select trigger source
# The trigger source must be set to hardware or software while trigger
# mode is off.
node_trigger_source = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerSource'))
if not PySpin.IsAvailable(node_trigger_source) or not PySpin.IsWritable(node_trigger_source):
print('Unable to get trigger source (node retrieval). Aborting...')
return False
if CHOSEN_TRIGGER == TriggerType.SOFTWARE:
node_trigger_source_software = node_trigger_source.GetEntryByName('Software')
if not PySpin.IsAvailable(node_trigger_source_software) or not PySpin.IsReadable(
node_trigger_source_software):
print('Unable to set trigger source (enum entry retrieval). Aborting...')
return False
node_trigger_source.SetIntValue(node_trigger_source_software.GetValue())
print('Trigger source set to software...')
elif CHOSEN_TRIGGER == TriggerType.HARDWARE:
node_trigger_source_hardware = node_trigger_source.GetEntryByName('Line0')
if not PySpin.IsAvailable(node_trigger_source_hardware) or not PySpin.IsReadable(
node_trigger_source_hardware):
print('Unable to set trigger source (enum entry retrieval). Aborting...')
return False
node_trigger_source.SetIntValue(node_trigger_source_hardware.GetValue())
print('Trigger source set to hardware...')
# Turn trigger mode on
# Once the appropriate trigger source has been set, turn trigger mode
# on in order to retrieve images using the trigger.
node_trigger_mode_on = node_trigger_mode.GetEntryByName('On')
if not PySpin.IsAvailable(node_trigger_mode_on) or not PySpin.IsReadable(node_trigger_mode_on):
print('Unable to enable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_on.GetValue())
print('Trigger mode turned back on...')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
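# Hedged alternative sketch (editor addition): the same software-trigger setup
# expressed with PySpin's QuickSpin properties instead of GenICam node lookups.
# Enum names are assumed from the Spinnaker QuickSpin API; verify against your build.
def configure_software_trigger_quickspin(cam):
    cam.TriggerMode.SetValue(PySpin.TriggerMode_Off)
    cam.TriggerSelector.SetValue(PySpin.TriggerSelector_FrameStart)
    cam.TriggerSource.SetValue(PySpin.TriggerSource_Software)
    cam.TriggerMode.SetValue(PySpin.TriggerMode_On)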
def grab_next_image_by_trigger(nodemap, cam):
"""
This function acquires an image by executing the trigger node.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
try:
result = True
# Use trigger to capture image
# The software trigger only feigns being executed by the Enter key;
# what might not be immediately apparent is that there is not a
# continuous stream of images being captured; in other examples that
# acquire images, the camera captures a continuous stream of images.
# When an image is retrieved, it is plucked from the stream.
if CHOSEN_TRIGGER == TriggerType.SOFTWARE:
# Get user input
input('Press the Enter key to initiate software trigger.')
# Execute software trigger
node_softwaretrigger_cmd = PySpin.CCommandPtr(nodemap.GetNode('TriggerSoftware'))
if not PySpin.IsAvailable(node_softwaretrigger_cmd) or not PySpin.IsWritable(node_softwaretrigger_cmd):
print('Unable to execute trigger. Aborting...')
return False
node_softwaretrigger_cmd.Execute()
# TODO: Blackfly and Flea3 GEV cameras need 2 second delay after software trigger
elif CHOSEN_TRIGGER == TriggerType.HARDWARE:
print('Use the hardware to trigger image acquisition.')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
def acquire_images(cam, nodemap, nodemap_tldevice):
"""
This function acquires and saves 10 images from a device.
Please see Acquisition example for more in-depth comments on acquiring images.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:param nodemap_tldevice: Transport layer device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:type nodemap_tldevice: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print('*** IMAGE ACQUISITION ***\n')
try:
result = True
# Set acquisition mode to continuous
# In order to access the node entries, they have to be casted to a pointer type (CEnumerationPtr here)
node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):
print('Unable to set acquisition mode to continuous (enum retrieval). Aborting...')
return False
# Retrieve entry node from enumeration node
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(
node_acquisition_mode_continuous):
print('Unable to set acquisition mode to continuous (entry retrieval). Aborting...')
return False
# Retrieve integer value from entry node
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
# Set integer value from entry node as new value of enumeration node
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
print('Acquisition mode set to continuous...')
# Begin acquiring images
cam.BeginAcquisition()
print('Acquiring images...')
# Retrieve device serial number for filename
#
# *** NOTES ***
# The device serial number is retrieved in order to keep cameras from
# overwriting one another. Grabbing image IDs could also accomplish
# this.
device_serial_number = ''
node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
if PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number):
device_serial_number = node_device_serial_number.GetValue()
print('Device serial number retrieved as %s...' % device_serial_number)
# Retrieve, convert, and save images
for i in range(NUM_IMAGES):
try:
# Retrieve the next image from the trigger
result &= grab_next_image_by_trigger(nodemap, cam)
# Retrieve next received image
image_result = cam.GetNextImage(1000)
# Ensure image completion
if image_result.IsIncomplete():
print('Image incomplete with image status %d ...' % image_result.GetImageStatus())
else:
# Print image information; height and width recorded in pixels
#
# *** NOTES ***
# Images have quite a bit of available metadata including
# things such as CRC, image status, and offset values, to
# name a few.
width = image_result.GetWidth()
height = image_result.GetHeight()
print('Grabbed Image %d, width = %d, height = %d' % (i, width, height))
# Convert image to mono 8
#
# *** NOTES ***
# Images can be converted between pixel formats by using
# the appropriate enumeration value. Unlike the original
# image, the converted one does not need to be released as
# it does not affect the camera buffer.
#
# When converting images, color processing algorithm is an
# optional parameter.
image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
# Create a unique filename
if device_serial_number:
filename = 'Trigger-%s-%d.jpg' % (device_serial_number, i)
else: # if serial number is empty
filename = 'Trigger-%d.jpg' % i
# Save image
#
# *** NOTES ***
# The standard practice of the examples is to use device
# serial numbers to keep images of one device from
# overwriting those of another.
image_converted.Save(filename)
print('Image saved at %s\n' % filename)
# Release image
#
# *** NOTES ***
# Images retrieved directly from the camera (i.e. non-converted
# images) need to be released in order to keep from filling the
# buffer.
image_result.Release()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
# End acquisition
#
# *** NOTES ***
# Ending acquisition appropriately helps ensure that devices clean up
# properly and do not need to be power-cycled to maintain integrity.
cam.EndAcquisition()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
def reset_trigger(nodemap):
"""
This function returns the camera to a normal state by turning off trigger mode.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:returns: True if successful, False otherwise.
:rtype: bool
"""
try:
result = True
node_trigger_mode = PySpin.CEnumerationPtr(nodemap.GetNode('TriggerMode'))
if not PySpin.IsAvailable(node_trigger_mode) or not PySpin.IsReadable(node_trigger_mode):
print('Unable to disable trigger mode (node retrieval). Aborting...')
return False
node_trigger_mode_off = node_trigger_mode.GetEntryByName('Off')
if not PySpin.IsAvailable(node_trigger_mode_off) or not PySpin.IsReadable(node_trigger_mode_off):
print('Unable to disable trigger mode (enum entry retrieval). Aborting...')
return False
node_trigger_mode.SetIntValue(node_trigger_mode_off.GetValue())
print('Trigger mode disabled...')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
result = False
return result
def print_device_info(nodemap):
"""
This function prints the device information of the camera from the transport
layer; please see NodeMapInfo example for more in-depth comments on printing
device information from the nodemap.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:returns: True if successful, False otherwise.
:rtype: bool
"""
print('*** DEVICE INFORMATION ***\n')
try:
result = True
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print('%s: %s' % (node_feature.GetName(),
node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
def run_single_camera(cam):
"""
This function acts as the body of the example; please see NodeMapInfo example
for more in-depth comments on setting up cameras.
:param cam: Camera to run on.
:type cam: CameraPtr
:return: True if successful, False otherwise.
:rtype: bool
"""
try:
result = True
err = False
# Retrieve TL device nodemap and print device information
nodemap_tldevice = cam.GetTLDeviceNodeMap()
result &= print_device_info(nodemap_tldevice)
# Initialize camera
cam.Init()
# Retrieve GenICam nodemap
nodemap = cam.GetNodeMap()
# Configure trigger
if configure_trigger(cam) is False:
return False
# Acquire images
result &= acquire_images(cam, nodemap, nodemap_tldevice)
# Reset trigger
result &= reset_trigger(nodemap)
# Deinitialize camera
cam.DeInit()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
result = False
return result
def main():
"""
Example entry point; please see Enumeration example for more in-depth
comments on preparing and cleaning up the system.
:return: True if successful, False otherwise.
:rtype: bool
"""
# Since this application saves images in the current folder
# we must ensure that we have permission to write to this folder.
# If we do not have permission, fail right away.
try:
test_file = open('test.txt', 'w+')
except IOError:
print('Unable to write to current directory. Please check permissions.')
input('Press Enter to exit...')
return False
test_file.close()
os.remove(test_file.name)
result = True
# Retrieve singleton reference to system object
system = PySpin.System.GetInstance()
# Get current library version
version = system.GetLibraryVersion()
print('Library version: %d.%d.%d.%d' % (version.major, version.minor, version.type, version.build))
# Retrieve list of cameras from the system
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print('Number of cameras detected: %d' % num_cameras)
# Finish if there are no cameras
if num_cameras == 0:
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
# Run example on each camera
for i, cam in enumerate(cam_list):
print('Running example for camera %d...' % i)
result &= run_single_camera(cam)
print('Camera %d example complete... \n' % i)
# Release reference to camera
# NOTE: Unlike the C++ examples, we cannot rely on pointer objects being automatically
# cleaned up when going out of scope.
# The usage of del is preferred to assigning the variable to None.
del cam
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
input('Done! Press Enter to exit...')
return result
if __name__ == '__main__':
if main():
sys.exit(0)
else:
sys.exit(1)
| 38.278846 | 144 | 0.64627 | ["MIT"] | BevanLab/Recording_Script | Sample/spinnaker_python-2.2.0.48-cp37-cp37m-win_amd64/Examples/Python3/Trigger.py | 19,905 | Python
from .hw_6_check import has_plotting
def test_nothing():
assert not has_plotting("")
def test_plotly():
assert has_plotting("import plotly.express as px")
def test_plot_method():
assert has_plotting("df.plot()")
def test_plot_submodule():
assert has_plotting("df.plot.scatter()")
| 16.888889 | 54 | 0.720395 | ["CC0-1.0"] | MatthewMaury/python-public-policy | extras/scripts/test_hw_6_check.py | 304 | Python
import json
from unittest.mock import patch
from django.core import mail
from hc.api.models import Channel, Notification
from hc.test import BaseTestCase
class SendTestNotificationTestCase(BaseTestCase):
def setUp(self):
super(SendTestNotificationTestCase, self).setUp()
self.channel = Channel(kind="email", project=self.project)
self.channel.email_verified = True
self.channel.value = "[email protected]"
self.channel.save()
self.url = "/integrations/%s/test/" % self.channel.code
def test_it_sends_test_email(self):
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, {}, follow=True)
self.assertRedirects(r, self.channels_url)
self.assertContains(r, "Test notification sent!")
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "[email protected]")
self.assertTrue("X-Bounce-Url" in email.extra_headers)
self.assertTrue("List-Unsubscribe" in email.extra_headers)
# It should create a notification
n = Notification.objects.get()
self.assertEqual(n.channel, self.channel)
self.assertEqual(n.error, "")
def test_it_clears_channel_last_error(self):
self.channel.last_error = "Something went wrong"
self.channel.save()
self.client.login(username="[email protected]", password="password")
self.client.post(self.url, {})
self.channel.refresh_from_db()
self.assertEqual(self.channel.last_error, "")
def test_it_sets_channel_last_error(self):
self.channel.email_verified = False
self.channel.save()
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, {}, follow=True)
self.assertContains(r, "Could not send a test notification")
self.assertContains(r, "Email not verified")
self.channel.refresh_from_db()
self.assertEqual(self.channel.last_error, "Email not verified")
@patch("hc.api.transports.requests.request")
def test_it_handles_webhooks_with_no_down_url(self, mock_get):
mock_get.return_value.status_code = 200
self.channel.kind = "webhook"
self.channel.value = json.dumps(
{
"method_down": "GET",
"url_down": "",
"body_down": "",
"headers_down": {},
"method_up": "GET",
"url_up": "http://example-url",
"body_up": "",
"headers_up": {},
}
)
self.channel.save()
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, {}, follow=True)
self.assertRedirects(r, self.channels_url)
self.assertContains(r, "Test notification sent!")
def test_it_handles_webhooks_with_no_urls(self):
self.channel.kind = "webhook"
self.channel.value = json.dumps(
{
"method_down": "GET",
"url_down": "",
"body_down": "",
"headers_down": {},
"method_up": "GET",
"url_up": "",
"body_up": "",
"headers_up": {},
}
)
self.channel.save()
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, {}, follow=True)
self.assertRedirects(r, self.channels_url)
self.assertContains(r, "Could not send a test notification")
def test_it_checks_channel_ownership(self):
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, {}, follow=True)
self.assertEqual(r.status_code, 404)
| 35.414414 | 78 | 0.610023 | ["BSD-3-Clause"] | UniversitaDellaCalabria/healthchecks | hc/front/tests/test_send_test_notification.py | 3,931 | Python
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The CenterNet meta architecture as described in the "Objects as Points" paper [1].
[1]: https://arxiv.org/abs/1904.07850
"""
import abc
import collections
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.utils import shape_utils
# Number of channels needed to predict size and offsets.
NUM_OFFSET_CHANNELS = 2
NUM_SIZE_CHANNELS = 2
# Error range for detecting peaks.
PEAK_EPSILON = 1e-6
# Constants shared between all keypoint tasks.
UNMATCHED_KEYPOINT_SCORE = 0.1
KEYPOINT_CANDIDATE_SEARCH_SCALE = 0.3
class CenterNetFeatureExtractor(tf.keras.Model):
"""Base class for feature extractors for the CenterNet meta architecture.
Child classes are expected to override the _output_model property which will
return 1 or more tensors predicted by the feature extractor.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
        [blue, green, red] order.
"""
super(CenterNetFeatureExtractor, self).__init__(name=name)
if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test
channel_means = [0., 0., 0.]
if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test
channel_stds = [1., 1., 1.]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
def preprocess(self, inputs):
"""Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
    `channel_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
"""
if self._bgr_ordering:
red, green, blue = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
return (inputs - channel_means)/channel_stds
@property
@abc.abstractmethod
def out_stride(self):
"""The stride in the output image of the network."""
pass
@property
@abc.abstractmethod
  def num_feature_outputs(self):
    """The number of feature outputs returned by the feature extractor."""
pass
@property
@abc.abstractmethod
def supported_sub_model_types(self):
"""Valid sub model types supported by the get_sub_model function."""
pass
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"""Returns the underlying keras model for the given sub_model_type.
This function is useful when we only want to get a subset of weights to
be restored from a checkpoint.
Args:
sub_model_type: string, the type of sub model. Currently, CenterNet
feature extractors support 'detection' and 'classification'.
"""
pass
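# Illustrative sketch of the preprocess() normalization above, written in plain
# numpy. The channel means/stds and image values are made-up example numbers,
# not values required by the model.
def _example_channel_normalization():
  """Shows the optional BGR flip followed by (image - mean) / std scaling."""
  images = np.ones((1, 4, 4, 3), dtype=np.float32) * [255., 128., 0.]
  channel_means = np.array([123.68, 116.78, 103.94], dtype=np.float32)
  channel_stds = np.array([58.4, 57.1, 57.4], dtype=np.float32)
  # With bgr_ordering=True the channels are reordered from RGB to BGR first.
  images_bgr = images[..., ::-1]
  return (images_bgr - channel_means) / channel_stds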
def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256,
bias_fill=None):
"""Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_size: The size of the conv kernel in the intermediate layer
num_filters: The number of filters in the intermediate conv layer.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels]
"""
out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1)
if bias_fill is not None:
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential(
[tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size,
padding='same'),
tf.keras.layers.ReLU(),
out_conv]
)
return net
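# Illustrative sketch of how make_prediction_net is typically used to build a
# head, e.g. a 2-channel box-offset head. The feature map shape mentioned in
# the comment is a made-up example.
def _example_offset_head():
  """Builds an offset prediction head and notes how it would be applied."""
  offset_head = make_prediction_net(NUM_OFFSET_CHANNELS, kernel_size=3,
                                    num_filters=256)
  # Calling offset_head on a [batch, height, width, num_channels] feature map
  # produces a [batch, height, width, NUM_OFFSET_CHANNELS] prediction.
  return offset_head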
def _to_float32(x):
return tf.cast(x, tf.float32)
def _get_shape(tensor, num_dims):
tf.Assert(tensor.get_shape().ndims == num_dims, [tensor])
return shape_utils.combined_static_and_dynamic_shape(tensor)
def _flatten_spatial_dimensions(batch_images):
batch_size, height, width, channels = _get_shape(batch_images, 4)
return tf.reshape(batch_images, [batch_size, height * width,
channels])
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,
per_channel=False):
"""Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
  the same location from being returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
"""
if not max_pool_kernel_size or max_pool_kernel_size == 1:
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(
feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = tf.math.abs(
feature_map - feature_map_max_pool) < PEAK_EPSILON
# Zero out everything that is not a peak.
feature_map_peaks = (
feature_map * _to_float32(feature_map_peak_mask))
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
if per_channel:
# Perform top k over batch and channels.
feature_map_peaks_transposed = tf.transpose(feature_map_peaks,
perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(
feature_map_peaks_transposed, [batch_size, num_channels, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_transposed, k=k)
# Convert the indices such that they represent the location in the full
# (flattened) feature map of size [batch, height * width * channels].
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = num_channels * peak_flat_indices + channel_idx
scores = tf.reshape(scores, [batch_size, -1])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k)
# Get x, y and channel indices corresponding to the top indices in the flat
# array.
y_indices, x_indices, channel_indices = (
row_col_channel_indices_from_flattened_indices(
peak_flat_indices, width, num_channels))
return scores, y_indices, x_indices, channel_indices
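# Illustrative sketch of calling top_k_feature_map_locations on a class-center
# heatmap. The all-zero heatmap and its shape are made-up placeholders.
def _example_top_k_peaks():
  """Extracts the top 100 peak locations across an entire heatmap."""
  center_heatmap = tf.zeros([2, 128, 128, 90], dtype=tf.float32)
  scores, y_indices, x_indices, channel_indices = top_k_feature_map_locations(
      center_heatmap, max_pool_kernel_size=3, k=100, per_channel=False)
  # Each returned tensor has shape [2, 100] since per_channel is False.
  return scores, y_indices, x_indices, channel_indices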
def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices,
channel_indices, height_width_predictions,
offset_predictions):
"""Converts CenterNet class-center, offset and size predictions to boxes.
Args:
detection_scores: A [batch, num_boxes] float32 tensor with detection
scores in range [0, 1].
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
channel_indices: A [batch, num_boxes] int32 tensor with channel indices
corresponding to object classes.
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
      raw bounding box coordinates of boxes.
detection_classes: An integer tensor of shape [batch_size, num_boxes]
indicating the predicted class for each box.
detection_scores: A float tensor of shape [batch_size, num_boxes] indicating
the score for each box.
num_detections: An integer tensor of shape [batch_size,] indicating the
number of boxes detected for each sample in the batch.
"""
_, _, width, _ = _get_shape(height_width_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width_flat = _flatten_spatial_dimensions(height_width_predictions)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
height_width = tf.gather(height_width_flat, peak_spatial_indices,
batch_dims=1)
height_width = tf.maximum(height_width, 0)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
heights, widths = tf.unstack(height_width, axis=2)
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1)
boxes = tf.stack([y_indices + y_offsets - heights / 2.0,
x_indices + x_offsets - widths / 2.0,
y_indices + y_offsets + heights / 2.0,
x_indices + x_offsets + widths / 2.0], axis=2)
return boxes, detection_classes, detection_scores, num_detections
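# Illustrative worked example of the box decoding arithmetic above, using
# made-up numbers for a single peak (all in output-stride coordinates).
def _example_box_decoding():
  """Decodes one box from its center peak, offset and size predictions."""
  y, x = 10.0, 20.0            # peak location from the center heatmap
  y_offset, x_offset = 0.3, -0.1
  height, width = 6.0, 8.0
  ymin = y + y_offset - height / 2.0  # 7.3
  xmin = x + x_offset - width / 2.0   # 15.9
  ymax = y + y_offset + height / 2.0  # 13.3
  xmax = x + x_offset + width / 2.0   # 23.9
  return ymin, xmin, ymax, xmax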
def prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_predictions):
"""Converts CenterNet temporal offset map predictions to batched format.
  This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
      object temporal offsets of (y, x) dimensions.
"""
_, _, width, _ = _get_shape(offset_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
return offsets
def prediction_tensors_to_keypoint_candidates(
keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.1,
max_pool_kernel_size=1,
max_candidates=20):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint
a candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per
keypoint type.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold.
"""
batch_size, _, width, num_keypoints = _get_shape(
keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
# Note that the top k candidates are produced for **each keypoint type**.
# Might be worth eventually trying top k in the feature map, independent of
# the keypoint type.
keypoint_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(keypoint_heatmap_predictions,
max_pool_kernel_size=max_pool_kernel_size,
k=max_candidates,
per_channel=True))
peak_spatial_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(keypoint_heatmap_offsets)
selected_offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
_, num_indices, num_channels = _get_shape(selected_offsets, 3)
if num_channels > 2:
reshaped_offsets = tf.reshape(selected_offsets,
[batch_size, num_indices, -1, 2])
offsets = tf.gather(reshaped_offsets, channel_indices, batch_dims=2)
else:
offsets = selected_offsets
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([y_indices + y_offsets,
x_indices + x_offsets], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates,
[batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(
keypoint_scores,
[batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(
tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)
return keypoint_candidates, keypoint_scores, num_candidates
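# Illustrative sketch of calling prediction_tensors_to_keypoint_candidates with
# shared (non per-keypoint) offsets. The zero-filled tensors and their shapes
# are made-up placeholders.
def _example_keypoint_candidates():
  """Extracts up to 20 candidates per keypoint type."""
  keypoint_heatmap = tf.zeros([2, 128, 128, 17], dtype=tf.float32)
  keypoint_offsets = tf.zeros([2, 128, 128, 2], dtype=tf.float32)
  candidates, scores, num_candidates = (
      prediction_tensors_to_keypoint_candidates(
          keypoint_heatmap, keypoint_offsets, keypoint_score_threshold=0.1,
          max_pool_kernel_size=3, max_candidates=20))
  # candidates: [2, 20, 17, 2], scores: [2, 20, 17], num_candidates: [2, 17].
  return candidates, scores, num_candidates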
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions,
y_indices, x_indices):
"""Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame.
"""
batch_size, _, width, _ = _get_shape(regressed_keypoint_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
regressed_keypoints_flat = _flatten_spatial_dimensions(
regressed_keypoint_predictions)
relative_regressed_keypoints = tf.gather(
regressed_keypoints_flat, flattened_indices, batch_dims=1)
relative_regressed_keypoints = tf.reshape(
relative_regressed_keypoints,
[batch_size, num_instances, -1, 2])
relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(
relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))
absolute_regressed_keypoints = tf.stack(
[y_indices + relative_regressed_keypoints_y,
x_indices + relative_regressed_keypoints_x],
axis=3)
return tf.reshape(absolute_regressed_keypoints,
[batch_size, num_instances, -1])
def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=None,
unmatched_keypoint_score=0.1, box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance'):
"""Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
- the candidate keypoint must not lie outside the predicted boxes (or the
    boxes which enclose the regressed keypoints for the instance if `bboxes` is
    not provided). Note that the box is scaled by `box_scale` in height and
    width, to provide some margin around the keypoints
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
  keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string as one of ['min_distance',
      'score_distance_ratio'] indicating how to select the candidate. If an
      invalid value is provided, a ValueError will be raised.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
Raises:
ValueError: if provided candidate_ranking_mode is not one of
['min_distance', 'score_distance_ratio']
"""
batch_size, num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
max_candidates = keypoint_candidates.shape[1]
# Replace all invalid (i.e. padded) keypoint candidates with NaN.
# This will prevent them from being considered.
range_tiled = tf.tile(
tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
[batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
[1, max_candidates, 1])
invalid_candidates = range_tiled >= num_candidates_tiled
nan_mask = tf.where(
invalid_candidates,
np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32),
tf.ones_like(invalid_candidates, dtype=tf.float32))
keypoint_candidates_with_nans = tf.math.multiply(
keypoint_candidates, tf.expand_dims(nan_mask, -1))
# Pairwise squared distances between regressed keypoints and candidate
# keypoints (for a single keypoint type).
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,
axis=2)
keypoint_candidates_expanded = tf.expand_dims(
keypoint_candidates_with_nans, axis=1)
sqrd_distances = tf.math.reduce_sum(
tf.math.squared_difference(regressed_keypoint_expanded,
keypoint_candidates_expanded),
axis=-1)
distances = tf.math.sqrt(sqrd_distances)
# Determine the candidates that have the minimum distance to the regressed
# keypoints. Shape [batch_size, num_instances, num_keypoints].
min_distances = tf.math.reduce_min(distances, axis=2)
if candidate_ranking_mode == 'min_distance':
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif candidate_ranking_mode == 'score_distance_ratio':
# tiled_keypoint_scores:
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
tiled_keypoint_scores = tf.tile(
tf.expand_dims(keypoint_scores, axis=1),
multiples=[1, num_instances, 1, 1])
ranking_scores = tiled_keypoint_scores / (distances + 1e-6)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
else:
raise ValueError('Not recognized candidate_ranking_mode: %s' %
candidate_ranking_mode)
# Gather the coordinates and scores corresponding to the closest candidates.
# Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and
# [batch_size, num_instances, num_keypoints], respectively.
nearby_candidate_coords, nearby_candidate_scores = (
_gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
nearby_candidate_inds))
if bboxes is None:
# Create bboxes from regressed keypoints.
# Shape [batch_size * num_instances, 4].
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, num_keypoints, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
else:
bboxes_flattened = tf.reshape(bboxes, [-1, 4])
# Scale the bounding boxes.
# Shape [batch_size, num_instances, 4].
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(
boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
# Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.
# Shape [batch_size, num_instances, num_keypoints].
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)
# Produce a mask that indicates whether the original regressed keypoint
# should be used instead of a candidate keypoint.
# Shape [batch_size, num_instances, num_keypoints].
search_radius = (
tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)
mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +
# Filter out the chosen candidate with score lower than unmatched
# keypoint score.
tf.cast(nearby_candidate_scores <
unmatched_keypoint_score, tf.int32) +
tf.cast(min_distances > search_radius, tf.int32))
mask = mask > 0
# Create refined keypoints where candidate keypoints replace original
# regressed keypoints if they are in the vicinity of the regressed keypoints.
# Shape [batch_size, num_instances, num_keypoints, 2].
refined_keypoints = tf.where(
tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),
regressed_keypoints,
nearby_candidate_coords)
# Update keypoints scores. In the case where we use the original regressed
# keypoints, we use a default score of `unmatched_keypoint_score`.
# Shape [batch_size, num_instances, num_keypoints].
refined_scores = tf.where(
mask,
unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),
nearby_candidate_scores)
return refined_keypoints, refined_scores
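# Illustrative worked example (made-up numbers) of the two candidate ranking
# modes used in refine_keypoints: 'min_distance' picks the nearest candidate,
# while 'score_distance_ratio' lets a farther but much higher-scoring candidate
# win.
def _example_candidate_ranking():
  """Compares the two ranking modes on two hypothetical candidates."""
  candidates = {'a': {'distance': 2.0, 'score': 0.2},
                'b': {'distance': 5.0, 'score': 0.9}}
  by_distance = min(candidates, key=lambda k: candidates[k]['distance'])
  by_ratio = max(
      candidates,
      key=lambda k: candidates[k]['score'] / (candidates[k]['distance'] + 1e-6))
  return by_distance, by_ratio  # returns ('a', 'b')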
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
num_total_keypoints):
"""Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
[batch_size, num_instances, num_total_keypoints,2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
"""
batch_size, num_instances, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_coords_transposed,
shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_scores_transposed,
shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return keypoint_coords_padded, keypoint_scores_padded
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
max_instances):
"""Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
"""
batch_size, _, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_coords_transposed,
shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_scores_transposed,
shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return keypoint_coords_padded, keypoint_scores_padded
def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
indices):
"""Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
      num_indices, num_keypoints] with gathered scores.
"""
# Transpose tensors so that all batch dimensions are up front.
keypoint_candidates_transposed = tf.transpose(keypoint_candidates,
[0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices,
[0, 2, 1])
  nearby_candidate_coords_transposed = tf.gather(
keypoint_candidates_transposed, nearby_candidate_inds_transposed,
batch_dims=2)
nearby_candidate_scores_transposed = tf.gather(
keypoint_scores_transposed, nearby_candidate_inds_transposed,
batch_dims=2)
  gathered_keypoint_candidates = tf.transpose(
      nearby_candidate_coords_transposed, [0, 2, 1, 3])
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,
[0, 2, 1])
return gathered_keypoint_candidates, gathered_keypoint_scores
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
"""Get the index in a flattened array given row and column indices."""
return (row_indices * num_cols) + col_indices
def row_col_channel_indices_from_flattened_indices(indices, num_cols,
num_channels):
"""Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
    channel_indices: The channel indices corresponding to each of the input
      indices. Same shape as indices.
"""
row_indices = (indices // num_channels) // num_cols
col_indices = (indices // num_channels) % num_cols
channel_indices = indices % num_channels
return row_indices, col_indices, channel_indices
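# Illustrative worked example (made-up sizes) of the round trip between
# (row, col, channel) indices and indices into a feature map flattened in
# row-major (height, width, channels) order.
def _example_index_round_trip():
  """Flattens one (row, col, channel) index and recovers it again."""
  num_cols, num_channels = 5, 3
  row, col, channel = 2, 4, 1
  flat = (row * num_cols + col) * num_channels + channel   # 43
  recovered_row = (flat // num_channels) // num_cols       # 2
  recovered_col = (flat // num_channels) % num_cols        # 4
  recovered_channel = flat % num_channels                  # 1
  return flat, recovered_row, recovered_col, recovered_channel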
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,
width):
"""Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
"""
indices = tf.reshape(tf.range(height * width), [1, -1])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices
y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(
batch_indices, width, 1)
max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)
return _to_float32(valid_mask)
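# Illustrative sketch (plain numpy, made-up sizes) of the valid-pixel logic
# above: for a 4x6 output grid where the true, unpadded image only covers the
# top-left 3x4 region, only pixels inside that region receive weight 1.
def _example_valid_anchor_weights():
  """Builds the flattened validity mask for one made-up image."""
  height, width = 4, 6
  true_height, true_width = 3, 4
  ys, xs = np.meshgrid(np.arange(height), np.arange(width), indexing='ij')
  valid = np.logical_and(xs < true_width, ys < true_height).astype(np.float32)
  return valid.reshape(-1)  # shape [height * width], as in the function above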
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
true_image_shapes):
"""Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
"""
def _normalize_boxlist(args):
boxes, height, width = args
boxes = box_list_ops.scale(boxes, stride, stride)
boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.],
filter_nonoverlapping=False)
return boxes
box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
true_heights_list = tf.unstack(true_heights, axis=0)
true_widths_list = tf.unstack(true_widths, axis=0)
box_lists = list(map(_normalize_boxlist,
zip(box_lists, true_heights_list, true_widths_list)))
boxes = tf.stack([box_list_instance.get() for
box_list_instance in box_lists], axis=0)
return boxes
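# Illustrative worked example (made-up numbers) of the normalization above: a
# box in output-stride coordinates is scaled by the stride into input pixels,
# divided by the true (unpadded) image size, then clipped to [0, 1].
def _example_normalize_strided_box():
  """Normalizes one box for a stride-4 model and a 480x640 true image."""
  stride = 4
  true_height, true_width = 480.0, 640.0
  ymin, xmin, ymax, xmax = 10.0, 20.0, 50.0, 90.0  # output-space coordinates
  box_pixels = [v * stride for v in (ymin, xmin, ymax, xmax)]
  normalized = [box_pixels[0] / true_height, box_pixels[1] / true_width,
                box_pixels[2] / true_height, box_pixels[3] / true_width]
  return [min(max(v, 0.0), 1.0) for v in normalized]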
def convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_out_of_frame_keypoints=False):
"""Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; If
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
"""
# Flatten keypoints and scores.
batch_size, _, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
# Scale and normalize keypoints.
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
yscale = float(stride) / tf.cast(true_heights, tf.float32)
xscale = float(stride) / tf.cast(true_widths, tf.float32)
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = keypoint_coords * tf.reshape(
yx_scale, [batch_size, 1, 1, 2])
if clip_out_of_frame_keypoints:
# Determine the keypoints that are in the true image regions.
valid_indices = tf.logical_and(
tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,
keypoint_coords_normalized[:, :, :, 0] <= 1.0),
tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,
keypoint_coords_normalized[:, :, :, 1] <= 1.0))
batch_window = tf.tile(
tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
multiples=[batch_size, 1])
def clip_to_window(inputs):
keypoints, window = inputs
return keypoint_ops.clip_to_window(keypoints, window)
keypoint_coords_normalized = tf.map_fn(
clip_to_window, (keypoint_coords_normalized, batch_window),
dtype=tf.float32, back_prop=False)
keypoint_scores = tf.where(valid_indices, keypoint_scores,
tf.zeros_like(keypoint_scores))
return keypoint_coords_normalized, keypoint_scores
def convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=None, densepose_surface_coords=None, stride=4,
mask_height=256, mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
output. If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
"""
batch_size, output_height, output_width, _ = (
shape_utils.combined_static_and_dynamic_shape(masks))
input_height = stride * output_height
input_width = stride * output_width
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
# If necessary, create dummy DensePose tensors to simplify the map function.
densepose_present = True
if ((densepose_part_heatmap is not None) ^
(densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and '
'`densepose_surface_coords` must be provided')
if densepose_part_heatmap is None and densepose_surface_coords is None:
densepose_present = False
densepose_part_heatmap = tf.zeros(
(batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros(
(batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(
crop_and_threshold_masks, input_height=input_height,
input_width=input_width, mask_height=mask_height, mask_width=mask_width,
score_threshold=score_threshold,
densepose_class_index=densepose_class_index)
instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(
crop_and_threshold_fn,
elems=[boxes, classes, masks, densepose_part_heatmap,
densepose_surface_coords, true_heights, true_widths],
dtype=[tf.uint8, tf.float32],
back_prop=False)
surface_coords = surface_coords if densepose_present else None
return instance_masks, surface_coords
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,
mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
      float32 tensor with (v, u) coordinates for each part.
"""
(boxes, classes, masks, part_heatmap, surf_coords, true_height,
true_width) = elems
# Boxes are in normalized coordinates relative to true image shapes. Convert
# coordinates to be normalized relative to input image shapes (since masks
# may still have padding).
boxlist = box_list.BoxList(boxes)
y_scale = true_height / input_height
x_scale = true_width / input_width
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
# Convert masks from [output_height, output_width, num_classes] to
# [num_classes, output_height, output_width, 1].
num_classes = tf.shape(masks)[-1]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
# Tile part and surface coordinate masks for all classes.
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],
axis=-1)
# The following tensor has shape
# [max_detections, mask_height, mask_width, 1 + 3 * num_parts].
cropped_masks = tf2.image.crop_and_resize(
feature_maps_concat,
boxes=boxes,
box_indices=classes,
crop_size=[mask_height, mask_width],
method='bilinear')
# Split the cropped masks back into instance masks, part masks, and surface
# coordinates.
num_parts = tf.shape(part_heatmap)[-1]
instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(
cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)
# Threshold the instance masks. Resulting tensor has shape
# [max_detections, mask_height, mask_width, 1].
instance_masks_int = tf.cast(
tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
# Produce a binary mask that is 1.0 only:
# - in the foreground region for an instance
# - in detections corresponding to the DensePose class
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(
tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int,
det_with_parts)
# Similarly, produce a binary mask that holds the foreground masks only for
# instances without parts (i.e. non-DensePose classes).
det_without_parts = 1 - det_with_parts
instance_masks_without_parts = tf.math.multiply(instance_masks_int,
det_without_parts)
# Assemble a tensor that has standard instance segmentation masks for
# non-DensePose classes (with values in [0, 1]), and part segmentation masks
  # for DensePose classes (with values in [0, 1, ..., num_parts]).
part_mask_int_zero_indexed = tf.math.argmax(
part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = part_mask_int_zero_indexed + 1
all_instances = (instance_masks_without_parts +
instance_masks_with_parts * part_mask_int_one_indexed)
# Gather the surface coordinates for the parts.
surface_coords_cropped = tf.reshape(
surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,
part_mask_int_zero_indexed)
surface_coords = (
surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
def gather_surface_coords_for_parts(surface_coords_cropped,
highest_scoring_part):
"""Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
"""
max_detections, height, width, num_parts, _ = (
shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))
flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [-1])
# Produce lookup indices that represent the locations of the highest scoring
# parts in the `flattened_surface_coords` tensor.
flattened_lookup_indices = (
num_parts * tf.range(max_detections * height * width) +
flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords,
flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
def predicted_embeddings_at_object_centers(embedding_predictions,
y_indices, x_indices):
"""Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
"""
batch_size, _, width, _ = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])
return embeddings
class ObjectDetectionParams(
collections.namedtuple('ObjectDetectionParams', [
'localization_loss', 'scale_loss_weight', 'offset_loss_weight',
'task_loss_weight'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the object detection task. The class is
  immutable after construction. Please see the __new__ function for detailed
  information for each field.
"""
__slots__ = ()
def __new__(cls,
localization_loss,
scale_loss_weight,
offset_loss_weight,
task_loss_weight=1.0):
"""Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: a object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
Returns:
An initialized ObjectDetectionParams namedtuple.
"""
return super(ObjectDetectionParams,
cls).__new__(cls, localization_loss, scale_loss_weight,
offset_loss_weight, task_loss_weight)
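# Illustrative sketch of constructing ObjectDetectionParams. The use of
# losses.L1LocalizationLoss and the weight values below are assumptions made
# for the example (typical of CenterNet configs), not requirements of the
# class.
def _example_object_detection_params():
  """Builds an ObjectDetectionParams namedtuple with example settings."""
  from object_detection.core import losses  # assumed available in this package
  return ObjectDetectionParams(
      localization_loss=losses.L1LocalizationLoss(),
      scale_loss_weight=0.1,
      offset_loss_weight=1.0,
      task_loss_weight=1.0)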
class KeypointEstimationParams(
collections.namedtuple('KeypointEstimationParams', [
'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
'candidate_search_scale', 'candidate_ranking_mode',
'offset_peak_radius', 'per_keypoint_offset'
    ])):
  """Namedtuple to host keypoint estimation related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the keypoint estimation task. The class is
  immutable after construction. Please see the __new__ function for detailed
  information for each field.
"""
__slots__ = ()
def __new__(cls,
task_name,
class_id,
keypoint_indices,
classification_loss,
localization_loss,
keypoint_labels=None,
keypoint_std_dev=None,
keypoint_heatmap_loss_weight=1.0,
keypoint_offset_loss_weight=1.0,
keypoint_regression_loss_weight=1.0,
keypoint_candidate_score_threshold=0.1,
heatmap_bias_init=-2.19,
num_candidates_per_keypoint=100,
task_loss_weight=1.0,
peak_max_pool_kernel_size=3,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
offset_peak_radius=0,
per_keypoint_offset=False):
"""Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
Note that it should be an unique identifier of the task.
class_id: int, the ID of the class that contains the target keypoints to
        be considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
      keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
keypoint_labels: A list of strings representing the label text of each
        keypoint, e.g. "nose", "left_shoulder". Note that the length of this
list should be equal to keypoint_indices.
keypoint_std_dev: A list of float represent the standard deviation of the
Gaussian kernel used to generate the keypoint heatmap. It is to provide
the flexibility of using different sizes of Gaussian kernel for each
keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
types).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
indicating how to select the keypoint candidate.
offset_peak_radius: The radius (in the unit of output pixel) around
        groundtruth heatmap peak to assign the offset targets. If set to 0, then
the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
      per_keypoint_offset: A bool indicating whether to assign offsets for each
        keypoint channel separately. If set to False, the output offset target
        has the shape [batch_size, out_height, out_width, 2] (same behavior as
        the original paper). If set to True, the output offset target has the
        shape [batch_size, out_height, out_width, 2 * num_keypoints]
        (recommended when the offset_peak_radius is not zero).
Returns:
An initialized KeypointEstimationParams namedtuple.
"""
return super(KeypointEstimationParams, cls).__new__(
cls, task_name, class_id, keypoint_indices, classification_loss,
localization_loss, keypoint_labels, keypoint_std_dev,
keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
per_keypoint_offset)
class ObjectCenterParams(
collections.namedtuple('ObjectCenterParams', [
'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',
'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes'
])):
"""Namedtuple to store object center prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
object_center_loss_weight,
heatmap_bias_init=-2.19,
min_box_overlap_iou=0.7,
max_box_predictions=100,
use_labeled_classes=False):
"""Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used for
computing the class specific center heatmaps.
max_box_predictions: int, the maximum number of boxes to predict.
      use_labeled_classes: boolean, if True, compute the loss only for the
        labeled classes.
Returns:
An initialized ObjectCenterParams namedtuple.
"""
return super(ObjectCenterParams,
cls).__new__(cls, classification_loss,
object_center_loss_weight, heatmap_bias_init,
min_box_overlap_iou, max_box_predictions,
use_labeled_classes)
class MaskParams(
collections.namedtuple('MaskParams', [
'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',
'score_threshold', 'heatmap_bias_init'
])):
"""Namedtuple to store mask prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
task_loss_weight=1.0,
mask_height=256,
mask_width=256,
score_threshold=0.5,
heatmap_bias_init=-2.19):
"""Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized MaskParams namedtuple.
"""
return super(MaskParams,
cls).__new__(cls, classification_loss,
task_loss_weight, mask_height, mask_width,
score_threshold, heatmap_bias_init)
class DensePoseParams(
collections.namedtuple('DensePoseParams', [
'class_id', 'classification_loss', 'localization_loss',
'part_loss_weight', 'coordinate_loss_weight', 'num_parts',
'task_loss_weight', 'upsample_to_input_res', 'upsample_method',
'heatmap_bias_init'
])):
"""Namedtuple to store DensePose prediction related parameters."""
__slots__ = ()
def __new__(cls,
class_id,
classification_loss,
localization_loss,
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=24,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='bilinear',
heatmap_bias_init=-2.19):
"""Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
      upsample_method: Method for upsampling DensePose feature maps. Options
        are either 'bilinear' or 'nearest'. This has no effect when
        `upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
"""
return super(DensePoseParams,
cls).__new__(cls, class_id, classification_loss,
localization_loss, part_loss_weight,
coordinate_loss_weight, num_parts,
task_loss_weight, upsample_to_input_res,
upsample_method, heatmap_bias_init)
class TrackParams(
collections.namedtuple('TrackParams', [
'num_track_ids', 'reid_embed_size', 'num_fc_layers',
'classification_loss', 'task_loss_weight'
])):
"""Namedtuple to store tracking prediction related parameters."""
__slots__ = ()
def __new__(cls,
num_track_ids,
reid_embed_size,
num_fc_layers,
classification_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
      num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
        layers for the track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
"""
return super(TrackParams,
cls).__new__(cls, num_track_ids, reid_embed_size,
num_fc_layers, classification_loss,
task_loss_weight)
class TemporalOffsetParams(
collections.namedtuple('TemporalOffsetParams', [
'localization_loss', 'task_loss_weight'
])):
"""Namedtuple to store temporal offset related parameters."""
__slots__ = ()
def __new__(cls,
localization_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
"""
return super(TemporalOffsetParams,
cls).__new__(cls, localization_loss, task_loss_weight)
# The following constants are used to generate the keys of the
# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch
# class.
DETECTION_TASK = 'detection_task'
OBJECT_CENTER = 'object_center'
BOX_SCALE = 'box/scale'
BOX_OFFSET = 'box/offset'
KEYPOINT_REGRESSION = 'keypoint/regression'
KEYPOINT_HEATMAP = 'keypoint/heatmap'
KEYPOINT_OFFSET = 'keypoint/offset'
SEGMENTATION_TASK = 'segmentation_task'
SEGMENTATION_HEATMAP = 'segmentation/heatmap'
DENSEPOSE_TASK = 'densepose_task'
DENSEPOSE_HEATMAP = 'densepose/heatmap'
DENSEPOSE_REGRESSION = 'densepose/regression'
LOSS_KEY_PREFIX = 'Loss'
TRACK_TASK = 'track_task'
TRACK_REID = 'track/reid'
TEMPORALOFFSET_TASK = 'temporal_offset_task'
TEMPORAL_OFFSET = 'track/offset'
def get_keypoint_name(task_name, head_name):
return '%s/%s' % (task_name, head_name)
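# Example (derived from the constants above, with a hypothetical task name):
#   get_keypoint_name('human_pose', KEYPOINT_HEATMAP)
#   == 'human_pose/keypoint/heatmap'
# Actual task names come from the keys of the keypoint parameter dictionary
# passed to CenterNetMetaArch.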
def get_num_instances_from_weights(groundtruth_weights_list):
"""Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
    A scalar integer tensor indicating how many instances/boxes are in the
    images in the batch. Note that this function is usually used to normalize
    the loss, so the minimum return value is 1 to avoid dividing by zero.
"""
num_instances = tf.reduce_sum(
[tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
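# Worked example (assumed weights): for a batch of two images with weights
# [1.0, 1.0, 0.0] and [1.0, 0.0, 0.0], three entries are non-zero, so the
# function returns 3. With all-zero weights it returns the clamped minimum of
# 1, which keeps loss normalization well defined.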
class CenterNetMetaArch(model.DetectionModel):
"""The CenterNet meta architecture [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params=None,
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None):
"""Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
      image_resizer_fn: a callable for image resizing. This callable always
        takes a rank-3 image tensor (corresponding to a single image) and
        returns a rank-3 image tensor, possibly with new spatial dimensions,
        together with a 1-D tensor of shape [3] indicating the shape of the
        true image within the resized image tensor, as the resized image
        tensor could be padded. See builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
"""
assert object_detection_params or keypoint_params_dict
# Shorten the name for convenience and better formatting.
self._is_training = is_training
# The Objects as Points paper attaches loss functions to multiple
    # (`num_feature_outputs`) feature maps in the backbone. E.g.
# for the hourglass backbone, `num_feature_outputs` is 2.
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if densepose_params is not None and mask_params is None:
raise ValueError('To run DensePose prediction, `mask_params` must also '
'be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
# Construct the prediction head nets.
self._prediction_head_dict = self._construct_prediction_heads(
num_classes,
self._num_feature_outputs,
class_prediction_bias_init=self._center_params.heatmap_bias_init)
# Initialize the target assigners.
self._target_assigner_dict = self._initialize_target_assigners(
stride=self._stride,
min_box_overlap_iou=self._center_params.min_box_overlap_iou)
# Will be used in VOD single_frame_meta_arch for tensor reshape.
self._batched_prediction_tensor_names = []
super(CenterNetMetaArch, self).__init__(num_classes)
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init):
"""Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
        functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
Returns:
A dictionary of keras modules generated by calling make_prediction_net
function. It will also create and set a private member of the class when
learning the tracking task.
"""
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = [
make_prediction_net(num_classes, bias_fill=class_prediction_bias_init)
for _ in range(num_feature_outputs)
]
if self._od_params is not None:
prediction_heads[BOX_SCALE] = [
make_prediction_net(NUM_SIZE_CHANNELS)
for _ in range(num_feature_outputs)
]
prediction_heads[BOX_OFFSET] = [
make_prediction_net(NUM_OFFSET_CHANNELS)
for _ in range(num_feature_outputs)
]
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [
make_prediction_net(
num_keypoints, bias_fill=kp_params.heatmap_bias_init)
for _ in range(num_feature_outputs)
]
prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints)
for _ in range(num_feature_outputs)
]
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints)
for _ in range(num_feature_outputs)
]
else:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS)
for _ in range(num_feature_outputs)
]
if self._mask_params is not None:
prediction_heads[SEGMENTATION_HEATMAP] = [
make_prediction_net(num_classes,
bias_fill=self._mask_params.heatmap_bias_init)
for _ in range(num_feature_outputs)]
if self._densepose_params is not None:
prediction_heads[DENSEPOSE_HEATMAP] = [
make_prediction_net( # pylint: disable=g-complex-comprehension
self._densepose_params.num_parts,
bias_fill=self._densepose_params.heatmap_bias_init)
for _ in range(num_feature_outputs)]
prediction_heads[DENSEPOSE_REGRESSION] = [
make_prediction_net(2 * self._densepose_params.num_parts)
for _ in range(num_feature_outputs)
]
if self._track_params is not None:
prediction_heads[TRACK_REID] = [
make_prediction_net(self._track_params.reid_embed_size)
for _ in range(num_feature_outputs)]
# Creates a classification network to train object embeddings by learning
# a projection from embedding space to object track ID space.
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range(self._track_params.num_fc_layers - 1):
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.reid_embed_size,
input_shape=(
self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(
tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.num_track_ids,
input_shape=(
self._track_params.reid_embed_size,)))
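      # As an illustration (hypothetical sizes): with num_fc_layers=3 and
      # reid_embed_size=128, the net built above is Dense(128) -> BatchNorm ->
      # ReLU -> Dense(128) -> BatchNorm -> ReLU -> Dense(num_track_ids), i.e.
      # (num_fc_layers - 1) hidden blocks followed by a final projection to
      # track-ID logits.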
if self._temporal_offset_params is not None:
prediction_heads[TEMPORAL_OFFSET] = [
make_prediction_net(NUM_OFFSET_CHANNELS)
for _ in range(num_feature_outputs)
]
return prediction_heads
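  # As a sketch of the returned dictionary (assuming the detection task plus a
  # single keypoint task named 'human_pose'): keys would include
  # OBJECT_CENTER, BOX_SCALE, BOX_OFFSET, 'human_pose/keypoint/heatmap',
  # 'human_pose/keypoint/offset' and 'human_pose/keypoint/regression', each
  # mapping to a list of `num_feature_outputs` prediction nets.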
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
"""Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
      min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes in order not to be penalized. This
        is used for computing the class-specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
"""
target_assigners = {}
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride, min_box_overlap_iou))
if self._od_params is not None:
target_assigners[DETECTION_TASK] = (
cn_assigner.CenterNetBoxTargetAssigner(stride))
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
target_assigners[task_name] = (
cn_assigner.CenterNetKeypointTargetAssigner(
stride=stride,
class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_std_dev=kp_params.keypoint_std_dev,
peak_radius=kp_params.offset_peak_radius,
per_keypoint_offset=kp_params.per_keypoint_offset))
if self._mask_params is not None:
target_assigners[SEGMENTATION_TASK] = (
cn_assigner.CenterNetMaskTargetAssigner(stride))
if self._densepose_params is not None:
dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride
target_assigners[DENSEPOSE_TASK] = (
cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))
if self._track_params is not None:
target_assigners[TRACK_TASK] = (
cn_assigner.CenterNetTrackTargetAssigner(
stride, self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
target_assigners[TEMPORALOFFSET_TASK] = (
cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))
return target_assigners
def _compute_object_center_loss(self, input_height, input_width,
object_center_predictions, per_pixel_weights):
"""Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the object center loss per instance.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
gt_labeled_classes_list = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(
batch_labeled_classes,
[batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])
per_pixel_weights = per_pixel_weights * batch_labeled_classes
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
# Loop through each feature output head.
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_center_predictions)) * num_boxes)
return loss_per_instance
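  # Concrete normalization example (hypothetical numbers): with two feature
  # output heads, a summed center loss of 12.0 and num_boxes = 3, the method
  # above returns 12.0 / (2 * 3) = 2.0, i.e. a per-head, per-instance loss.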
def _compute_object_detection_losses(self, input_height, input_width,
prediction_dict, per_pixel_weights):
"""Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
"""
od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
scale_predictions=prediction_dict[BOX_SCALE],
offset_predictions=prediction_dict[BOX_OFFSET],
input_height=input_height,
input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (
self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (
self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
def _compute_box_scale_and_offset_loss(self, input_height, input_width,
scale_predictions, offset_predictions):
"""Computes the scale loss of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object scale (i.e height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
Returns:
A tuple of two losses:
        scale_loss: A float scalar tensor representing the object height/width
          loss normalized by the total number of boxes.
        offset_loss: A float scalar tensor representing the object offset loss
          normalized by the total number of boxes.
"""
# TODO(vighneshb) Explore a size invariant version of scale loss.
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets,
batch_weights) = assigner.assign_size_and_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):
# Compute the scale loss.
scale_pred = cn_assigner.get_batch_predictions_from_indices(
scale_pred, batch_indices)
scale_loss += localization_loss_fn(
scale_pred, batch_height_width_targets, weights=batch_weights)
# Compute the offset loss.
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += localization_loss_fn(
offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = tf.reduce_sum(scale_loss) / (
num_predictions * num_boxes)
offset_loss = tf.reduce_sum(offset_loss) / (
num_predictions * num_boxes)
return scale_loss, offset_loss
def _compute_keypoint_estimation_losses(self, task_name, input_height,
input_width, prediction_dict,
per_pixel_weights):
"""Computes the weighted keypoint losses."""
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
heatmap_loss = self._compute_kp_heatmap_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
heatmap_predictions=prediction_dict[heatmap_key],
classification_loss_fn=kp_params.classification_loss,
per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
offset_predictions=prediction_dict[offset_key],
localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
regression_predictions=prediction_dict[regression_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (
kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (
kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (
kp_params.keypoint_regression_loss_weight * reg_loss)
return loss_dict
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,
heatmap_predictions, classification_loss_fn,
per_pixel_weights):
"""Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type,
valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(
tf.expand_dims(valid_mask_batch, axis=-1))
    flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
    # Sum over the number of instances per keypoint type to get the total
    # number of keypoints. Note that this is used to normalize the loss and we
    # keep the minimum value at 1 to avoid producing degenerate loss values
    # when no keypoints are present in the image batch.
num_instances = tf.maximum(
tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),
1.0)
loss = 0.0
# Loop through each feature output head.
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(
pred,
          flattened_heatmap_targets,
weights=tf.ones_like(per_pixel_weights))
# Apply the weights after the loss function to have full control over it.
loss += unweighted_loss * per_pixel_weights * flattened_valid_mask
loss = tf.reduce_sum(loss) / (
float(len(heatmap_predictions)) * num_instances)
return loss
def _compute_kp_offset_loss(self, input_height, input_width, task_name,
offset_predictions, localization_loss_fn):
"""Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets,
batch_weights) = assigner.assign_keypoints_offset_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list)
# Keypoint offset loss.
loss = 0.0
for prediction in offset_predictions:
batch_size, out_height, out_width, channels = _get_shape(prediction, 4)
if channels > 2:
prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
prediction = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
      # The dimensions passed do not match the loss function's docstring, but
      # the loss still computes the correct value.
unweighted_loss = localization_loss_fn(
prediction,
batch_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(offset_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_regression_loss(self, input_height, input_width, task_name,
regression_predictions, localization_loss_fn):
"""Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
# keypoint regression offset loss.
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets,
batch_weights) = assigner.assign_joint_regression_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
batch_size, out_height, out_width, _ = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(
reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(
reg_prediction,
batch_regression_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(regression_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
"""Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
"""
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(
segmentation_heatmap, per_pixel_weights)
losses = {
SEGMENTATION_HEATMAP: mask_loss
}
return losses
def _compute_mask_loss(self, segmentation_predictions,
per_pixel_weights):
"""Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
"""
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
# Loop through each feature output head.
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
# TODO(ronnyvotel): Consider other ways to normalize loss.
total_loss = tf.reduce_sum(loss) / (
float(len(segmentation_predictions)) * total_pixels_in_loss)
return total_loss
def _compute_densepose_losses(self, input_height, input_width,
prediction_dict):
"""Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
"""
dp_heatmap_loss, dp_regression_loss = (
self._compute_densepose_part_and_coordinate_losses(
input_height=input_height,
input_width=input_width,
part_predictions=prediction_dict[DENSEPOSE_HEATMAP],
surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (
self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (
self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
def _compute_densepose_part_and_coordinate_losses(
self, input_height, input_width, part_predictions,
surface_coord_predictions):
"""Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
"""
gt_dp_num_points_list = self.groundtruth_lists(
fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(
fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
assigner.assign_part_and_coordinate_targets(
height=input_height,
width=input_width,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for part_pred, surface_coord_pred in zip(part_predictions,
surface_coord_predictions):
# Potentially upsample the feature maps, so that better quality (i.e.
# higher res) groundtruth can be applied.
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
surface_coord_pred)
# Compute the part prediction loss.
part_pred = cn_assigner.get_batch_predictions_from_indices(
part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(
part_pred[:, tf.newaxis, :],
batch_part_ids[:, tf.newaxis, :],
weights=batch_weights[:, tf.newaxis, tf.newaxis])
# Compute the surface coordinate loss.
batch_size, out_height, out_width, _ = _get_shape(
surface_coord_pred, 4)
surface_coord_pred = tf.reshape(
surface_coord_pred, [batch_size, out_height, out_width, -1, 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(
surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(
surface_coord_pred,
batch_surface_coords,
weights=batch_weights[:, tf.newaxis])
part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (
num_predictions * num_valid_points)
surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (
num_predictions * num_valid_points)
return part_prediction_loss, surface_coord_loss
def _compute_track_losses(self, input_height, input_width, prediction_dict):
"""Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
"""
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(
input_height=input_height,
input_width=input_width,
object_reid_predictions=object_reid_predictions)
losses = {
TRACK_REID: embedding_loss
}
return losses
def _compute_track_embedding_loss(self, input_height, input_width,
object_reid_predictions):
"""Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
"""
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[TRACK_TASK]
batch_indices, batch_weights, track_targets = assigner.assign_track_targets(
height=input_height,
width=input_width,
gt_track_ids_list=gt_track_ids_list,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
loss = 0.0
object_reid_loss = self._track_params.classification_loss
# Loop through each feature output head.
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(
pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(
reid_classification, track_targets, weights=batch_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_reid_predictions)) * num_boxes)
return loss_per_instance
def _compute_temporal_offset_loss(self, input_height,
input_width, prediction_dict):
"""Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(
fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(
fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(
get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets,
batch_weights) = assigner.assign_temporal_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_offsets_list=gt_offsets_list,
gt_match_list=gt_match_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None],
batch_offset_targets[:, None],
weights=batch_weights)
offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
def preprocess(self, inputs):
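    """Resizes the input images and applies feature extractor preprocessing.

    Args:
      inputs: a [batch, height, width, channels] float32 tensor of images.

    Returns:
      A 2-tuple of the preprocessed (resized) images and a [batch, 3] int32
      tensor of true image shapes within the (possibly padded) resized images.
    """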
outputs = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
resized_inputs, true_image_shapes = outputs
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def predict(self, preprocessed_inputs, _):
"""Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
location. This field exists only when object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
        Note the $TASK_NAME is provided by the KeypointEstimationParams
        namedtuple used to differentiate between different keypoint tasks.
"""
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for head_name, heads in self._prediction_head_dict.items():
predictions[head_name] = [
head(feature) for (feature, head) in zip(features_list, heads)
]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
        'Loss/track/reid', (optional)
        'Loss/track/offset'] (optional)
      scalar tensors corresponding to the losses for different tasks. Note the
      $TASK_NAME is provided by the KeypointEstimationParams namedtuple used
      to differentiate between different keypoint tasks.
"""
_, input_height, input_width, _ = _get_shape(
prediction_dict['preprocessed_inputs'], 4)
output_height, output_width = (input_height // self._stride,
input_width // self._stride)
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(
tf.to_float(true_image_shapes) / self._stride)
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(
object_center_predictions=prediction_dict[OBJECT_CENTER],
input_height=input_height,
input_width=input_width,
per_pixel_weights=valid_anchor_weights)
losses = {
OBJECT_CENTER:
self._center_params.object_center_loss_weight * object_center_loss
}
if self._od_params is not None:
od_losses = self._compute_object_detection_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = od_losses[key] * self._od_params.task_loss_weight
losses.update(od_losses)
if self._kp_params_dict is not None:
for task_name, params in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(
task_name=task_name,
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = kp_losses[key] * params.task_loss_weight
losses.update(kp_losses)
if self._mask_params is not None:
seg_losses = self._compute_segmentation_losses(
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight
losses.update(seg_losses)
if self._densepose_params is not None:
densepose_losses = self._compute_densepose_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (
densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if self._track_params is not None:
track_losses = self._compute_track_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (
track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if self._temporal_offset_params is not None:
offset_losses = self._compute_temporal_offset_loss(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (
offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
# Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the
# losses will be grouped together in Tensorboard.
return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)
for key, val in losses.items()])
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
          max_detections, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
          max_detections, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
"""
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
# Get x, y and channel indices corresponding to the top indices in the class
# center predictions.
detection_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(
object_center_prob, max_pool_kernel_size=3,
k=self._center_params.max_box_predictions))
boxes_strided, classes, scores, num_detections = (
prediction_tensors_to_boxes(
detection_scores, y_indices, x_indices, channel_indices,
prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1]))
boxes = convert_strided_predictions_to_normalized_boxes(
boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {
fields.DetectionResultFields.detection_boxes: boxes,
fields.DetectionResultFields.detection_scores: scores,
fields.DetectionResultFields.detection_classes: classes,
fields.DetectionResultFields.num_detections: num_detections,
'detection_boxes_strided': boxes_strided
}
if self._kp_params_dict:
keypoints, keypoint_scores = self._postprocess_keypoints(
prediction_dict, classes, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=True))
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores:
keypoint_scores
})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
densepose_part_heatmap, densepose_surface_coords = None, None
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
densepose_class_index = self._densepose_params.class_id
instance_masks, surface_coords = (
convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap, densepose_surface_coords,
stride=self._stride, mask_height=self._mask_params.mask_height,
mask_width=self._mask_params.mask_width,
score_threshold=self._mask_params.score_threshold,
densepose_class_index=densepose_class_index))
postprocess_dict[
fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[
fields.DetectionResultFields.detection_surface_coords] = (
surface_coords)
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict,
y_indices, x_indices)
postprocess_dict.update({
fields.DetectionResultFields.detection_embeddings: embeddings
})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(
y_indices, x_indices,
prediction_dict[TEMPORAL_OFFSET][-1])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
return postprocess_dict
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
"""Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
      embeddings: A [batch_size, max_detections, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
"""
embedding_predictions = prediction_dict[TRACK_REID][-1]
embeddings = predicted_embeddings_at_object_centers(
embedding_predictions, y_indices, x_indices)
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)
return embeddings
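  # Note on the normalization above: tf.linalg.normalize with axis=-1 divides
  # each embedding vector by its L2 norm, so for a toy embedding [3.0, 4.0]
  # the returned vector is [0.6, 0.8]. Downstream similarity matching can then
  # be computed with plain dot products.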
def _postprocess_keypoints(self, prediction_dict, classes, y_indices,
x_indices, boxes, num_detections):
"""Performs postprocessing on keypoint predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
      keypoints: a [batch_size, max_detections, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict
in self._kp_params_dict.values())
batch_size, max_detections, _ = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for task_name, kp_params in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
instance_inds = self._get_instance_indices(
classes, num_detections, ex_ind, kp_params.class_id)
def true_fn(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params):
"""Logics to execute when instance_inds is not an empty set."""
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, num_instances_i, num_keypoints_i, 2] and
# [1, num_instances_i, num_keypoints_i], respectively. Note that
# num_instances_i and num_keypoints_i refers to the number of
# instances and keypoints for class i, respectively.
kpt_coords_for_class, kpt_scores_for_class = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params))
# Expand keypoint dimension (with padding) so that coordinates and
# scores have shape [1, num_instances_i, num_total_keypoints, 2] and
# [1, num_instances_i, num_total_keypoints], respectively.
kpts_coords_for_class_padded, kpt_scores_for_class_padded = (
_pad_to_full_keypoint_dim(
kpt_coords_for_class, kpt_scores_for_class,
kp_params.keypoint_indices, total_num_keypoints))
return kpts_coords_for_class_padded, kpt_scores_for_class_padded
def false_fn():
"""Logics to execute when the instance_inds is an empty set."""
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32),
tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(
true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds, ex_ind,
kp_params)
results = tf.cond(tf.size(instance_inds) > 0, true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
# Concatenate all keypoints across all classes (single example).
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list,
axis=0)
if tf.size(instance_inds_for_example) > 0:
# Scatter into tensor where instances align with original detection
# instances. New shape of keypoint coordinates and scores are
# [1, max_detections, num_total_keypoints, 2] and
# [1, max_detections, num_total_keypoints], respectively.
kpt_coords_for_example_all_det, kpt_scores_for_example_all_det = (
_pad_to_full_instance_dim(
kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example,
self._center_params.max_box_predictions))
else:
kpt_coords_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints], dtype=tf.float32)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_total_keypoints, 2] and
# [batch_size, max_detections, num_total_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return keypoints, keypoint_scores
def _get_instance_indices(self, classes, num_detections, batch_index,
class_id):
"""Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
      class_id: An integer class id; only detections with this class are selected.
Returns:
instance_inds: A [num_instances] int tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
boxes)
"""
classes = classes[batch_index:batch_index+1, ...]
_, max_detections = shape_utils.combined_static_and_dynamic_shape(
classes)
# Get the detection indices corresponding to the target class.
valid_detections_with_kpt_class = tf.math.logical_and(
tf.range(max_detections) < num_detections[batch_index],
classes[0] == class_id)
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
return instance_inds
def _postprocess_keypoints_for_class_and_image(
self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes,
y_indices, x_indices, boxes, indices_with_kpt_class, batch_index,
kp_params):
"""Postprocess keypoints for a single image and class.
This function performs the following postprocessing operations on a single
image and single keypoint class:
- Converts keypoints scores to range [0, 1] with sigmoid.
- Determines the detections that correspond to the specified keypoint class.
- Gathers the regressed keypoints at the detection (i.e. box) centers.
- Gathers keypoint candidates from the keypoint heatmaps.
- Snaps regressed keypoints to nearby keypoint candidates.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
indices_with_kpt_class: A [num_instances] int tensor where each element
indicates the instance location within the `classes` tensor. This is
useful to associate the refined keypoints with the original detections
(i.e. boxes)
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
"""
keypoint_indices = kp_params.keypoint_indices
num_keypoints = len(keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(
keypoint_heatmap[batch_index:batch_index+1, ...])
keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]
keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]
y_indices = y_indices[batch_index:batch_index+1, ...]
x_indices = x_indices[batch_index:batch_index+1, ...]
# Gather the feature map locations corresponding to the object class.
y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class,
axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class,
axis=1)
boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1)
# Gather the regressed keypoints. Final tensor has shape
# [1, num_instances, num_keypoints, 2].
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(
keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class)
regressed_keypoints_for_objects = tf.reshape(
regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])
# Get the candidate keypoints and scores.
# The shape of keypoint_candidates and keypoint_scores is:
# [1, num_candidates_per_keypoint, num_keypoints, 2] and
# [1, num_candidates_per_keypoint, num_keypoints], respectively.
keypoint_candidates, keypoint_scores, num_keypoint_candidates = (
prediction_tensors_to_keypoint_candidates(
keypoint_heatmap, keypoint_offsets,
keypoint_score_threshold=(
kp_params.keypoint_candidate_score_threshold),
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=kp_params.num_candidates_per_keypoint))
# Get the refined keypoints and scores, of shape
# [1, num_instances, num_keypoints, 2] and
# [1, num_instances, num_keypoints], respectively.
refined_keypoints, refined_scores = refine_keypoints(
regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=boxes_for_kpt_class,
unmatched_keypoint_score=kp_params.unmatched_keypoint_score,
box_scale=kp_params.box_scale,
candidate_search_scale=kp_params.candidate_search_scale,
candidate_ranking_mode=kp_params.candidate_ranking_mode)
return refined_keypoints, refined_scores
def regularization_losses(self):
return []
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running Tensorflow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`, `fine_tune`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
'fine_tune': used when loading the entire CenterNet feature extractor
pre-trained on other tasks. The checkpoints saved during CenterNet
model training can be directly loaded using this type. With this
checkpoint type, the weights of the feature extractor are expected
under the attribute 'model._feature_extractor'.
For more details, see the tensorflow section on Loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if fine_tune_checkpoint_type not in supported_types:
message = ('Checkpoint type "{}" not supported for {}. '
'Supported types are {}')
raise ValueError(
message.format(fine_tune_checkpoint_type,
self._feature_extractor.__class__.__name__,
supported_types))
elif fine_tune_checkpoint_type == 'fine_tune':
feature_extractor_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(
fine_tune_checkpoint_type)}
def updates(self):
raise RuntimeError('This model is intended to be used with model_lib_v2 '
'which does not support updates()')
| 45.883802 | 97 | 0.716559 | [
"Apache-2.0"
] | AvikantSrivastava/models | research/object_detection/meta_architectures/center_net_meta_arch.py | 145,314 | Python |
#!/opt/local/bin/python
import timeit
import time
import sys
def find_score(name):
score = 0
for character in name:
score += ( ord(character) - 64 )
return score
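# Quick sanity check (illustrative, not part of the original script):
# find_score("COLIN") == 3 + 15 + 12 + 9 + 14 == 53, which matches the worked
# example given in Project Euler problem 22.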
start_time = timeit.default_timer()
total = 0
cardinal_position = 0
import csv
with open('p022_names.txt', newline='') as csvfile:
names_reader = csv.reader(csvfile, dialect='unix')
for row in names_reader:
for name in sorted(row):
cardinal_position += 1
name_score = find_score(name)
total += cardinal_position * name_score
print("%3d - %20s: %6d %7d" % ( cardinal_position, name, name_score, total) )
print( "Ran in %f seconds." % ( timeit.default_timer() - start_time) )
| 25.892857 | 89 | 0.642759 | [
"Artistic-2.0"
] | perlygatekeeper/glowing-robot | Project_Euler/22_name_scores/name_scores.py | 725 | Python |
from dataclasses import dataclass
from running_modes.configurations.automated_curriculum_learning.merging_strategy_configuration import \
MergingStrategyConfiguration
from running_modes.configurations.automated_curriculum_learning.production_strategy_configuration import \
ProductionStrategyConfiguration
from running_modes.configurations.automated_curriculum_learning.ranking_strategy_configuration import \
RankingStrategyConfiguration
@dataclass
class AutomatedCurriculumLearningConfiguration:
ranking_strategy: RankingStrategyConfiguration
merging_strategy: MergingStrategyConfiguration
production_strategy: ProductionStrategyConfiguration
@dataclass
class AutomatedCurriculumLearningComponents:
"""This class holds the necessary configuration components to run Auto CL"""
prior: str
agent: str
    automated_curriculum_learning: AutomatedCurriculumLearningConfiguration
| 39.826087 | 106 | 0.868996 | [
"Apache-2.0"
] | argentumgun/Reinvent | running_modes/configurations/automated_curriculum_learning/automated_curriculum_learning_configuration.py | 916 | Python |
import sys
from kubernetes import client, config
def main():
currNameSpace=sys.argv[1]
config.load_incluster_config()
v1 = client.CoreV1Api()
nodeList = v1.list_pod_for_all_namespaces(watch=False)
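    # Note: this call lists pods across all namespaces and the loop below
    # filters client-side; CoreV1Api.list_namespaced_pod(namespace) could do
    # the filtering server-side, but the cluster-wide call is kept as in the
    # original.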
for node in nodeList.items:
if node.metadata.namespace == currNameSpace:
print(("%s %s" % (node.metadata.name, node.status.pod_ip)))
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Please provide namespace to process")
sys.exit(1)
main()
| 24.428571 | 71 | 0.654971 | [
"Apache-2.0"
] | AnithaKuberan/testrunner | cloudtest/support/getNodeIps.py | 513 | Python |
# from .LinuxInstaller import LinuxInstaller
from .run import run
from .solution import *
from .installer import *
from .controller import *
from .errors import *
from .helper.configuration import ConfVar
from . import helper
from . import builder
| 24.8 | 44 | 0.790323 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | QuailInstaller/Quail | iquail/__init__.py | 248 | Python |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Need to figure out why this only fails on travis
# pylint: disable=bad-continuation
"""Bootstraps starting a test job.
The following should already be done:
git checkout http://k8s.io/test-infra
cd $WORKSPACE
test-infra/jenkins/bootstrap.py <--repo=R || --bare> <--job=J> <--pull=P || --branch=B>
The bootstrapper now does the following:
# Note start time
# check out repos defined in --repo
# note job started
# call runner defined in $JOB.json
# upload artifacts (this will change later)
# upload build-log.txt
# note job ended
The contract with the runner is as follows:
* Runner must exit non-zero if job fails for any reason.
"""
import argparse
import contextlib
import json
import logging
import os
import pipes
import random
import re
import select
import signal
import socket
import subprocess
import sys
import tempfile
import time
ORIG_CWD = os.getcwd() # Checkout changes cwd
def read_all(end, stream, append):
"""Read all buffered lines from a stream."""
while not end or time.time() < end:
line = stream.readline()
if not line:
return True # Read everything
# Strip \n at the end if any. Last line of file may not have one.
append(line.rstrip('\n'))
# Is there more on the buffer?
ret = select.select([stream.fileno()], [], [], 0.1)
if not ret[0]:
return False # Cleared buffer but not at the end
return False # Time expired
def elapsed(since):
"""Return the number of minutes elapsed since a time."""
return (time.time() - since) / 60
def terminate(end, proc, kill):
"""Terminate or kill the process after end."""
if not end or time.time() <= end:
return False
if kill: # Process will not die, kill everything
pgid = os.getpgid(proc.pid)
logging.info(
'Kill %d and process group %d', proc.pid, pgid)
os.killpg(pgid, signal.SIGKILL)
proc.kill()
return True
logging.info(
'Terminate %d on timeout', proc.pid)
proc.terminate()
return True
def _call(end, cmd, stdin=None, check=True, output=None, log_failures=True):
"""Start a subprocess."""
logging.info('Call: %s', ' '.join(pipes.quote(c) for c in cmd))
begin = time.time()
if end:
end = max(end, time.time() + 60) # Allow at least 60s per command
proc = subprocess.Popen(
cmd,
stdin=subprocess.PIPE if stdin is not None else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setsid,
)
if stdin:
proc.stdin.write(stdin)
proc.stdin.close()
out = []
code = None
timeout = False
reads = {
proc.stderr.fileno(): (proc.stderr, logging.warning),
proc.stdout.fileno(): (
proc.stdout, (out.append if output else logging.info)),
}
while reads:
if terminate(end, proc, timeout):
if timeout: # We killed everything
break
# Give subprocess some cleanup time before killing.
end = time.time() + 15 * 60
timeout = True
ret = select.select(reads, [], [], 0.1)
for fdesc in ret[0]:
if read_all(end, *reads[fdesc]):
reads.pop(fdesc)
if not ret[0] and proc.poll() is not None:
break # process exited without closing pipes (timeout?)
code = proc.wait()
if timeout:
code = code or 124
logging.error('Build timed out')
if code and log_failures:
logging.error('Command failed')
logging.info(
'process %d exited with code %d after %.1fm',
proc.pid, code, elapsed(begin))
out.append('')
lines = output and '\n'.join(out)
if check and code:
raise subprocess.CalledProcessError(code, cmd, lines)
return lines
def ref_has_shas(ref):
"""Determine if a reference specifies shas (contains ':')"""
return isinstance(ref, basestring) and ':' in ref
def pull_numbers(pull):
"""Turn a pull reference list into a list of PR numbers to merge."""
if ref_has_shas(pull):
return [r.split(':')[0] for r in pull.split(',')][1:]
return [str(pull)]
def pull_ref(pull):
"""Turn a PR number of list of refs into specific refs to fetch and check out."""
if isinstance(pull, int) or ',' not in pull:
return ['+refs/pull/%d/merge' % int(pull)], ['FETCH_HEAD']
pulls = pull.split(',')
refs = []
checkouts = []
for ref in pulls:
if ':' in ref: # master:abcd or 1234:abcd
name, sha = ref.split(':')
elif not refs: # master
name, sha = ref, 'FETCH_HEAD'
else:
name = ref
sha = 'refs/pr/%s' % ref
checkouts.append(sha)
if not refs: # First ref should be branch to merge into
refs.append(name)
else: # Subsequent refs should be PR numbers
num = int(name)
refs.append('+refs/pull/%d/head:refs/pr/%d' % (num, num))
return refs, checkouts
def branch_ref(branch):
"""Split branch:sha if necessary."""
if ref_has_shas(branch):
split_refs = branch.split(':')
return [split_refs[0]], [split_refs[1]]
return [branch], ['FETCH_HEAD']
def repository(repo, ssh):
"""Return the url associated with the repo."""
if repo.startswith('k8s.io/'):
repo = 'github.com/kubernetes/%s' % (repo[len('k8s.io/'):])
if ssh:
if ":" not in repo:
parts = repo.split('/', 1)
repo = '%s:%s' % (parts[0], parts[1])
return 'git@%s' % repo
return 'https://%s' % repo
def random_sleep(attempt):
"""Sleep 2**attempt seconds with a random fractional offset."""
time.sleep(random.random() + attempt ** 2)
def checkout(call, repo, branch, pull, ssh='', git_cache='', clean=False):
"""Fetch and checkout the repository at the specified branch/pull."""
# pylint: disable=too-many-locals
if bool(branch) == bool(pull):
raise ValueError('Must specify one of --branch or --pull')
if pull:
refs, checkouts = pull_ref(pull)
else:
refs, checkouts = branch_ref(branch)
git = 'git'
if git_cache:
cache_dir = '%s/%s' % (git_cache, repo)
try:
os.makedirs(cache_dir)
except OSError:
pass
call([git, 'init', repo, '--separate-git-dir=%s' % cache_dir])
call(['rm', '-f', '%s/index.lock' % cache_dir])
else:
call([git, 'init', repo])
os.chdir(repo)
if clean:
call([git, 'clean', '-dfx'])
call([git, 'reset', '--hard'])
# To make a merge commit, a user needs to be set. It's okay to use a dummy
# user here, since we're not exporting the history.
call([git, 'config', '--local', 'user.name', 'K8S Bootstrap'])
call([git, 'config', '--local', 'user.email', 'k8s_bootstrap@localhost'])
retries = 3
for attempt in range(retries):
try:
call([git, 'fetch', '--quiet', '--tags', repository(repo, ssh)] + refs)
break
except subprocess.CalledProcessError as cpe:
if attempt >= retries - 1:
raise
if cpe.returncode != 128:
raise
logging.warning('git fetch failed')
random_sleep(attempt)
call([git, 'checkout', '-B', 'test', checkouts[0]])
for ref, head in zip(refs, checkouts)[1:]:
call(['git', 'merge', '--no-ff', '-m', 'Merge %s' % ref, head])
def repos_dict(repos):
"""Returns {"repo1": "branch", "repo2": "pull"}."""
return {r: b or p for (r, (b, p)) in repos.items()}
def start(gsutil, paths, stamp, node_name, version, repos):
"""Construct and upload started.json."""
data = {
'timestamp': int(stamp),
'jenkins-node': node_name,
'node': node_name,
}
if version:
data['repo-version'] = version
data['version'] = version # TODO(fejta): retire
if repos:
pull = repos[repos.main]
if ref_has_shas(pull[1]):
data['pull'] = pull[1]
data['repos'] = repos_dict(repos)
gsutil.upload_json(paths.started, data)
# Upload a link to the build path in the directory
if paths.pr_build_link:
gsutil.upload_text(
paths.pr_build_link,
paths.pr_path,
additional_headers=['-h', 'x-goog-meta-link: %s' % paths.pr_path]
)
class GSUtil(object):
"""A helper class for making gsutil commands."""
gsutil = 'gsutil'
def __init__(self, call):
self.call = call
def stat(self, path):
"""Return metadata about the object, such as generation."""
cmd = [self.gsutil, 'stat', path]
return self.call(cmd, output=True, log_failures=False)
def ls(self, path):
"""List a bucket or subdir."""
cmd = [self.gsutil, 'ls', path]
return self.call(cmd, output=True)
def upload_json(self, path, jdict, generation=None):
"""Upload the dictionary object to path."""
if generation is not None: # generation==0 means object does not exist
gen = ['-h', 'x-goog-if-generation-match:%s' % generation]
else:
gen = []
cmd = [
self.gsutil, '-q',
'-h', 'Content-Type:application/json'] + gen + [
'cp', '-', path]
self.call(cmd, stdin=json.dumps(jdict, indent=2))
def copy_file(self, dest, orig):
"""Copy the file to the specified path using compressed encoding."""
cmd = [self.gsutil, '-q', 'cp', '-Z', orig, dest]
self.call(cmd)
def upload_text(self, path, txt, additional_headers=None, cached=True):
"""Copy the text to path, optionally disabling caching."""
headers = ['-h', 'Content-Type:text/plain']
if not cached:
headers += ['-h', 'Cache-Control:private, max-age=0, no-transform']
if additional_headers:
headers += additional_headers
cmd = [self.gsutil, '-q'] + headers + ['cp', '-', path]
self.call(cmd, stdin=txt)
def cat(self, path, generation):
"""Return contents of path#generation"""
cmd = [self.gsutil, '-q', 'cat', '%s#%s' % (path, generation)]
return self.call(cmd, output=True)
def upload_artifacts(self, gsutil, path, artifacts):
"""Upload artifacts to the specified path."""
# Upload artifacts
if not os.path.isdir(artifacts):
return
try:
# If remote path exists, it will create .../_artifacts subdir instead
gsutil.ls(path)
# Success means remote path exists
remote_base = os.path.basename(path)
local_base = os.path.basename(artifacts)
if remote_base != local_base:
# if basename are different, need to copy things over first.
localpath = artifacts.replace(local_base, remote_base)
os.rename(artifacts, localpath)
artifacts = localpath
path = path[:-len(remote_base + '/')]
except subprocess.CalledProcessError:
            logging.warning('Remote dir %s does not exist yet', path)
cmd = [
self.gsutil, '-m', '-q',
'-o', 'GSUtil:use_magicfile=True',
'cp', '-r', '-c', '-z', 'log,txt,xml',
artifacts, path,
]
self.call(cmd)
def append_result(gsutil, path, build, version, passed):
"""Download a json list and append metadata about this build to it."""
# TODO(fejta): delete the clone of this logic in upload-to-gcs.sh
# (this is update_job_result_cache)
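    # The retry loop below is an optimistic-concurrency update: read the
    # object's GCS generation, append to the cached list, then upload with an
    # x-goog-if-generation-match header (via upload_json) so a concurrent
    # writer forces a retry rather than silently losing an update.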
end = time.time() + 300 # try for up to five minutes
errors = 0
while time.time() < end:
if errors:
random_sleep(min(errors, 3))
try:
out = gsutil.stat(path)
gen = re.search(r'Generation:\s+(\d+)', out).group(1)
except subprocess.CalledProcessError:
gen = 0
if gen:
try:
cache = json.loads(gsutil.cat(path, gen))
if not isinstance(cache, list):
raise ValueError(cache)
except ValueError as exc:
logging.warning('Failed to decode JSON: %s', exc)
cache = []
except subprocess.CalledProcessError: # gen doesn't exist
errors += 1
continue
else:
cache = []
cache.append({
'version': version, # TODO(fejta): retire
'job-version': version,
'buildnumber': build,
'passed': bool(passed),
'result': 'SUCCESS' if passed else 'FAILURE',
})
cache = cache[-300:]
try:
gsutil.upload_json(path, cache, generation=gen)
return
except subprocess.CalledProcessError:
logging.warning('Failed to append to %s#%s', path, gen)
errors += 1
def metadata(repos, artifacts, call):
"""Return metadata associated for the build, including inside artifacts."""
path = os.path.join(artifacts or '', 'metadata.json')
meta = None
if os.path.isfile(path):
try:
with open(path) as fp:
meta = json.loads(fp.read())
except (IOError, ValueError):
pass
if not meta or not isinstance(meta, dict):
meta = {}
if repos:
meta['repo'] = repos.main
meta['repos'] = repos_dict(repos)
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['repo-commit'] = commit.strip()
except subprocess.CalledProcessError:
pass
cwd = os.getcwd()
os.chdir(test_infra('.'))
try:
commit = call(['git', 'rev-parse', 'HEAD'], output=True)
if commit:
meta['infra-commit'] = commit.strip()[:9]
except subprocess.CalledProcessError:
pass
os.chdir(cwd)
return meta
def finish(gsutil, paths, success, artifacts, build, version, repos, call):
"""
Args:
paths: a Paths instance.
success: the build passed if true.
artifacts: a dir containing artifacts to upload.
build: identifier of this build.
version: identifies what version of the code the build tested.
      repos: the Repos instance describing the target repositories
"""
if os.path.isdir(artifacts) and any(f for _, _, f in os.walk(artifacts)):
try:
gsutil.upload_artifacts(gsutil, paths.artifacts, artifacts)
except subprocess.CalledProcessError:
logging.warning('Failed to upload artifacts')
meta = metadata(repos, artifacts, call)
if not version:
version = meta.get('job-version')
if not version: # TODO(fejta): retire
version = meta.get('version')
# github.com/kubernetes/release/find_green_build depends on append_result()
# TODO(fejta): reconsider whether this is how we want to solve this problem.
append_result(gsutil, paths.result_cache, build, version, success)
if paths.pr_result_cache:
append_result(gsutil, paths.pr_result_cache, build, version, success)
data = {
# TODO(fejta): update utils.go in contrib to accept a float
'timestamp': int(time.time()),
'result': 'SUCCESS' if success else 'FAILURE',
'passed': bool(success),
'metadata': meta,
}
if version:
data['job-version'] = version
data['version'] = version # TODO(fejta): retire
gsutil.upload_json(paths.finished, data)
# Upload the latest build for the job.
# Do this last, since other tools expect the rest of the data to be
# published when this file is created.
for path in {paths.latest, paths.pr_latest}:
if path:
try:
gsutil.upload_text(path, str(build), cached=False)
except subprocess.CalledProcessError:
logging.warning('Failed to update %s', path)
def test_infra(*paths):
"""Return path relative to root of test-infra repo."""
return os.path.join(ORIG_CWD, os.path.dirname(__file__), '..', *paths)
def node():
"""Return the name of the node running the build."""
# TODO(fejta): jenkins sets the node name and our infra expect this value.
# TODO(fejta): Consider doing something different here.
if NODE_ENV not in os.environ:
os.environ[NODE_ENV] = ''.join(socket.gethostname().split('.')[:1])
return os.environ[NODE_ENV]
def find_version(call):
"""Determine and return the version of the build."""
# TODO(fejta): once job-version is functional switch this to
# git rev-parse [--short=N] HEAD^{commit}
version_file = 'version'
if os.path.isfile(version_file):
# e2e tests which download kubernetes use this path:
with open(version_file) as fp:
return fp.read().strip()
version_script = 'hack/lib/version.sh'
if os.path.isfile(version_script):
cmd = [
'bash', '-c', (
"""
set -o errexit
set -o nounset
export KUBE_ROOT=.
source %s
kube::version::get_version_vars
echo $KUBE_GIT_VERSION
""" % version_script)
]
return call(cmd, output=True).strip()
return 'unknown'
class Paths(object): # pylint: disable=too-many-instance-attributes,too-few-public-methods
"""Links to remote gcs-paths for uploading results."""
def __init__( # pylint: disable=too-many-arguments
self,
artifacts, # artifacts folder (in build)
build_log, # build-log.txt (in build)
pr_path, # path to build
finished, # finished.json (metadata from end of build)
latest, # latest-build.txt (in job)
            pr_build_link,    # file containing pr_path (in job directory)
pr_latest, # latest-build.txt (in pr job)
pr_result_cache, # jobResultsCache.json (in pr job)
result_cache, # jobResultsCache.json (cache of latest results in job)
started, # started.json (metadata from start of build)
):
self.artifacts = artifacts
self.build_log = build_log
self.pr_path = pr_path
self.finished = finished
self.latest = latest
self.pr_build_link = pr_build_link
self.pr_latest = pr_latest
self.pr_result_cache = pr_result_cache
self.result_cache = result_cache
self.started = started
def ci_paths(base, job, build):
"""Return a Paths() instance for a continuous build."""
latest = os.path.join(base, job, 'latest-build.txt')
return Paths(
artifacts=os.path.join(base, job, build, 'artifacts'),
build_log=os.path.join(base, job, build, 'build-log.txt'),
pr_path=None,
finished=os.path.join(base, job, build, 'finished.json'),
latest=latest,
pr_build_link=None,
pr_latest=None,
pr_result_cache=None,
result_cache=os.path.join(base, job, 'jobResultsCache.json'),
started=os.path.join(base, job, build, 'started.json'),
)
def pr_paths(base, repos, job, build):
"""Return a Paths() instance for a PR."""
if not repos:
raise ValueError('repos is empty')
repo = repos.main
pull = str(repos[repo][1])
if repo in ['k8s.io/kubernetes', 'kubernetes/kubernetes']:
prefix = ''
elif repo.startswith('k8s.io/'):
prefix = repo[len('k8s.io/'):]
elif repo.startswith('kubernetes/'):
prefix = repo[len('kubernetes/'):]
elif repo.startswith('github.com/'):
prefix = repo[len('github.com/'):].replace('/', '_')
else:
prefix = repo.replace('/', '_')
# Batch merges are those with more than one PR specified.
pr_nums = pull_numbers(pull)
if len(pr_nums) > 1:
pull = os.path.join(prefix, 'batch')
else:
pull = os.path.join(prefix, pr_nums[0])
pr_path = os.path.join(base, 'pull', pull, job, build)
result_cache = os.path.join(
base, 'directory', job, 'jobResultsCache.json')
pr_result_cache = os.path.join(
base, 'pull', pull, job, 'jobResultsCache.json')
return Paths(
artifacts=os.path.join(pr_path, 'artifacts'),
build_log=os.path.join(pr_path, 'build-log.txt'),
pr_path=pr_path,
finished=os.path.join(pr_path, 'finished.json'),
latest=os.path.join(base, 'directory', job, 'latest-build.txt'),
pr_build_link=os.path.join(base, 'directory', job, '%s.txt' % build),
pr_latest=os.path.join(base, 'pull', pull, job, 'latest-build.txt'),
pr_result_cache=pr_result_cache,
result_cache=result_cache,
started=os.path.join(pr_path, 'started.json'),
)
BUILD_ENV = 'BUILD_NUMBER'
BOOTSTRAP_ENV = 'BOOTSTRAP_MIGRATION'
CLOUDSDK_ENV = 'CLOUDSDK_CONFIG'
GCE_KEY_ENV = 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE'
GUBERNATOR = 'https://k8s-gubernator.appspot.com/build'
HOME_ENV = 'HOME'
JOB_ENV = 'JOB_NAME'
NODE_ENV = 'NODE_NAME'
SERVICE_ACCOUNT_ENV = 'GOOGLE_APPLICATION_CREDENTIALS'
WORKSPACE_ENV = 'WORKSPACE'
GCS_ARTIFACTS_ENV = 'GCS_ARTIFACTS_DIR'
def build_name(started):
"""Return the unique(ish) string representing this build."""
# TODO(fejta): right now jenkins sets the BUILD_NUMBER and does this
# in an environment variable. Consider migrating this to a
# bootstrap.py flag
if BUILD_ENV not in os.environ:
# Automatically generate a build number if none is set
uniq = '%x-%d' % (hash(node()), os.getpid())
autogen = time.strftime('%Y%m%d-%H%M%S-' + uniq, time.gmtime(started))
os.environ[BUILD_ENV] = autogen
return os.environ[BUILD_ENV]
def setup_credentials(call, robot, upload):
"""Activate the service account unless robot is none."""
# TODO(fejta): stop activating inside the image
# TODO(fejta): allow use of existing gcloud auth
if robot:
os.environ[SERVICE_ACCOUNT_ENV] = robot
if not os.getenv(SERVICE_ACCOUNT_ENV) and upload:
logging.warning('Cannot --upload=%s, no active gcloud account.', upload)
raise ValueError('--upload requires --service-account')
if not os.getenv(SERVICE_ACCOUNT_ENV) and not upload:
logging.info('Will not upload results.')
return
if not os.path.isfile(os.environ[SERVICE_ACCOUNT_ENV]):
raise IOError(
'Cannot find service account credentials',
os.environ[SERVICE_ACCOUNT_ENV],
'Create service account and then create key at '
'https://console.developers.google.com/iam-admin/serviceaccounts/project', # pylint: disable=line-too-long
)
call([
'gcloud',
'auth',
'activate-service-account',
'--key-file=%s' % os.environ[SERVICE_ACCOUNT_ENV],
])
try: # Old versions of gcloud may not support this value
account = call(
['gcloud', 'config', 'get-value', 'account'], output=True).strip()
except subprocess.CalledProcessError:
account = 'unknown'
logging.info('Will upload results to %s using %s', upload, account)
def setup_logging(path):
"""Initialize logging to screen and path."""
# See https://docs.python.org/2/library/logging.html#logrecord-attributes
# [IWEF]mmdd HH:MM:SS.mmm] msg
fmt = '%(levelname).1s%(asctime)s.%(msecs)03d] %(message)s' # pylint: disable=line-too-long
datefmt = '%m%d %H:%M:%S'
logging.basicConfig(
level=logging.INFO,
format=fmt,
datefmt=datefmt,
)
build_log = logging.FileHandler(filename=path, mode='w')
build_log.setLevel(logging.INFO)
formatter = logging.Formatter(fmt, datefmt=datefmt)
build_log.setFormatter(formatter)
logging.getLogger('').addHandler(build_log)
return build_log
def setup_magic_environment(job):
"""Set magic environment variables scripts currently expect."""
home = os.environ[HOME_ENV]
# TODO(fejta): jenkins sets these values. Consider migrating to using
# a secret volume instead and passing the path to this volume
# into bootstrap.py as a flag.
os.environ.setdefault(
GCE_KEY_ENV,
os.path.join(home, '.ssh/google_compute_engine'),
)
os.environ.setdefault(
'JENKINS_GCE_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/google_compute_engine.pub'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PRIVATE_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa'),
)
os.environ.setdefault(
'JENKINS_AWS_SSH_PUBLIC_KEY_FILE',
os.path.join(home, '.ssh/kube_aws_rsa.pub'),
)
cwd = os.getcwd()
# TODO(fejta): jenkins sets WORKSPACE and pieces of our infra expect this
# value. Consider doing something else in the future.
os.environ[WORKSPACE_ENV] = cwd
# TODO(fejta): Previously dockerized-e2e-runner.sh also sets HOME to WORKSPACE,
# probably to minimize leakage between jobs.
# Consider accomplishing this another way.
os.environ[HOME_ENV] = cwd
# TODO(fejta): jenkins sets JOB_ENV and pieces of our infra expect this
# value. Consider making everything below here agnostic to the
# job name.
if JOB_ENV not in os.environ:
os.environ[JOB_ENV] = job
elif os.environ[JOB_ENV] != job:
logging.warning('%s=%s (overrides %s)', JOB_ENV, job, os.environ[JOB_ENV])
os.environ[JOB_ENV] = job
# TODO(fejta): Magic value to tell our test code not do upload started.json
# TODO(fejta): delete upload-to-gcs.sh and then this value.
os.environ[BOOTSTRAP_ENV] = 'yes'
# This helps prevent reuse of cloudsdk configuration. It also reduces the
# risk that running a job on a workstation corrupts the user's config.
os.environ[CLOUDSDK_ENV] = '%s/.config/gcloud' % cwd
def job_args(args):
"""Converts 'a ${FOO} $bar' into 'a wildly different string'."""
return [os.path.expandvars(a) for a in args]
def job_script(job):
"""Return path to script for job."""
with open(test_infra('jobs/config.json')) as fp:
config = json.loads(fp.read())
job_config = config[job]
cmd = test_infra('scenarios/%s.py' % job_config['scenario'])
return [cmd] + job_args(job_config.get('args', []))
def gubernator_uri(paths):
"""Return a gubernator link for this build."""
job = os.path.dirname(paths.build_log)
if job.startswith('gs:/'):
return job.replace('gs:/', GUBERNATOR, 1)
return job
@contextlib.contextmanager
def choose_ssh_key(ssh):
"""Creates a script for GIT_SSH that uses -i ssh if set."""
if not ssh: # Nothing to do
yield
return
# Create a script for use with GIT_SSH, which defines the program git uses
# during git fetch. In the future change this to GIT_SSH_COMMAND
# https://superuser.com/questions/232373/how-to-tell-git-which-private-key-to-use
with tempfile.NamedTemporaryFile(prefix='ssh', delete=False) as fp:
fp.write('#!/bin/sh\nssh -o StrictHostKeyChecking=no -i \'%s\' -F /dev/null "${@}"\n' % ssh)
try:
os.chmod(fp.name, 0500)
had = 'GIT_SSH' in os.environ
old = os.getenv('GIT_SSH')
os.environ['GIT_SSH'] = fp.name
yield
del os.environ['GIT_SSH']
if had:
os.environ['GIT_SSH'] = old
finally:
os.unlink(fp.name)
def setup_root(call, root, repos, ssh, git_cache, clean):
"""Create root dir, checkout repo and cd into resulting dir."""
if not os.path.exists(root):
os.makedirs(root)
root_dir = os.path.realpath(root)
logging.info('Root: %s', root_dir)
os.chdir(root_dir)
logging.info('cd to %s', root_dir)
with choose_ssh_key(ssh):
for repo, (branch, pull) in repos.items():
os.chdir(root_dir)
logging.info(
'Checkout: %s %s',
os.path.join(root_dir, repo),
pull and pull or branch)
checkout(call, repo, branch, pull, ssh, git_cache, clean)
if len(repos) > 1: # cd back into the primary repo
os.chdir(root_dir)
os.chdir(repos.main)
class Repos(dict):
"""{"repo": (branch, pull)} dict with a .main attribute."""
main = ''
def __setitem__(self, k, v):
if not self:
self.main = k
return super(Repos, self).__setitem__(k, v)
def parse_repos(args):
"""Convert --repo=foo=this,123:abc,555:ddd into a Repos()."""
repos = args.repo or {}
if not repos and not args.bare:
raise ValueError('--bare or --repo required')
ret = Repos()
if len(repos) != 1:
if args.pull:
raise ValueError('Multi --repo does not support --pull, use --repo=R=branch,p1,p2')
if args.branch:
raise ValueError('Multi --repo does not support --branch, use --repo=R=branch')
elif len(repos) == 1 and (args.branch or args.pull):
repo = repos[0]
if '=' in repo or ':' in repo:
raise ValueError('--repo cannot contain = or : with --branch or --pull')
ret[repo] = (args.branch, args.pull)
return ret
for repo in repos:
mat = re.match(r'([^=]+)(=([^:,~^\s]+(:[0-9a-fA-F]+)?(,|$))+)?$', repo)
if not mat:
raise ValueError('bad repo', repo, repos)
this_repo = mat.group(1)
if not mat.group(2):
ret[this_repo] = ('master', '')
continue
commits = mat.group(2)[1:].split(',')
if len(commits) == 1:
# Checking out a branch, possibly at a specific commit
ret[this_repo] = (commits[0], '')
continue
# Checking out one or more PRs
ret[this_repo] = ('', ','.join(commits))
return ret
def bootstrap(args):
"""Clone repo at pull/branch into root and run job script."""
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
job = args.job
repos = parse_repos(args)
upload = args.upload
build_log_path = os.path.abspath('build-log.txt')
build_log = setup_logging(build_log_path)
started = time.time()
if args.timeout:
end = started + args.timeout * 60
else:
end = 0
call = lambda *a, **kw: _call(end, *a, **kw)
gsutil = GSUtil(call)
logging.info('Bootstrap %s...', job)
build = build_name(started)
if upload:
if repos and repos[repos.main][1]: # merging commits, a pr
paths = pr_paths(upload, repos, job, build)
else:
paths = ci_paths(upload, job, build)
logging.info('Gubernator results at %s', gubernator_uri(paths))
# TODO(fejta): Replace env var below with a flag eventually.
os.environ[GCS_ARTIFACTS_ENV] = paths.artifacts
version = 'unknown'
exc_type = None
setup_creds = False
try:
setup_root(call, args.root, repos, args.ssh, args.git_cache, args.clean)
logging.info('Configure environment...')
if repos:
version = find_version(call)
else:
version = ''
setup_magic_environment(job)
setup_credentials(call, args.service_account, upload)
setup_creds = True
logging.info('Start %s at %s...', build, version)
if upload:
start(gsutil, paths, started, node(), version, repos)
success = False
try:
call(job_script(job))
logging.info('PASS: %s', job)
success = True
except subprocess.CalledProcessError:
logging.error('FAIL: %s', job)
except Exception: # pylint: disable=broad-except
exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception('unexpected error')
success = False
if not setup_creds:
setup_credentials(call, args.service_account, upload)
if upload:
logging.info('Upload result and artifacts...')
logging.info('Gubernator results at %s', gubernator_uri(paths))
try:
finish(gsutil, paths, success, '_artifacts', build, version, repos, call)
except subprocess.CalledProcessError: # Still try to upload build log
success = False
logging.getLogger('').removeHandler(build_log)
build_log.close()
if upload:
gsutil.copy_file(paths.build_log, build_log_path)
if exc_type:
raise exc_type, exc_value, exc_traceback # pylint: disable=raising-bad-type
if not success:
# TODO(fejta/spxtr): we should distinguish infra and non-infra problems
# by exit code and automatically retrigger after an infra-problem.
sys.exit(1)
def parse_args(arguments=None):
"""Parse arguments or sys.argv[1:]."""
parser = argparse.ArgumentParser()
parser.add_argument('--root', default='.', help='Root dir to work with')
parser.add_argument(
'--timeout', type=float, default=0, help='Timeout in minutes if set')
parser.add_argument(
'--repo',
action='append',
help='Fetch the specified repositories, with the first one considered primary')
parser.add_argument(
'--bare',
action='store_true',
help='Do not check out a repository')
parser.add_argument('--job', required=True, help='Name of the job to run')
parser.add_argument(
'--upload',
help='Upload results here if set, requires --service-account')
parser.add_argument(
'--service-account',
help='Activate and use path/to/service-account.json if set.')
parser.add_argument(
'--ssh',
help='Use the ssh key to fetch the repository instead of https if set.')
parser.add_argument(
'--git-cache',
help='Location of the git cache.')
parser.add_argument(
'--clean',
action='store_true',
help='Clean the git repo before running tests.')
args = parser.parse_args(arguments)
# --pull is deprecated, use --repo=k8s.io/foo=master:abcd,12:ef12,45:ff65
setattr(args, 'pull', None)
# --branch is deprecated, use --repo=k8s.io/foo=master
setattr(args, 'branch', None)
if bool(args.repo) == bool(args.bare):
raise argparse.ArgumentTypeError(
'Expected --repo xor --bare:', args.repo, args.bare)
return args
if __name__ == '__main__':
ARGS = parse_args()
bootstrap(ARGS)
| 35.163655 | 119 | 0.608115 | [
"Apache-2.0"
] | Acidburn0zzz/test-infra | jenkins/bootstrap.py | 35,023 | Python |
# pylint: disable=W0231,E1101
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
from textwrap import dedent
import warnings
import weakref
import numpy as np
from pandas._libs import Timestamp, iNaT, properties
import pandas.compat as compat
from pandas.compat import (
cPickle as pkl, isidentifier, lrange, lzip, map, set_function_name,
string_types, to_str, zip)
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
Appender, Substitution, rewrite_axis_style_signature)
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.common import (
ensure_int64, ensure_object, is_bool, is_bool_dtype,
is_datetime64_any_dtype, is_datetime64tz_dtype, is_dict_like,
is_extension_array_dtype, is_integer, is_list_like, is_number,
is_numeric_dtype, is_object_dtype, is_period_arraylike, is_re_compilable,
is_scalar, is_timedelta64_dtype, pandas_dtype)
from pandas.core.dtypes.generic import ABCDataFrame, ABCPanel, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas.core import config, missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.index import (
Index, InvalidIndexError, MultiIndex, RangeIndex, ensure_index)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict()
_shared_doc_kwargs = dict(
axes='keywords for axes', klass='NDFrame',
axes_single_arg='int or labels for object',
args_transpose='axes to permute (int or label for object)',
optional_by="""
by : str or list of str
Name or list of names to sort by""")
# sentinel value to use as kwarg in place of None when None has special meaning
# and needs to be distinguished from a user explicitly passing None.
sentinel = object()
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
class NDFrame(PandasObject, SelectionMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache', '_is_copy',
'_subtyp', '_name', '_index', '_default_kind',
'_default_fill_value', '_metadata', '__array_struct__',
'__array_interface__']
_internal_names_set = set(_internal_names)
_accessors = frozenset()
_deprecations = frozenset(['as_blocks', 'blocks',
'convert_objects', 'is_copy'])
_metadata = []
_is_copy = None
# dummy attribute so that datetime.__eq__(Series/DataFrame) defers
# by returning NotImplemented
timetuple = None
# ----------------------------------------------------------------------
# Constructors
def __init__(self, data, axes=None, copy=False, dtype=None,
fastpath=False):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, '_is_copy', None)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def is_copy(self):
"""
Return the copy.
"""
warnings.warn("Attribute 'is_copy' is deprecated and will be removed "
"in a future version.", FutureWarning, stacklevel=2)
return self._is_copy
@is_copy.setter
def is_copy(self, msg):
warnings.warn("Attribute 'is_copy' is deprecated and will be removed "
"in a future version.", FutureWarning, stacklevel=2)
self._is_copy = msg
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
" in the {0} constructor"
.format(self.__class__.__name__))
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame() and DataFrame.to_panel()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
        info_axis : the axis of the selector dimension (int)
        stat_axis : the axis number for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {self._AXIS_SLICEMAP[a]: self._get_axis(a)
for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
def _construct_axes_from_arguments(
self, args, kwargs, require_all=False, sentinel=None):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError("arguments are mutually exclusive "
"for [%s,%s]" % (a, alias))
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments "
"specified!")
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
@classmethod
def _get_axis_number(cls, axis):
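        # Illustrative sketch: for a DataFrame, _get_axis_number('index') and
        # _get_axis_number('rows') (an alias) both return 0, while
        # _get_axis_number('columns') returns 1; valid integers pass through.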
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(cls)))
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, string_types):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(cls)))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
                # multiindex
key = '{prefix}level_{i}'.format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
        # the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self):
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self):
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
if type(k) != ax.dtype.type:
ax = ax.astype('O')
new_axes.append(ax.insert(len(ax), k))
else:
new_axes.append(ax)
return new_axes
def set_axis(self, labels, axis=0, inplace=None):
"""
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1
identifies the columns.
inplace : bool, default None
Whether to return a new %(klass)s instance.
.. warning::
                ``inplace=None`` currently falls back to True, but in a
future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
**Series**
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
a 1
b 2
c 3
dtype: int64
The original object is not modified.
>>> s
0 1
1 2
2 3
dtype: int64
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index', inplace=False)
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns', inplace=False)
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
def transpose(self, *args, **kwargs):
"""
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
y : same as input
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
"""
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs,
require_all=True)
axes_names = tuple(self._get_axis_name(axes[a])
for a in self._AXIS_ORDERS)
axes_numbers = tuple(self._get_axis_number(axes[a])
for a in self._AXIS_ORDERS)
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(self, [self._get_axis(x)
for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if kwargs.pop('copy', None) or (len(args) and args[-1]):
new_values = new_values.copy()
nv.validate_transpose_for_generic(self, kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
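Examples
--------
A minimal illustration (hypothetical frame; output indicative):
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.swapaxes('index', 'columns')
   0  1
A  1  2
B  3  4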
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self, level, axis=0):
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
DataFrame
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple(0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes))]
except Exception:
return self
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
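Examples
--------
A small illustrative sketch (hypothetical Series; output indicative):
>>> midx = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)], names=['x', 'y'])
>>> s = pd.Series([10, 20], index=midx)
>>> s.swaplevel('x', 'y')
y  x
1  a    10
2  b    20
dtype: int64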
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
# ----------------------------------------------------------------------
# Rename
def rename(self, *args, **kwargs):
"""
Alter axes labels using an input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame or Panel.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
axis = kwargs.pop('axis', None)
errors = kwargs.pop('errors', 'ignore')
if axis is not None:
# Validate the axis
self._get_axis_number(axis)
if kwargs:
raise TypeError('rename() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if com.count_not_none(*axes.values()) == 0:
raise TypeError('must pass an index to rename')
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = com._get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
# GH 13473
if not callable(v):
indexer = self.axes[axis].get_indexer_for(v)
if errors == 'raise' and len(indexer[indexer == -1]):
missing_labels = [label for index, label in enumerate(v)
if indexer[index] == -1]
raise KeyError('{} not found in axis'
.format(missing_labels))
result._data = result._data.rename_axis(f, axis=baxis, copy=copy,
level=level)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
@rewrite_axis_style_signature('mapper', [('copy', True),
('inplace', False)])
def rename_axis(self, mapper=sentinel, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
axis = kwargs.pop('axis', 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename_axis() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
inplace = validate_bool_kwarg(inplace, 'inplace')
if (mapper is not sentinel):
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
# Deprecated (v0.21) behavior is if mapper is specified,
# and not a list or scalar, then call rename
msg = ("Using 'rename_axis' to alter labels is deprecated. "
"Use '.rename' instead")
warnings.warn(msg, FutureWarning, stacklevel=3)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is sentinel:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not
is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com._get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis,
inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, 'inplace')
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other):
return all(self._get_axis(a).equals(other._get_axis(a))
for a in self._AXIS_ORDERS)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = com.values_from_object(self)
if is_bool_dtype(values):
arr = operator.inv(values)
elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)
or is_object_dtype(values)):
arr = operator.neg(values)
else:
raise TypeError("Unary negative expects numeric dtype, not {}"
.format(values.dtype))
return self.__array_wrap__(arr)
def __pos__(self):
values = com.values_from_object(self)
if (is_bool_dtype(values) or is_period_arraylike(values)):
arr = values
elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)
or is_object_dtype(values)):
arr = operator.pos(values)
else:
raise TypeError("Unary plus expects numeric dtype, not {}"
.format(values.dtype))
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(com.values_from_object(self))
return self.__array_wrap__(arr)
except Exception:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if that
element is not boolean.
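Examples
--------
Illustrative calls (not part of the original docstring):
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False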
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
def __abs__(self):
return self.abs()
def __round__(self, decimals=0):
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
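Examples
--------
An illustrative sketch with a hypothetical frame:
>>> df = pd.DataFrame({'a': [1, 2]}, index=pd.Index([3, 4], name='b'))
>>> df._is_level_reference('b')
True
>>> df._is_level_reference('a')
False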
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis))
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_reference is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (key is not None and
is_hashable(key) and
any(key in self.axes[ax] for ax in other_axes))
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_or_level_reference is not implemented for {type}"
.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or
self._is_label_reference(key, axis=axis))
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
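Examples
--------
An illustrative sketch (hypothetical frame whose index level name also
appears as a column label):
>>> df = pd.DataFrame({'a': [1, 2]}, index=pd.Index([3, 4], name='a'))
>>> df._check_label_or_level_ambiguity('a')
Traceback (most recent call last):
...
ValueError: 'a' is both an index level and a column label, which is ambiguous.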
"""
if self.ndim > 2:
raise NotImplementedError(
"_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
any(key in self.axes[ax] for ax in other_axes)):
# Build an informative and grammatical warning
level_article, level_type = (('an', 'index')
if axis == 0 else
('a', 'column'))
label_article, label_type = (('a', 'column')
if axis == 0 else
('an', 'index'))
msg = ("'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type)
raise ValueError(msg)
def _get_label_or_level_values(self, key, axis=0):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
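Examples
--------
An illustrative sketch with a hypothetical frame:
>>> df = pd.DataFrame({'a': [1, 2]}, index=pd.Index([3, 4], name='b'))
>>> df._get_label_or_level_values('a')  # 'a' is a column label
array([1, 2])
>>> df._get_label_or_level_values('b')  # 'b' is an index level
array([3, 4])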
"""
if self.ndim > 2:
raise NotImplementedError(
"_get_label_or_level_values is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(
self._get_axis(other_axes[0]), MultiIndex):
multi_message = ('\n'
'For a multi-index, the label must be a '
'tuple with elements corresponding to '
'each level.')
else:
multi_message = ''
label_axis_name = 'column' if axis == 0 else 'index'
raise ValueError(("The {label_axis_name} label '{key}' "
"is not unique.{multi_message}")
.format(key=key,
label_axis_name=label_axis_name,
multi_message=multi_message))
return values
def _drop_labels_or_levels(self, keys, axis=0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
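Examples
--------
An illustrative sketch with a hypothetical frame (output indicative):
>>> df = pd.DataFrame({'a': [1, 2], 'c': [5, 6]},
... index=pd.Index([3, 4], name='b'))
>>> df._drop_labels_or_levels('a')  # 'a' is a column label -> drop column
   c
b
3  5
4  6
>>> df._drop_labels_or_levels('b')  # 'b' is an index level -> drop level
   a  c
0  1  5
1  2  6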
"""
if self.ndim > 2:
raise NotImplementedError(
"_drop_labels_or_levels is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [k for k in keys if not
self._is_label_or_level_reference(k, axis=axis)]
if invalid_keys:
raise ValueError(("The following keys are not valid labels or "
"levels for axis {axis}: {invalid_keys}")
.format(axis=axis,
invalid_keys=invalid_keys))
# Compute levels and labels to drop
levels_to_drop = [k for k in keys
if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys
if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError('{0!r} objects are mutable, thus they cannot be'
' hashed'.format(self.__class__.__name__))
def __iter__(self):
"""Iterate over info axis"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""Get the 'info axis' (see Indexing for more)
This is index for Series, columns for DataFrame and major_axis for
Panel.
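Examples
--------
An illustrative call (not part of the original docstring):
>>> df = pd.DataFrame({'A': [1], 'B': [2]})
>>> list(df.keys())
['A', 'B']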
"""
return self._info_axis
def iteritems(self):
"""Iterate over (label, values) on info axis
This is index for Series, columns for DataFrame, major_axis for Panel,
and so on.
"""
for h in self._info_axis:
yield h, self[h]
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None):
return com.values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"""
Return dense representation of NDFrame (as opposed to sparse).
"""
# compat
return self
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self):
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata,
**meta)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get('_typ')
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __unicode__(self):
# unicode representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = '[%s]' % ','.join(map(pprint_thing, self))
return '%s(%s)' % (self.__class__.__name__, prepr)
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option('display.latex.repr'):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs['to_excel'] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
.. versionadded:: 0.20.0.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(self, excel_writer, sheet_name="Sheet1", na_rep="",
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep="inf", verbose=True,
freeze_panes=None):
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(df, na_rep=na_rep, cols=columns,
header=header,
float_format=float_format, index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow,
startcol=startcol, freeze_panes=freeze_panes,
engine=engine)
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records', write out line-delimited JSON format. Raises
ValueError for any other 'orient', since other formats are not
list-like.
.. versionadded:: 0.19.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression,
index=index)
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing table.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
Serialize object to input file path using msgpack format.
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string, buffer-like, or None
File path or buffer to write to; if None, the generated string is
returned.
append : bool, default False
Whether to append to an existing msgpack file.
compress : {'zlib', 'blosc', None}, default None
Type of compressor to use (default is no compression).
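Examples
--------
A hedged round-trip sketch (file name is hypothetical):
>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.to_msgpack('df.msg')  # doctest: +SKIP
>>> pd.read_msgpack('df.msg')  # doctest: +SKIP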
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs)
def to_sql(self, name, con, schema=None, if_exists='fail', index=True,
index_label=None, chunksize=None, dtype=None, method=None):
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : string
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects.
schema : string, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Rows will be written in batches of this size at a time. By default,
all rows will be written at once.
dtype : dict, optional
Specifying the datatype for columns. The keys should be the column
names and the values should be the SQLAlchemy types or strings for
the sqlite3 legacy mode.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] http://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(self, name, con, schema=schema, if_exists=if_exists,
index=index, index_label=index_label, chunksize=chunksize,
dtype=dtype, method=method)
def to_pickle(self, path, compression='infer',
protocol=pkl.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For
Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a
valid value. For Python >= 3.4, 4 is a valid value. A negative
value for the protocol parameter is equivalent to setting its value
to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
return to_pickle(self, path, compression=compression,
protocol=protocol)
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}).set_index(['date',
... 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
try:
import xarray
except ImportError:
# Give a nice error message
raise ImportError("the xarray library is not installed\n"
"you can install via conda\n"
"conda install xarray\n"
"or via pip\n"
"pip install xarray\n")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
elif self.ndim == 2:
return xarray.Dataset.from_dataframe(self)
# > 2 dims
coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
return xarray.DataArray(self,
coords=coords,
)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a
string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression='infer', quoting=None,
quotechar='"', line_terminator=None, chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : str, default 'infer'
Compression mode among the following possible values: {'infer',
'gzip', 'bz2', 'zip', 'xz', None}. If 'infer' and `path_or_buf`
is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip' or '.xz'. (otherwise no
compression).
.. versionchanged:: 0.24.0
'infer' option added and set to default.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
tupleize_cols : bool, default False
Write MultiIndex columns as a list of tuples (if True) or in
the new, expanded format, where each MultiIndex column is a row
in the CSV (if False).
.. deprecated:: 0.21.0
This argument will be removed and will always write each row
of the multi-index as a separate row in the CSV file.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
if tupleize_cols is not None:
warnings.warn("The 'tupleize_cols' parameter is deprecated and "
"will be removed in a future version",
FutureWarning, stacklevel=2)
else:
tupleize_cols = False
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(df, path_or_buf,
line_terminator=line_terminator, sep=sep,
encoding=encoding,
compression=compression, quoting=quoting,
na_rep=na_rep, float_format=float_format,
cols=columns, header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar, decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
# ----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
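# Illustrative sketch (the indexer class below is hypothetical): a call such as
#
#   NDFrame._create_indexer('loc', SomeLocIndexer)
#
# installs a ``loc`` property on the class, so that ``obj.loc`` evaluates to
# ``SomeLocIndexer('loc', obj)``.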
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
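Examples
--------
A minimal illustrative sketch (the data here is hypothetical):

>>> df = pd.DataFrame({'A': [1, 2]})
>>> df.get('A')
0    1
1    2
Name: A, dtype: int64
>>> df.get('missing_column', default='not found')
'not found'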
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self):
"""Reset the cacher."""
if hasattr(self, '_cacher'):
del self._cacher
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self):
"""Return boolean indicating if self is cached or not."""
return getattr(self, '_cacher', None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
cacher = cacher[1]()
return cacher
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
def _clear_item_cache(self, i=None):
if i is not None:
self._item_cache.pop(i, None)
else:
self._item_cache.clear()
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view slicable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self._is_copy = None
else:
if ref is not None:
self._is_copy = weakref.ref(ref)
else:
self._is_copy = None
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just near setting a value
Will return True if we are a view and are cached but of a single
dtype, meaning that the cacher should be updated following the
setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
"""
Parameters
----------
stacklevel : integer, default 4
the level to show of the stack when the error is output
t : string, the type of setting error
force : boolean, default False
if True, then force showing an error
Validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are view
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
if force or self._is_copy:
value = config.get_option('mode.chained_assignment')
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
try:
gc.collect(2)
if not gc.get_referents(self._is_copy()):
self._is_copy = None
return
except Exception:
pass
# we might be a false positive
try:
if self._is_copy().shape == self.shape:
self._is_copy = None
return
except Exception:
pass
# a custom message
if isinstance(self._is_copy, string_types):
t = self._is_copy
elif t == 'referant':
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
else:
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
if value == 'raise':
raise com.SettingWithCopyError(t)
elif value == 'warn':
warnings.warn(t, com.SettingWithCopyWarning,
stacklevel=stacklevel)
def __delitem__(self, key):
"""
Delete item
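A minimal illustrative sketch (the data here is hypothetical):

>>> df = pd.DataFrame({'A': [1], 'B': [2]})
>>> del df['A']
>>> list(df.columns)
['B']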
"""
deleted = False
maybe_shortcut = False
if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key, )
for col in self.columns:
if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
def _take(self, indices, axis=0, is_copy=True):
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
This is the internal version of ``.take()`` and will contain a wider
selection of parameters useful for internal use but not as suitable
for public usage.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : int, default 0
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
is_copy : bool, default True
Whether to return a copy of the original object or not.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
numpy.ndarray.take
numpy.take
"""
self._consolidate_inplace()
new_data = self._data.take(indices,
axis=self._get_block_manager_axis(axis),
verify=True)
result = self._constructor(new_data).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
convert : bool, default True
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
.. deprecated:: 0.21.0
In the future, negative indices will always be converted.
is_copy : bool, default True
Whether to return a copy of the original object or not.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may also take elements using negative integers, which count from
the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
nv.validate_take(tuple(), kwargs)
return self._take(indices, axis=axis, is_copy=is_copy)
def xs(self, key, axis=0, level=None, drop_level=True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` can not be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level,
drop_level=drop_level)
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key,
drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self._take(inds, axis=axis)
else:
return self._take(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
# that means that their are list/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return com.maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values, index=self.columns,
name=self.index[loc], dtype=new_values.dtype)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view slicable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria.
.. deprecated:: 0.21.0
Use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : same type as caller
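Examples
--------
An illustrative sketch (``select`` is deprecated, so the equivalent
``.loc`` call is shown as well; the data here is hypothetical):

>>> df = pd.DataFrame({'A': [1, 2, 3]}, index=['a', 'b', 'c'])
>>> df.select(lambda label: label in ('a', 'c'))  # doctest: +SKIP
   A
a  1
c  3
>>> df.loc[df.index.map(lambda label: label in ('a', 'c'))]  # doctest: +SKIP
   A
a  1
c  3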
"""
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d)
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
inplace = validate_bool_kwarg(inplace, 'inplace')
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and "
"'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError("Need to specify at least one of 'labels', "
"'index' or 'columns'")
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
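# Illustrative usage sketch (assumes a small two-column DataFrame; ``drop`` is
# documented in detail on the DataFrame/Series subclasses):
#
#   df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
#   df.drop(columns=['B'])   # drop by column label
#   df.drop(index=[0])       # drop by row label
#   df.drop('B', axis=1)     # equivalent label/axis form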
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial('{prefix}{}'.format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial('{}{suffix}'.format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.")
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
"""
Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise.
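Examples
--------
A minimal illustrative sketch (the data here is hypothetical):

>>> df = pd.DataFrame({'A': [1, 2, 3]}, index=['c', 'a', 'b'])
>>> df.sort_index()
   A
a  2
b  3
c  1
>>> df.sort_index(ascending=False)
   A
c  1
b  3
a  2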
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
def reindex(self, *args, **kwargs):
"""
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({
... 'http_status': [200,200,404,404,301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to backfill the ``NaN`` values using the next valid
observation, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop('method', None))
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
tolerance = kwargs.pop('tolerance', None)
fill_value = kwargs.pop('fill_value', None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError('reindex() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(self._get_axis(axis).identical(ax)
for axis, ax in axes.items() if ax is not None):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except Exception:
pass
# perform the reindex on the axes
return self._reindex_axes(axes, level, limit, tolerance, method,
fill_value, copy).__finalize__(self)
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex."""
return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and
method is None and level is None and not self._is_mixed_type)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
_shared_docs['reindex_axis'] = ("""
Conform input object to new index.
.. deprecated:: 0.21.0
Use `reindex` instead.
By default, places NaN in locations having no value in the
previous index. A new object is produced unless the new index
is equivalent to the current one and copy=False.
Parameters
----------
labels : array-like
New labels / index to conform to. Preferably an Index object to
avoid duplicating data.
axis : %(axes_single_arg)s
Indicate whether to use rows or columns.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
Method to use for filling holes in reindexed DataFrame:
* default: don't fill gaps.
* pad / ffill: propagate last valid observation forward to next
valid.
* backfill / bfill: use next valid observation to fill gap.
* nearest: use nearest valid observations to fill gap.
level : int or str
Broadcast across a level, matching Index values on the
passed MultiIndex level.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, optional
Maximum number of consecutive elements to forward or backward fill.
fill_value : float, default NaN
Value used to fill in locations having no value in the previous
index.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s
Returns a new DataFrame object with new indices, unless the new
index is equivalent to the current one and copy=False.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> df.reindex(['num_wings', 'num_legs', 'num_heads'],
... axis='columns')
num_wings num_legs num_heads
dog 0 4 NaN
hawk 2 2 NaN
""")
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=None):
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
self._consolidate_inplace()
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
method = missing.clean_reindex_fill_method(method)
warnings.warn(msg, FutureWarning, stacklevel=3)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value, copy=copy)
def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False,
allow_dups=False):
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(index, indexer, axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
List of axis labels to restrict to (the labels need not all be present).
like : string
Keep axis labels for which ``like in label == True``.
regex : string (regular expression)
Keep axis labels for which ``re.search(regex, label)`` matches.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1,2,3], [4,5,6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
import re
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError('Keyword arguments `items`, `like`, or `regex` '
'are mutually exclusive')
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(
**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in to_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Sample with or without replacement.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, string_types):
if isinstance(self, pd.DataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
else:
raise ValueError("Strings cannot be passed as weights "
"when sampling from a Series or Panel.")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
# Check for negative sizes
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
_shared_docs['pipe'] = (r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the %(klass)s.
``args`` and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply
DataFrame.applymap
Series.map
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
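        A concrete, runnable sketch (the data and the lambda below are
        illustrative only, not part of the API):
        >>> df = pd.DataFrame({'x': [1, 2, 3]})
        >>> df.pipe(lambda d, k: d['x'] * k, k=10)
        0    10
        1    20
        2    30
        Name: x, dtype: int64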
""")
@Appender(_shared_docs['pipe'] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
_shared_docs['aggregate'] = dedent("""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s
""")
_shared_docs['transform'] = ("""
Call ``func`` on self producing a %(klass)s with transformed values
and that has the same axis length as self.
.. versionadded:: 0.20.0
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
        - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
""")
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
        method : optional, a passed method name; possibly used to take
            different types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
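        For example (the column name here is arbitrary):
        >>> df = pd.DataFrame({'col': [1, 2]})
        >>> df.col
        0    1
        1    2
        Name: col, dtype: int64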
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (name in self._internal_names_set or name in self._metadata or
name in self._accessors):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn("Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2)
object.__setattr__(self, name, value)
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
        If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {c for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, string_types) and isidentifier(c)}
return super(NDFrame, self)._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Getting and setting elements
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self):
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
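    # The three properties below report the dtype composition of the
    # underlying blocks; access is wrapped in _protect_consolidate so the
    # item cache is cleared if the block layout changes.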
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except Exception:
pass
raise TypeError('Cannot do inplace boolean setting on '
'mixed-types with a non np.nan value')
return True
def _get_numeric_data(self):
return self._constructor(
self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
.. deprecated:: 0.23.0
Use :meth:`DataFrame.values` instead.
Parameters
----------
        columns : list, optional, default: None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
See Also
--------
DataFrame.values
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
"""
warnings.warn("Method .as_matrix will be removed in a future version. "
"Use .values instead.", FutureWarning, stacklevel=2)
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED,
items=columns)
@property
def values(self):
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
        A DataFrame with mixed-type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self):
"""internal implementation"""
return self.values
@property
def _get_values(self):
# compat
return self.values
def get_values(self):
"""
Return an ndarray after converting sparse values to dense.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],
... 'c': [1.0, 2.0]})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
>>> df.get_values()
array([[1, True, 1.0], [2, False, 2.0]], dtype=object)
>>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),
... "c": [1.0, 2.0, 3.0]})
>>> df
a c
0 1.0 1.0
1 NaN 2.0
2 NaN 3.0
>>> df.get_values()
array([[ 1., 1.],
[nan, 2.],
[nan, 3.]])
"""
return self.values
def get_dtype_counts(self):
"""
Return counts of unique dtypes in this object.
Returns
-------
dtype : Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_dtype_counts()
float64 1
int64 1
object 1
dtype: int64
"""
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
"""
Return counts of unique ftypes in this object.
.. deprecated:: 0.23.0
This is useful for SparseDataFrame or for DataFrames containing
sparse arrays.
Returns
-------
dtype : Series
Series with the count of columns with each type and
sparsity (dense/sparse).
See Also
--------
ftypes : Return ftypes (indication of sparse/dense and dtype) in
this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_ftype_counts() # doctest: +SKIP
float64:dense 1
int64:dense 1
object:dense 1
dtype: int64
"""
warnings.warn("get_ftype_counts is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
DataFrame.ftypes : Dtype and sparsity information.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype) in DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type and indication of sparse/dense of each column.
See Also
--------
DataFrame.dtypes: Series with just dtype information.
SparseDataFrame : Container for sparse tabular data.
Notes
-----
Sparse data should have the same dtypes as its dense representation.
Examples
--------
>>> arr = np.random.RandomState(0).randn(100, 4)
>>> arr[arr < .8] = np.nan
>>> pd.DataFrame(arr).ftypes
0 float64:dense
1 float64:dense
2 float64:dense
3 float64:dense
dtype: object
>>> pd.SparseDataFrame(arr).ftypes
0 float64:sparse
1 float64:sparse
2 float64:sparse
3 float64:sparse
dtype: object
"""
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis,
dtype=np.object_)
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> Constructor Types
"""
warnings.warn("as_blocks is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
"""
Internal property, property synonym for as_blocks().
.. deprecated:: 0.21.0
"""
return self.as_blocks()
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()}
def astype(self, dtype, copy=True, errors='raise', **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError('Only the Series name can be used for '
'the key in Series dtype mappings.')
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
'astype() only accepts a dtype arg of type dict when '
'invoked on Series and DataFrames. A single dtype must be '
'specified when invoked on a Panel.'
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype[col_name], copy=copy))
else:
                    results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = (self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns)))
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
**kwargs)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
        Updates to the data shared by shallow copy and original are reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
if memo is None:
memo = {}
return self.copy(deep=True)
def _convert(self, datetime=False, numeric=False, timedelta=False,
coerce=False, copy=True):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : boolean, default False
If True, convert to date where possible.
numeric : boolean, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : boolean, default False
If True, convert to timedelta where possible.
coerce : boolean, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT)
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
return self._constructor(
self._data.convert(datetime=datetime, numeric=numeric,
timedelta=timedelta, coerce=coerce,
copy=copy)).__finalize__(self)
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self)
def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(datetime=True, numeric=False,
timedelta=True, coerce=False,
copy=True)).__finalize__(self)
# ----------------------------------------------------------------------
# Filling NA's
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : %(axes_single_arg)s
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
%(klass)s
Object with missing values filled.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
from pandas import DataFrame
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
# > 3d
if self.ndim > 3:
raise NotImplementedError('Cannot fillna with a method for > '
'3dims')
# 3d
elif self.ndim == 3:
# fill in 2d chunks
result = {col: s.fillna(method=method, value=value)
for col, s in self.iteritems()}
prelim_obj = self._constructor.from_dict(result)
new_obj = prelim_obj.__finalize__(self)
new_data = new_obj._data
else:
# 2d or less
new_data = self._data.interpolate(method=method, axis=axis,
limit=limit, inplace=inplace,
coerce=True,
downcast=downcast)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
from pandas import Series
value = Series(value)
elif not is_list_like(value):
pass
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError('Currently only can fill '
'with dict/Series column '
'by column')
result = self if inplace else self.copy()
for k, v in compat.iteritems(value):
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
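        Examples
        --------
        A small illustration with arbitrary values:
        >>> s = pd.Series([np.nan, 2.0, np.nan, 4.0])
        >>> s.bfill()
        0    2.0
        1    2.0
        2    4.0
        3    4.0
        dtype: float64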
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
_shared_docs['replace'] = ("""
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
            The method to use for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
        replacement. That is why, in the example below, the 'a' values are
        replaced by 10 in rows 1 and 2 and by 'b' in row 4.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
""")
@Appender(_shared_docs['replace'] % _shared_doc_kwargs)
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad'):
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, pd.DataFrame):
return self.apply(_single_replace,
args=(to_replace, method, inplace,
limit))
return _single_replace(self, to_replace, method, inplace,
limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError('If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
'regex must be a mapping')
to_replace = regex
regex = True
items = list(compat.iteritems(to_replace))
keys, values = lzip(*items) or ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError("If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings")
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = lzip(*v.items()) or ([], [])
if set(keys) & set(values):
raise ValueError("Replacement not allowed with "
"overlapping keys and values")
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(to_replace, value, inplace=inplace,
limit=limit, regex=regex)
else:
# need a non-zero len on all axes
for a in self._AXIS_ORDERS:
if not len(self._get_axis(a)):
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in compat.iteritems(to_replace):
if c in value and c in self:
# object conversion is handled in
                            # series.replace which is called recursively
res[c] = res[c].replace(to_replace=src,
value=value[c],
inplace=False,
regex=regex)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in compat.iteritems(to_replace)
if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert)
else:
raise TypeError('value argument must be scalar, dict, or '
'Series')
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError('Replacement lists must match '
'in length. Expecting %d got %d ' %
(len(to_replace), len(value)))
new_data = self._data.replace_list(src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
elif to_replace is None:
if not (is_re_compilable(regex) or
is_list_like(regex) or is_dict_like(regex)):
raise TypeError("'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__))
return self.replace(regex, value, inplace=inplace, limit=limit,
regex=True)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in compat.iteritems(value):
if k in self:
new_data = new_data.replace(to_replace=to_replace,
value=v, filter=[k],
inplace=inplace,
regex=regex)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
else:
msg = ('Invalid "to_replace" type: '
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
_shared_docs['interpolate'] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
.. versionadded:: 0.18.1
Added support for the 'akima' method.
Added interpolate method 'from_derivatives' which replaces
'piecewise_polynomial' in SciPy 0.18; backwards-compatible with
SciPy < 0.18
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
        is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
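        An illustration of ``limit_area='inside'`` (values are arbitrary):
        only the ``NaN`` surrounded by valid values is filled.
        >>> s = pd.Series([np.nan, 1, np.nan, 3, np.nan])
        >>> s.interpolate(limit_area='inside')
        0    NaN
        1    1.0
        2    2.0
        3    3.0
        4    NaN
        dtype: float64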
"""
@Appender(_shared_docs['interpolate'] % _shared_doc_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if self.ndim > 2:
raise NotImplementedError("Interpolate has not been implemented "
"on Panel and Panel 4D objects.")
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
else:
_maybe_transposed_self = self
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if (isinstance(_maybe_transposed_self.index, MultiIndex) and
method != 'linear'):
raise ValueError("Only `method=linear` interpolation is supported "
"on MultiIndexes.")
if _maybe_transposed_self._data.get_dtype_counts().get(
'object') == len(_maybe_transposed_self.T):
raise TypeError("Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype.")
# create/use the index
if method == 'linear':
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
if isna(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating.")
data = _maybe_transposed_self._data
new_data = data.interpolate(method=method, axis=ax, index=index,
values=_maybe_transposed_self, limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace, downcast=downcast,
**kwargs)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
    # Timeseries methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`)
.. versionadded:: 0.19.0 For DataFrame
If there is no good value, NaN is returned for a Series or
a Series of NaN values for a DataFrame
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, compat.string_types):
from pandas import to_datetime
where = to_datetime(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
elif self.ndim > 2:
raise NotImplementedError("asof is not implemented "
"for {type}".format(type=type(self)))
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side='right')
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs['isna'] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return isna(self).__finalize__(self)
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return isna(self).__finalize__(self)
_shared_docs['notna'] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return notna(self).__finalize__(self)
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace=False):
if ((lower is not None and np.any(isna(lower))) or
(upper is not None and np.any(isna(upper)))):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
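# remember which entries are already missing so they can be restored to
# NaN after clipping below; clipping must not fill pre-existing NaNs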
mask = isna(self.values)
with np.errstate(all='ignore'):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == 'le':
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
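# build the mask of entries to keep: those already satisfying the bound,
# plus any NaN entries, so that where() leaves missing values untouched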
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = pd.Series(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold,
axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(self, lower=None, upper=None, axis=None, inplace=False,
*args, **kwargs):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
if isinstance(self, ABCPanel):
raise NotImplementedError("clip is not supported yet for panels")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(pd.isnull(lower)):
lower = None
if not is_list_like(upper) and np.any(pd.isnull(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if ((lower is None or (is_scalar(lower) and is_number(lower))) and
(upper is None or (is_scalar(upper) and is_number(upper)))):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(lower, method=self.ge,
axis=axis, inplace=inplace)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(upper, method=self.le,
axis=axis, inplace=inplace)
return result
def clip_upper(self, threshold, axis=None, inplace=False):
"""
Trim values above a given threshold.
.. deprecated:: 0.24.0
Use clip(upper=threshold) instead.
Elements above the `threshold` will be changed to match the
`threshold` value(s). Threshold can be a single value or an array,
in the latter case it performs the truncation element-wise.
Parameters
----------
threshold : numeric or array-like
Maximum value allowed. All values above threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
it's compared to. When `self` is a Series, `threshold` should be
the same length. When `self` is a DataFrame, `threshold` should be 2-D
and the same shape as `self` for ``axis=None``, or 1-D and the
same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align object with `threshold` along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
Original data with values trimmed.
See Also
--------
Series.clip : General purpose method to trim Series values to given
threshold(s).
DataFrame.clip : General purpose method to trim DataFrame values to
given threshold(s).
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.clip(upper=3)
0 1
1 2
2 3
3 3
4 3
dtype: int64
>>> elemwise_thresholds = [5, 4, 3, 2, 1]
>>> elemwise_thresholds
[5, 4, 3, 2, 1]
>>> s.clip(upper=elemwise_thresholds)
0 1
1 2
2 3
3 2
4 1
dtype: int64
"""
warnings.warn('clip_upper(threshold) is deprecated, '
'use clip(upper=threshold) instead',
FutureWarning, stacklevel=2)
return self._clip_with_one_bound(threshold, method=self.le,
axis=axis, inplace=inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
"""
Trim values below a given threshold.
.. deprecated:: 0.24.0
Use clip(lower=threshold) instead.
Elements below the `threshold` will be changed to match the
`threshold` value(s). Threshold can be a single value or an array,
in the latter case it performs the truncation element-wise.
Parameters
----------
threshold : numeric or array-like
Minimum value allowed. All values below threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
it's compared to. When `self` is a Series, `threshold` should be
the same length. When `self` is a DataFrame, `threshold` should be 2-D
and the same shape as `self` for ``axis=None``, or 1-D and the
same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align `self` with `threshold` along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
Original data with values trimmed.
See Also
--------
Series.clip : General purpose method to trim Series values to given
threshold(s).
DataFrame.clip : General purpose method to trim DataFrame values to
given threshold(s).
Examples
--------
Series single threshold clipping:
>>> s = pd.Series([5, 6, 7, 8, 9])
>>> s.clip(lower=8)
0 8
1 8
2 8
3 8
4 9
dtype: int64
Series clipping element-wise using an array of thresholds. `threshold`
should be the same length as the Series.
>>> elemwise_thresholds = [4, 8, 7, 2, 5]
>>> s.clip(lower=elemwise_thresholds)
0 5
1 8
2 7
3 8
4 9
dtype: int64
DataFrames can be compared to a scalar.
>>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> df.clip(lower=3)
A B
0 3 3
1 3 4
2 5 6
Or to an array of values. By default, `threshold` should be the same
shape as the DataFrame.
>>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))
A B
0 3 4
1 3 4
2 6 6
Control how `threshold` is broadcast with `axis`. In this case
`threshold` should be the same length as the axis specified by
`axis`.
>>> df.clip(lower=[3, 3, 5], axis='index')
A B
0 3 3
1 3 4
2 5 6
>>> df.clip(lower=[4, 5], axis='columns')
A B
0 4 5
1 4 5
2 5 6
"""
warnings.warn('clip_lower(threshold) is deprecated, '
'use clip(lower=threshold) instead',
FutureWarning, stacklevel=2)
return self._clip_with_one_bound(threshold, method=self.ge,
axis=axis, inplace=inplace)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, observed=False, **kwargs):
"""
Group DataFrame or Series using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
**kwargs
Optional, only accepts keyword argument 'mutated' and is passed
to groupby.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level=1).mean()
Max Speed
Type
Captive 210.0
Wild 185.0
"""
from pandas.core.groupby.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze,
observed=observed, **kwargs)
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset object, or string
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(self, freq, method=method, how=how, normalize=normalize,
fill_value=fill_value)
def at_time(self, time, asof=False, axis=None):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
return self._take(indexer, axis=axis)
def between_time(self, start_time, end_time, include_start=True,
include_end=True, axis=None):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
end_time : datetime.time or str
include_start : bool, default True
include_end : bool, default True
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
return self._take(indexer, axis=axis)
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : str
The offset string or object representing target conversion.
how : str
Method for down/re-sampling; defaults to 'mean' for downsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).mean()``, or
``.resample(...).apply(<func>)``
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
fill_method : str, default None
Filling method for upsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).<func>()``,
e.g. ``.resample(...).pad()``
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
limit : int, default None
Maximum size gap when reindexing with `fill_method`.
.. deprecated:: 0.18.0
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
.. versionadded:: 0.19.0
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
.. versionadded:: 0.19.0
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket
it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import (resample,
_maybe_process_deprecations)
axis = self._get_axis_number(axis)
r = resample(self, freq=rule, label=label, closed=closed,
axis=axis, kind=kind, loffset=loffset,
convention=convention,
base=base, key=on, level=level)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days were returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, '_inc'):
if end_date in self.index:
end = self.index.searchsorted(end_date, side='left')
return self.iloc[:end]
return self.loc[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days were returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side='right')
return self.iloc[start:]
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which to rank; 0 or 'index' ranks down the rows,
1 or 'columns' ranks across the columns.
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : boolean, default None
Include only float, int, boolean data. Valid only for DataFrame or
Panel objects
na_option : {'keep', 'top', 'bottom'}
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Computes percentage rank of data
Returns
-------
ranks : same type as caller
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
msg = "rank does not make sense when ndim > 2"
raise NotImplementedError(msg)
if na_option not in {'keep', 'top', 'bottom'}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(data.values, axis=axis, method=method,
ascending=ascending, na_option=na_option,
pct=pct)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
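# Editor's note: ``rank`` has no Examples section in its docstring above, so
# here is a small illustrative sketch (not part of the original source). It
# uses only the arguments documented above; the values shown are what
# 'average' and 'dense' ranking are expected to produce for tied entries.
#
#   >>> import pandas as pd
#   >>> s = pd.Series([3, 1, 2, 2])
#   >>> s.rank()            # method='average': the tied 2s share rank (2+3)/2
#   0    4.0
#   1    1.0
#   2    2.5
#   3    2.5
#   dtype: float64
#   >>> s.rank(method='dense')
#   0    3.0
#   1    1.0
#   2    2.0
#   3    2.0
#   dtype: float64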
_shared_docs['align'] = ("""
Align two objects on their axes with the
specified join method for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : boolean, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
""")
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
from pandas import DataFrame, Series
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, Series):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons({c: self for c in other.columns},
**other._construct_axes_dict())
return df._align_frame(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis)
elif isinstance(other, Series):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons({c: other for c in self.columns},
**self._construct_axes_dict())
return self._align_frame(df, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, DataFrame):
return self._align_frame(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
elif isinstance(other, Series):
return self._align_series(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
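# Editor's note: the shared ``align`` docstring above has no Examples section,
# so here is a small illustrative sketch (not part of the original source),
# assuming two Series with partially overlapping indexes and the default
# ``join='outer'``; missing positions are filled with NaN:
#
#   >>> import pandas as pd
#   >>> a = pd.Series([1, 2], index=['a', 'b'])
#   >>> b = pd.Series([3, 4], index=['b', 'c'])
#   >>> left, right = a.align(b)
#   >>> left
#   a    1.0
#   b    2.0
#   c    NaN
#   dtype: float64
#   >>> right
#   a    NaN
#   b    3.0
#   c    4.0
#   dtype: float64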
def _align_frame(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(reindexers, copy=copy,
fill_value=fill_value,
allow_dups=True)
# other must be always DataFrame
right = other._reindex_with_indexers({0: [join_index, iridx],
1: [join_columns, cridx]},
copy=copy, fill_value=fill_value,
allow_dups=True)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError('cannot align series to a series other than '
'axis 0')
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(other.index, how=join,
level=level,
return_indexers=True)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError('Must specify axis=0 or 1')
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit,
axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join='right', broadcast_axis=1)
else:
if not hasattr(cond, 'shape'):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError('Array conditional must be same shape as '
'self')
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, pd.DataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, 'align'):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(other, join='left', axis=axis,
level=level, fill_value=np.nan)
# if we are NOT aligned, raise as we cannot where index
if (axis is None and
not all(other._get_axis(i).equals(ax)
for i, ax in enumerate(self.axes))):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError("cannot align with a higher "
"dimensional NDFrame")
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = com.values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
except Exception:
try_quick = False
# otherwise create a new array (if the fast path above failed
# or try_quick is False)
if not try_quick:
dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError('Length of replacements must equal '
'series length')
else:
raise ValueError('other must be the same shape as self '
'when an ndarray')
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, 'ndim', 0):
align = True
else:
align = (self._get_axis_number(axis) == 1)
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(mask=cond, new=other, align=align,
inplace=True, axis=block_axis,
transpose=self._AXIS_REVERSED)
self._update_inplace(new_data)
else:
new_data = self._data.where(other=other, cond=cond, align=align,
errors=errors,
try_cast=try_cast, axis=block_axis,
transpose=self._AXIS_REVERSED)
return self._constructor(new_data).__finalize__(self)
_shared_docs['where'] = ("""
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as cond.
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as other.
inplace : boolean, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default `raise`
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- `raise` : allow exceptions to be raised.
- `ignore` : suppress exceptions. On error return original object.
try_cast : boolean, default False
Try to cast the result back to the input type (if possible).
raise_on_error : boolean, default True
Whether to raise on invalid data types (e.g. trying to where on
strings).
.. deprecated:: 0.21.0
Use `errors`.
Returns
-------
wh : same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
""")
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="True",
cond_rev="False", name='where',
name_other='mask'))
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
other = com.apply_if_callable(other, self)
return self._where(cond, other, inplace, axis, level,
errors=errors, try_cast=try_cast)
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False",
cond_rev="True", name='mask',
name_other='where'))
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
inplace = validate_bool_kwarg(inplace, 'inplace')
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(~cond, other=other, inplace=inplace, axis=axis,
level=level, try_cast=try_cast,
errors=errors)
_shared_docs['shift'] = ("""
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
""")
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(periods=periods, axis=block_axis,
fill_value=fill_value)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : same type as caller
Notes
-----
While `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
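# Editor's note: an illustrative sketch for ``slice_shift`` (not part of the
# original source). Unlike ``shift``, the rows that would have become NaN are
# dropped, so the result is shorter than the input and keeps its dtype:
#
#   >>> import pandas as pd
#   >>> s = pd.Series([10, 20, 30])
#   >>> s.shift(1)
#   0     NaN
#   1    10.0
#   2    20.0
#   dtype: float64
#   >>> s.slice_shift(1)
#   1    10
#   2    20
#   dtype: int64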
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Returns
-------
shifted : NDFrame
Notes
-----
If `freq` is not specified, this method tries to use the `freq` or
`inferred_freq` attributes of the index. If neither of those
attributes exists, a ValueError is raised.
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, string_types):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
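# Editor's note: an illustrative sketch for ``tshift`` (not part of the
# original source), assuming a DatetimeIndex with an explicit daily freq so
# that no ``freq`` argument is needed; the data is unchanged, only the index
# is shifted:
#
#   >>> import pandas as pd
#   >>> idx = pd.date_range('2018-01-01', periods=3, freq='D')
#   >>> s = pd.Series([1, 2, 3], index=idx)
#   >>> s.tshift(2)
#   2018-01-03    1
#   2018-01-04    2
#   2018-01-05    3
#   Freq: D, dtype: int64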
def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : boolean, default is True,
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' %
(after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis),
ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, 'tz_convert'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
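# Editor's note: an illustrative sketch for ``tz_convert`` (not part of the
# original source), assuming a tz-aware DatetimeIndex; converting changes the
# wall time shown but not the underlying instants:
#
#   >>> import pandas as pd
#   >>> idx = pd.date_range('2018-01-01 09:00', periods=2, freq='H', tz='UTC')
#   >>> s = pd.Series([1, 2], index=idx)
#   >>> s.tz_convert('US/Eastern')
#   2018-01-01 04:00:00-05:00    1
#   2018-01-01 05:00:00-05:00    2
#   Freq: H, dtype: int64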
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise', nonexistent='raise'):
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise an NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7), index=pd.DatetimeIndex([
... '2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3), index=pd.DatetimeIndex([
... '2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
        dates forward or backward with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.Series(range(2), index=pd.DatetimeIndex([
... '2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ('raise', 'NaT', 'shift_forward',
'shift_backward')
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta):
raise ValueError("The nonexistent argument must be one of 'raise',"
" 'NaT', 'shift_forward', 'shift_backward' or"
" a timedelta object")
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, 'tz_localize'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(
tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(
ax.levels[level], tz, ambiguous, nonexistent
)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
        Select rows with data closest to a certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
        exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
        DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim >= 3:
msg = "describe is not implemented on Panel objects."
raise NotImplementedError(msg)
elif self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (['count', 'mean', 'std', 'min'] +
formatted_percentiles + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
series.quantile(percentiles).tolist() + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ['count', 'unique']
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
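                # Datetime columns also report 'first' and 'last'; the modal
                # value is reported in the column's own time zone.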
if is_datetime64_any_dtype(data):
tz = data.dt.tz
asint = data.dropna().values.view('i8')
top = Timestamp(top)
if top.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
top = top.tz_convert(tz)
else:
top = top.tz_localize(tz)
names += ['top', 'freq', 'first', 'last']
result += [top, freq,
Timestamp(asint.min(), tz=tz),
Timestamp(asint.max(), tz=tz)]
else:
names += ['top', 'freq']
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data):
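            # Booleans are summarised like categoricals; timedelta columns
            # reuse the numeric summary.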
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns = data.columns.copy()
return d
def _check_percentile(self, q):
"""
Validate percentiles (used by describe and quantile).
"""
msg = ("percentiles should all be in the interval [0, 1]. "
"Try {0} instead.")
q = np.asarray(q)
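        # Values such as 25 were most likely given as percentages, hence the
        # q / 100.0 suggestion in the error message.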
if q.ndim == 0:
if not 0 <= q <= 1:
raise ValueError(msg.format(q / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q):
raise ValueError(msg.format(q / 100.0))
return q
_shared_docs['pct_change'] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,
**kwargs)) - 1)
rs = rs.reindex_like(data)
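        # Without a resampling freq, re-insert NaN wherever the original data
        # was missing.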
if freq is None:
mask = isna(com.values_from_object(data))
np.putmask(rs.values, mask, np.nan)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany,
_any_see_also, _any_examples, empty_value=False)
cls.all = _make_logical_function(
cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall,
_all_see_also, _all_examples, empty_value=True)
@Substitution(desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name, name2=name2, axis_descr=axis_descr,
min_count='', see_also='', examples='')
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level('mad', axis=axis, level=level,
skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls, 'sem', name, name2, axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem)
cls.var = _make_stat_function_ddof(
cls, 'var', name, name2, axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar)
cls.std = _make_stat_function_ddof(
cls, 'std', name, name2, axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd)
@Substitution(desc="Return the compound percentage of the values for "
"the requested axis.", name1=name, name2=name2,
axis_descr=axis_descr,
min_count='', see_also='', examples='')
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
cls.cummin = _make_cum_function(
cls, 'cummin', name, name2, axis_descr, "minimum",
lambda y, axis: np.minimum.accumulate(y, axis), "min",
np.inf, np.nan, _cummin_examples)
cls.cumsum = _make_cum_function(
cls, 'cumsum', name, name2, axis_descr, "sum",
lambda y, axis: y.cumsum(axis), "sum", 0.,
np.nan, _cumsum_examples)
cls.cumprod = _make_cum_function(
cls, 'cumprod', name, name2, axis_descr, "product",
lambda y, axis: y.cumprod(axis), "prod", 1.,
np.nan, _cumprod_examples)
cls.cummax = _make_cum_function(
cls, 'cummax', name, name2, axis_descr, "maximum",
lambda y, axis: np.maximum.accumulate(y, axis), "max",
-np.inf, np.nan, _cummax_examples)
cls.sum = _make_min_count_stat_function(
cls, 'sum', name, name2, axis_descr,
"""Return the sum of the values for the requested axis.\n
This is equivalent to the method ``numpy.sum``.""",
nanops.nansum, _stat_func_see_also, _sum_examples)
cls.mean = _make_stat_function(
cls, 'mean', name, name2, axis_descr,
'Return the mean of the values for the requested axis.',
nanops.nanmean)
cls.skew = _make_stat_function(
cls, 'skew', name, name2, axis_descr,
'Return unbiased skew over requested axis\nNormalized by N-1.',
nanops.nanskew)
cls.kurt = _make_stat_function(
cls, 'kurt', name, name2, axis_descr,
"Return unbiased kurtosis over requested axis using Fisher's "
"definition of\nkurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
nanops.nankurt)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls, 'prod', name, name2, axis_descr,
'Return the product of the values for the requested axis.',
nanops.nanprod, examples=_prod_examples)
cls.product = cls.prod
cls.median = _make_stat_function(
cls, 'median', name, name2, axis_descr,
'Return the median of the values for the requested axis.',
nanops.nanmedian)
cls.max = _make_stat_function(
cls, 'max', name, name2, axis_descr,
"""Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax, _stat_func_see_also, _max_examples)
cls.min = _make_stat_function(
cls, 'min', name, name2, axis_descr,
"""Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin, _stat_func_see_also, _min_examples)
@classmethod
def _add_series_only_operations(cls):
"""
Add the series only operations to the cls; evaluate the doc
strings again.
"""
axis_descr, name, name2 = _doc_parms(cls)
def nanptp(values, axis=0, skipna=True):
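            # 'ptp' (peak-to-peak) is the spread between the maximum and the
            # minimum of the values.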
nmax = nanops.nanmax(values, axis, skipna)
nmin = nanops.nanmin(values, axis, skipna)
warnings.warn("Method .ptp is deprecated and will be removed "
"in a future version. Use numpy.ptp instead.",
FutureWarning, stacklevel=4)
return nmax - nmin
cls.ptp = _make_stat_function(
cls, 'ptp', name, name2, axis_descr,
"""Return the difference between the maximum value and the
minimum value in the object. This is the equivalent of the
``numpy.ndarray`` method ``ptp``.\n\n.. deprecated:: 0.24.0
Use numpy.ptp instead""",
nanptp)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core import window as rwindow
@Appender(rwindow.rolling.__doc__)
def rolling(self, window, min_periods=None, center=False,
win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
min_periods=min_periods,
center=center, win_type=win_type,
on=on, axis=axis, closed=closed)
cls.rolling = rolling
@Appender(rwindow.expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return rwindow.expanding(self, min_periods=min_periods,
center=center, axis=axis)
cls.expanding = expanding
@Appender(rwindow.ewm.__doc__)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, adjust=True, ignore_na=False,
axis=0):
axis = self._get_axis_number(axis)
return rwindow.ewm(self, com=com, span=span, halflife=halflife,
alpha=alpha, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na, axis=axis)
cls.ewm = ewm
@Appender(_shared_docs['transform'] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce "
"aggregated results")
return result
# ----------------------------------------------------------------------
# Misc methods
_shared_docs['valid_index'] = """
Return index for %(position)s non-NA/null value.
Returns
    -------
scalar : type of index
Notes
    -----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how):
"""
        Retrieve the index of the first or last valid value, depending on ``how``.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
assert how in ['first', 'last']
if len(self) == 0: # early stop
return None
is_valid = ~self.isna()
if self.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == 'first':
idxpos = is_valid.values[::].argmax()
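        # For 'last', scan the reversed mask and map the hit back to a
        # position in the original (forward) order.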
if how == 'last':
idxpos = len(self) - 1 - is_valid.values[::-1].argmax()
chk_notna = is_valid.iat[idxpos]
idx = self.index[idxpos]
if not chk_notna:
return None
return idx
@Appender(_shared_docs['valid_index'] % {'position': 'first',
'klass': 'NDFrame'})
def first_valid_index(self):
return self._find_valid_index('first')
@Appender(_shared_docs['valid_index'] % {'position': 'last',
'klass': 'NDFrame'})
def last_valid_index(self):
return self._find_valid_index('last')
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS))
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)
%(see_also)s
%(examples)s\
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s\n
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s
"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs['stat_func_example'] = """\
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
"""
_sum_examples = _shared_docs['stat_func_example'].format(
stat_func='sum',
verb='Sum',
default_output=14,
level_output_0=6,
level_output_1=8)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan
"""
_max_examples = _shared_docs['stat_func_example'].format(
stat_func='max',
verb='Max',
default_output=8,
level_output_0=4,
level_output_1=8)
_min_examples = _shared_docs['stat_func_example'].format(
stat_func='min',
verb='Min',
default_output=0,
level_output_0=2,
level_output_1=0)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis.
"""
_prod_examples = """\
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan
"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
    .. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc,
f, see_also='', examples=''):
@Substitution(desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, min_count=_min_count_stub,
see_also=see_also, examples=examples)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0,
**kwargs):
if name == 'sum':
nv.validate_sum(tuple(), kwargs)
elif name == 'prod':
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, min_count=min_count)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=numeric_only, min_count=min_count)
return set_function_name(stat_func, name, cls)
def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f,
see_also='', examples=''):
@Substitution(desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, min_count='', see_also=see_also,
examples=examples)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
if name == 'median':
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=numeric_only)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, ddof=ddof)
return self._reduce(f, name, axis=axis, numeric_only=numeric_only,
skipna=skipna, ddof=ddof)
return set_function_name(stat_func, name, cls)
def _make_cum_function(cls, name, name1, name2, axis_descr, desc,
accum_func, accum_func_name, mask_a, mask_b, examples):
@Substitution(desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, accum_func_name=accum_func_name,
examples=examples)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = com.values_from_object(self).copy()
if (skipna and
issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):
result = accum_func(y, axis)
mask = isna(self)
np.putmask(result, mask, iNaT)
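        # For float-like data, NaNs are temporarily replaced with a neutral
        # fill value (mask_a) so the accumulation skips them, then restored to
        # NaN (mask_b) in the result.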
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isna(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d['copy'] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f,
see_also, examples, empty_value):
@Substitution(desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, see_also=see_also, examples=examples,
empty_value=empty_value)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None,
**kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError("Option bool_only is not "
"implemented with option level.")
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=bool_only, filter_type='bool')
return set_function_name(logical_func, name, cls)
# install the indexes
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
| 35.060932 | 96 | 0.53092 | [
"BSD-3-Clause"
] | kapilepatel/pandas | pandas/core/generic.py | 388,405 | Python |
# coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1RetrieveBusinessRequest(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
V1RetrieveBusinessRequest - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
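            # Recursively serialise nested models, including models held in
            # lists and dicts.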
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.260417 | 77 | 0.555474 | [
"Apache-2.0"
] | reduceus/connect-python-sdk | squareconnect/models/v1_retrieve_business_request.py | 2,713 | Python |
# pylint: skip-file
import getpass
import shlex
from subprocess import PIPE # nosec
from django.core.management.base import BaseCommand
from django.utils.autoreload import run_with_reloader
import psutil
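# Illustrative usage: run `python manage.py celery` during development;
# Django's autoreloader then restarts the worker whenever source files change.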
def restart_celery():
for proc in psutil.process_iter():
if proc.username() != getpass.getuser(): # skip processes not owned by user
continue
if proc.name() != "celery":
continue
        # SIGTERM should only be sent to the parent process, never to child processes
# see: https://github.com/celery/celery/issues/2700#issuecomment-259716123
if not proc.children():
continue
celery_proc = proc # found parent celery process
celery_proc.terminate()
break
cmd = "celery worker -A ideabox_backend -l INFO"
psutil.Popen(shlex.split(cmd), stdout=PIPE)
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Starting celery worker with autoreload")
run_with_reloader(restart_celery)
| 31.090909 | 84 | 0.682261 | [
"MIT"
] | www-norma-dev/idea-box-backend | backend/common/management/commands/celery.py | 1,026 | Python |
if True:
if True:
a = ''''Hello'<caret> - said man.\n
Let's go.
And they went out.''' | 23 | 39 | 0.443478 | [
"Apache-2.0"
] | 06needhamt/intellij-community | python/testData/intentions/convertTripleQuotedString.py | 115 | Python |
#!/usr/local/bin/python3
import json
from sys import argv
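# Illustrative usage: `./update.py 1.2.3` appends the download URLs for the
# given release tag to both Carthage binary specification files.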
version = str(argv[1])
with open("ReactorKit", 'r+') as file:
data = json.load(file)
file.seek(0)
data[version] = "https://github.com/kawoou/ReactorKit-Carthage/releases/download/" + version + "/ReactorKit.framework.zip"
json.dump(data, file, indent=4, sort_keys=True)
file.close()
with open("ReactorKit-Static", 'r+') as file:
data = json.load(file)
file.seek(0)
data[version] = "https://github.com/kawoou/ReactorKit-Carthage/releases/download/" + version + "/ReactorKit-Static.framework.zip"
json.dump(data, file, indent=4, sort_keys=True)
file.close()
| 31.380952 | 133 | 0.682853 | [
"MIT"
] | cruisediary/ReactorKit-Carthage | scripts/update.py | 659 | Python |
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.views.generic import View, TemplateView, FormView
def Index(request):
if request.method == 'POST':
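        # The landing page hosts both the login and the signup form; the name
        # of the submit button tells the two POSTs apart.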
if 'indexLogin' in request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('goal_app:goals')
else:
login_form = AuthenticationForm()
signup_form = UserCreationForm()
errorMessage = True
context = {"intro_page": "active", 'login_form': login_form, 'errorMessage': errorMessage, 'signup_form': signup_form,}
elif 'indexSignup' in request.POST:
signup_form = UserCreationForm(request.POST)
if signup_form.is_valid():
signup_form.save()
username = signup_form.cleaned_data.get('username')
raw_password = signup_form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('goal_app:goals')
else:
login_form = AuthenticationForm()
context = {"intro_page": "active", 'login_form': login_form, 'signup_form': signup_form, }
else:
login_form = AuthenticationForm()
signup_form = UserCreationForm()
context = {"intro_page": "active", 'login_form': login_form, 'signup_form': signup_form, }
return render(request, 'index.html', context)
def About(request):
context = {"about_page": "active"}
return render(request, 'about.html', context)
def Signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('goal_app:goals')
else:
context = {'signup_page': 'active'}
form = UserCreationForm()
return render(request, 'registration/signup.html', {'form': form})
| 44.75 | 135 | 0.622905 | [
"MIT"
] | Ange1ique/GoalPlanner | GoalPlanner/views.py | 2,506 | Python |
from django.apps import AppConfig
class IgovMainConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'igov_main'
| 21.285714 | 56 | 0.765101 | [
"MIT"
] | morrisedu/igov_africa | igov_main/apps.py | 149 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import atexit
import json
import logging
from pathlib import Path
import platform
import shutil
import tempfile
from typing import Dict, List, Optional, Set, TYPE_CHECKING
from .config import Configuration
from ._dispatcher import Dispatcher
from . import _ffi
from . import hardware
from .net import PingUploadWorker
from .net import DeletionPingUploadWorker
from . import util
# To avoid cyclical imports, but still make mypy type-checking work.
# See https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
from .metrics import PingType, RecordedExperimentData
log = logging.getLogger(__name__)
class Glean:
"""
The main Glean API.
Before any data collection can take place, the Glean SDK **must** be
initialized from the application.
>>> Glean.initialize(application_id="my-app", application_version="0.0.0", upload_enabled=True)
"""
# Whether Glean was initialized
_initialized = False # type: bool
# The Configuration that was passed to `initialize`
_configuration = None # type: Configuration
# The directory that Glean stores data in
_data_dir = Path() # type: Path
# Whether Glean "owns" the data directory and should destroy it upon reset.
_destroy_data_dir = False # type: bool
# Keep track of this setting before Glean is initialized
_upload_enabled = True # type: bool
# The ping types, so they can be registered prior to Glean initialization,
# and saved between test runs.
_ping_type_queue = set() # type: Set[PingType]
# The application id to send in the ping.
_application_id = None # type: str
# The version of the application sending Glean data.
_application_version = None # type: str
@classmethod
def initialize(
cls,
application_id: str,
application_version: str,
upload_enabled: bool,
configuration: Optional[Configuration] = None,
data_dir: Optional[Path] = None,
):
"""
Initialize the Glean SDK.
This should only be initialized once by the application, and not by
        libraries using the Glean SDK. A message is logged as an error and no
        changes are made to the state if initialize is called more than
        once.
Args:
application_id (str): The application id to use when sending pings.
application_version (str): The version of the application sending
Glean data.
upload_enabled (bool): Controls whether telemetry is enabled. If
disabled, all persisted metrics, events and queued pings
(except first_run_date) are cleared.
configuration (glean.config.Configuration): (optional) An object with
global settings.
data_dir (pathlib.Path): (optional) The path to the Glean data
directory. If not provided, uses a temporary directory.
"""
if cls.is_initialized():
return
if configuration is None:
configuration = Configuration()
if data_dir is None:
data_dir = Path(tempfile.TemporaryDirectory().name)
cls._destroy_data_dir = True
else:
cls._destroy_data_dir = False
cls._data_dir = data_dir
cls._configuration = configuration
cls._application_id = application_id
cls._application_version = application_version
cls._upload_enabled = upload_enabled
cfg = _ffi.make_config(
cls._data_dir,
application_id,
cls._upload_enabled,
configuration.max_events,
)
cls._initialized = _ffi.lib.glean_initialize(cfg) != 0
# If initialization of Glean fails, we bail out and don't initialize
# further
if not cls._initialized:
return
for ping in cls._ping_type_queue:
cls.register_ping_type(ping)
# Initialize the core metrics
cls._initialize_core_metrics()
# Glean Android sets up the metrics ping scheduler here, but we don't
# have one.
# Deal with any pending events so we can start recording new ones
@Dispatcher.launch_at_front
def submit_pending_events():
if _ffi.lib.glean_on_ready_to_submit_pings():
PingUploadWorker.process()
Dispatcher.flush_queued_initial_tasks()
# Glean Android sets up the lifecycle observer here. We don't really
# have a lifecycle.
if cls._upload_enabled is False:
@Dispatcher.launch
def check_pending_deletion_request():
DeletionPingUploadWorker.process()
@classmethod
def reset(cls):
"""
Resets the Glean singleton.
"""
# TODO: 1594184 Send the metrics ping
Dispatcher.reset()
if cls._initialized:
_ffi.lib.glean_destroy_glean()
cls._initialized = False
if cls._destroy_data_dir and cls._data_dir.exists():
shutil.rmtree(str(cls._data_dir))
@classmethod
def is_initialized(cls) -> bool:
"""
Returns True if the Glean SDK has been initialized.
"""
return cls._initialized
@classmethod
def register_ping_type(cls, ping: "PingType"):
"""
Register the ping type in the registry.
"""
if cls.is_initialized():
_ffi.lib.glean_register_ping_type(ping._handle)
# We need to keep track of pings, so they get re-registered after a
# reset. This state is kept across Glean resets, which should only ever
# happen in test mode. It's a set and keeping them around forever
# should not have much of an impact.
cls._ping_type_queue.add(ping)
@classmethod
def test_has_ping_type(cls, ping_name: str):
"""
Returns True if a ping by this name is in the ping registry.
"""
return bool(
_ffi.lib.glean_test_has_ping_type(_ffi.ffi_encode_string(ping_name))
)
@classmethod
def set_upload_enabled(cls, enabled: bool):
"""
Enable or disable Glean collection and upload.
Metric collection is enabled by default.
When uploading is disabled, metrics aren't recorded at all and no data
is uploaded.
When disabling, all pending metrics, events and queued pings are cleared.
When enabling, the core Glean metrics are recreated.
Args:
enabled (bool): When True, enable metric collection.
"""
if cls.is_initialized():
original_enabled = cls.get_upload_enabled()
@Dispatcher.launch
def set_upload_enabled():
_ffi.lib.glean_set_upload_enabled(enabled)
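            # Re-enabling uploads requires the core metrics to be recorded
            # again, since they were cleared when uploading was disabled.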
if original_enabled is False and cls.get_upload_enabled() is True:
cls._initialize_core_metrics()
if original_enabled is True and cls.get_upload_enabled() is False:
# If uploading is disabled, we need to send the deletion-request ping
DeletionPingUploadWorker.process()
else:
cls._upload_enabled = enabled
@classmethod
def get_upload_enabled(cls) -> bool:
"""
Get whether or not Glean is allowed to record and upload data.
"""
if cls.is_initialized():
return bool(_ffi.lib.glean_is_upload_enabled())
else:
return cls._upload_enabled
@classmethod
def set_experiment_active(
cls, experiment_id: str, branch: str, extra: Optional[Dict[str, str]] = None
):
"""
Indicate that an experiment is running. Glean will then add an
experiment annotation to the environment which is sent with pings. This
information is not persisted between runs.
Args:
experiment_id (str): The id of the active experiment (maximum 100
bytes)
branch (str): The experiment branch (maximum 100 bytes)
extra (dict of str -> str): Optional metadata to output with the
ping
"""
if extra is None:
keys = [] # type: List[str]
values = [] # type: List[str]
else:
keys, values = zip(*extra.items()) # type: ignore
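        # The FFI call receives the extras as two parallel key/value arrays
        # plus their length.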
@Dispatcher.launch
def set_experiment_active():
_ffi.lib.glean_set_experiment_active(
_ffi.ffi_encode_string(experiment_id),
_ffi.ffi_encode_string(branch),
_ffi.ffi_encode_vec_string(keys),
_ffi.ffi_encode_vec_string(values),
len(keys),
)
@classmethod
def set_experiment_inactive(cls, experiment_id: str):
"""
Indicate that the experiment is no longer running.
Args:
experiment_id (str): The id of the experiment to deactivate.
"""
@Dispatcher.launch
def set_experiment_inactive():
_ffi.lib.glean_set_experiment_inactive(
_ffi.ffi_encode_string(experiment_id)
)
@classmethod
def test_is_experiment_active(cls, experiment_id: str) -> bool:
"""
Tests whether an experiment is active, for testing purposes only.
Args:
experiment_id (str): The id of the experiment to look for.
Returns:
            is_active (bool): True if the experiment is active and reported in
pings.
"""
return bool(
_ffi.lib.glean_experiment_test_is_active(
_ffi.ffi_encode_string(experiment_id)
)
)
@classmethod
def test_get_experiment_data(cls, experiment_id: str) -> "RecordedExperimentData":
"""
Returns the stored data for the requested active experiment, for testing purposes only.
Args:
experiment_id (str): The id of the experiment to look for.
Returns:
experiment_data (RecordedExperimentData): The data associated with
the experiment.
"""
from .metrics import RecordedExperimentData
json_string = _ffi.ffi_decode_string(
_ffi.lib.glean_experiment_test_get_data(
_ffi.ffi_encode_string(experiment_id)
)
)
json_tree = json.loads(json_string)
return RecordedExperimentData(**json_tree) # type: ignore
@classmethod
def _initialize_core_metrics(cls):
"""
Set a few metrics that will be sent as part of every ping.
"""
from ._builtins import metrics
metrics.glean.baseline.locale.set(util.get_locale_tag())
metrics.glean.internal.metrics.os.set(platform.system())
metrics.glean.internal.metrics.os_version.set(platform.release())
metrics.glean.internal.metrics.architecture.set(platform.machine())
metrics.glean.internal.metrics.locale.set(util.get_locale_tag())
sysinfo = hardware.get_system_information()
metrics.glean.internal.metrics.device_manufacturer.set(sysinfo.manufacturer)
metrics.glean.internal.metrics.device_model.set(sysinfo.model)
if cls._configuration.channel is not None:
metrics.glean.internal.metrics.app_channel.set(cls._configuration.channel)
metrics.glean.internal.metrics.app_build.set(cls._application_id)
if cls._application_version is not None:
metrics.glean.internal.metrics.app_display_version.set(
cls._application_version
)
@classmethod
def get_data_dir(cls) -> Path:
"""
Get the data directory for Glean.
"""
return cls._data_dir
@classmethod
def test_collect(cls, ping: "PingType") -> str:
"""
Collect a ping and return as a string.
"""
return _ffi.ffi_decode_string(_ffi.lib.glean_ping_collect(ping._handle))
@classmethod
def _submit_pings(cls, pings: List["PingType"]):
"""
Collect and submit a list of pings for eventual uploading.
If the ping currently contains no content, it will not be assembled and
queued for sending.
Args:
pings (list of PingType): List of pings to submit.
"""
ping_names = [ping.name for ping in pings]
cls._submit_pings_by_name(ping_names)
@classmethod
@Dispatcher.task
def _submit_pings_by_name(cls, ping_names: List[str]):
"""
Collect and submit a list of pings for eventual uploading by name.
Each ping will be looked up in the known instances of
`glean.metrics.PingType`. If the ping isn't known, an error is logged
and the ping isn't queued for uploading.
If the ping currently contains no content, it will not be assembled and
queued for sending.
Args:
ping_names (list of str): List of ping names to submit.
"""
if not cls.is_initialized():
log.error("Glean must be initialized before submitting pings.")
return
if not cls.get_upload_enabled():
log.error("Glean must be enabled before submitting pings.")
return
sent_ping = _ffi.lib.glean_submit_pings_by_name(
_ffi.ffi_encode_vec_string(ping_names), len(ping_names)
)
if sent_ping:
PingUploadWorker.process()
__all__ = ["Glean"]
atexit.register(Glean.reset)
| 32.411765 | 99 | 0.633176 | [
"MPL-2.0"
] | tdsmith/glean | glean-core/python/glean/glean.py | 13,775 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
try:
from pkg_resources import VersionConflict, require
require("setuptools>=38.3")
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
def get_extra_requires(add_all=True, add_all_dev=True):
from distutils.dist import Distribution
dist = Distribution()
dist.parse_config_files()
dist.parse_command_line()
extras = {}
extra_deps = dist.get_option_dict("options.extras_require")
for extra_name, data in extra_deps.items():
_, dep_string = data
deps = []
d = dep_string.split("\n")
for line in d:
if not line:
continue
deps.append(line)
extras[extra_name] = deps
if add_all:
all = set()
for e_n, deps in extras.items():
if not e_n.startswith("dev_"):
all.update(deps)
extras["all"] = all
# add tag `all` at the end
if add_all_dev:
extras["all_dev"] = set(vv for v in extras.values() for vv in v)
extras["dev_all"] = extras["all_dev"]
return extras
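# Rough shape of the mapping returned above, assuming setup.cfg declares extras
# named e.g. "jupyter" and "dev_docs" (names purely illustrative):
#
#     {"jupyter": [...], "dev_docs": [...],
#      "all": {...non-dev deps...}, "all_dev": {...}, "dev_all": {...}}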
if __name__ in ["__main__", "builtins", "__builtin__"]:
setup(
use_scm_version={
"write_to": "src/kiara_modules/language_processing/version.txt"
},
extras_require=get_extra_requires(),
)
| 23.8 | 75 | 0.606443 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | DHARPA-Project/kiara_modules.language_processing | setup.py | 1,428 | Python |
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
import aiomysql
async def index(request):
    return web.Response(text='Awesome')
app = web.Application()
app.add_routes([web.get('/', index)])
logging.info('server started at http://127.0.0.1:9000 ...')
web.run_app(app, host='127.0.0.1', port=9000)
| 22 | 59 | 0.729798 | [
"Apache-2.0"
] | XuanYang-cn/Myblog | awesome-python3-webapp/www/app.py | 396 | Python |
#!/usr/bin/env python3
"""
Description:
Usage:
$> roslaunch turtle_nodes.launch
$> ./executive_step_04.py
Output:
[INFO] : State machine starting in initial state 'RESET' with userdata:
[]
[INFO] : State machine transitioning 'RESET':'succeeded'-->'SPAWN'
[INFO] : State machine transitioning 'SPAWN':'succeeded'-->'TELEPORT1'
[INFO] : State machine transitioning 'TELEPORT1':'succeeded'-->'TELEPORT2'
[INFO] : State machine transitioning 'TELEPORT2':'succeeded'-->'BIG'
[WARN] : Still waiting for action server 'turtle_shape1' to start... is it running?
[INFO] : Connected to action server 'turtle_shape1'.
[INFO] 1279655938.783058: State machine transitioning 'BIG':'succeeded'-->'SMALL'
[INFO] 1279655975.025202: State machine terminating 'SMALL':'succeeded':'succeeded'
"""
import rospy
import threading
import smach
from smach import StateMachine, ServiceState, SimpleActionState, IntrospectionServer
import std_srvs.srv
import turtlesim.srv
import turtle_actionlib.msg
def main():
rospy.init_node('smach_usecase_step_04')
# Construct static goals
polygon_big = turtle_actionlib.msg.ShapeGoal(edges = 11, radius = 4.0)
polygon_small = turtle_actionlib.msg.ShapeGoal(edges = 6, radius = 0.5)
# Create a SMACH state machine
sm0 = StateMachine(outcomes=['succeeded','aborted','preempted'])
# Open the container
with sm0:
# Reset turtlesim
StateMachine.add('RESET',
ServiceState('reset', std_srvs.srv.Empty),
{'succeeded':'SPAWN'})
# Create a second turtle
StateMachine.add('SPAWN',
ServiceState('spawn', turtlesim.srv.Spawn,
request = turtlesim.srv.SpawnRequest(0.0,0.0,0.0,'turtle2')),
{'succeeded':'TELEPORT1'})
# Teleport turtle 1
StateMachine.add('TELEPORT1',
ServiceState('turtle1/teleport_absolute', turtlesim.srv.TeleportAbsolute,
request = turtlesim.srv.TeleportAbsoluteRequest(5.0,1.0,0.0)),
{'succeeded':'TELEPORT2'})
# Teleport turtle 2
StateMachine.add('TELEPORT2',
ServiceState('turtle2/teleport_absolute', turtlesim.srv.TeleportAbsolute,
request = turtlesim.srv.TeleportAbsoluteRequest(9.0,5.0,0.0)),
{'succeeded':'BIG'})
# Draw a large polygon with the first turtle
StateMachine.add('BIG',
SimpleActionState('turtle_shape1',turtle_actionlib.msg.ShapeAction,
goal = polygon_big),
{'succeeded':'SMALL'})
# Draw a small polygon with the second turtle
StateMachine.add('SMALL',
SimpleActionState('turtle_shape2',turtle_actionlib.msg.ShapeAction,
goal = polygon_small))
# Attach a SMACH introspection server
sis = IntrospectionServer('smach_usecase_01', sm0, '/USE_CASE')
sis.start()
# Set preempt handler
smach.set_preempt_handler(sm0)
# Execute SMACH tree in a separate thread so that we can ctrl-c the script
smach_thread = threading.Thread(target = sm0.execute)
smach_thread.start()
# Signal handler
rospy.spin()
if __name__ == '__main__':
main()
| 33.969072 | 89 | 0.646434 | [
"MIT"
] | OxRAMSociety/RobotArm | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_04.py | 3,295 | Python |
from .. import abc
from .. import util
importlib = util.import_importlib('importlib')
importlib_abc = util.import_importlib('importlib.abc')
machinery = util.import_importlib('importlib.machinery')
importlib_util = util.import_importlib('importlib.util')
import errno
import marshal
import os
import py_compile
import shutil
import stat
import sys
import types
import unittest
import warnings
from test.support import make_legacy_pyc, unload
from test.test_py_compile import without_source_date_epoch
from test.test_py_compile import SourceDateEpochTestMeta
class SimpleTest(abc.LoaderTests):
"""Should have no issue importing a source module [basic]. And if there is
a syntax error, it should raise a SyntaxError [syntax error].
"""
def setUp(self):
self.name = 'spam'
self.filepath = os.path.join('ham', self.name + '.py')
self.loader = self.machinery.SourceFileLoader(self.name, self.filepath)
def test_load_module_API(self):
class Tester(self.abc.FileLoader):
def get_source(self, _): return 'attr = 42'
def is_package(self, _): return False
loader = Tester('blah', 'blah.py')
self.addCleanup(unload, 'blah')
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module() # Should not raise an exception.
def test_get_filename_API(self):
# If fullname is not set then assume self.path is desired.
class Tester(self.abc.FileLoader):
def get_code(self, _): pass
def get_source(self, _): pass
def is_package(self, _): pass
def module_repr(self, _): pass
path = 'some_path'
name = 'some_name'
loader = Tester(name, path)
self.assertEqual(path, loader.get_filename(name))
self.assertEqual(path, loader.get_filename())
self.assertEqual(path, loader.get_filename(None))
with self.assertRaises(ImportError):
loader.get_filename(name + 'XXX')
def test_equality(self):
other = self.machinery.SourceFileLoader(self.name, self.filepath)
self.assertEqual(self.loader, other)
def test_inequality(self):
other = self.machinery.SourceFileLoader('_' + self.name, self.filepath)
self.assertNotEqual(self.loader, other)
# [basic]
def test_module(self):
with util.create_modules('_temp') as mapping:
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_temp')
self.assertIn('_temp', sys.modules)
check = {'__name__': '_temp', '__file__': mapping['_temp'],
'__package__': ''}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_package(self):
with util.create_modules('_pkg.__init__') as mapping:
loader = self.machinery.SourceFileLoader('_pkg',
mapping['_pkg.__init__'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_pkg')
self.assertIn('_pkg', sys.modules)
check = {'__name__': '_pkg', '__file__': mapping['_pkg.__init__'],
'__path__': [os.path.dirname(mapping['_pkg.__init__'])],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def test_lacking_parent(self):
with util.create_modules('_pkg.__init__', '_pkg.mod')as mapping:
loader = self.machinery.SourceFileLoader('_pkg.mod',
mapping['_pkg.mod'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_pkg.mod')
self.assertIn('_pkg.mod', sys.modules)
check = {'__name__': '_pkg.mod', '__file__': mapping['_pkg.mod'],
'__package__': '_pkg'}
for attr, value in check.items():
self.assertEqual(getattr(module, attr), value)
def fake_mtime(self, fxn):
"""Fake mtime to always be higher than expected."""
return lambda name: fxn(name) + 1
def test_module_reuse(self):
with util.create_modules('_temp') as mapping:
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_temp')
module_id = id(module)
module_dict_id = id(module.__dict__)
with open(mapping['_temp'], 'w') as file:
file.write("testing_var = 42\n")
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module('_temp')
self.assertIn('testing_var', module.__dict__,
"'testing_var' not in "
"{0}".format(list(module.__dict__.keys())))
self.assertEqual(module, sys.modules['_temp'])
self.assertEqual(id(module), module_id)
self.assertEqual(id(module.__dict__), module_dict_id)
def test_state_after_failure(self):
# A failed reload should leave the original module intact.
attributes = ('__file__', '__path__', '__package__')
value = '<test>'
name = '_temp'
with util.create_modules(name) as mapping:
orig_module = types.ModuleType(name)
for attr in attributes:
setattr(orig_module, attr, value)
with open(mapping[name], 'w') as file:
file.write('+++ bad syntax +++')
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
loader.exec_module(orig_module)
for attr in attributes:
self.assertEqual(getattr(orig_module, attr), value)
with self.assertRaises(SyntaxError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module(name)
for attr in attributes:
self.assertEqual(getattr(orig_module, attr), value)
# [syntax error]
def test_bad_syntax(self):
with util.create_modules('_temp') as mapping:
with open(mapping['_temp'], 'w') as file:
file.write('=')
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
with self.assertRaises(SyntaxError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module('_temp')
self.assertNotIn('_temp', sys.modules)
def test_file_from_empty_string_dir(self):
# Loading a module found from an empty string entry on sys.path should
# not only work, but keep all attributes relative.
file_path = '_temp.py'
with open(file_path, 'w') as file:
file.write("# test file for importlib")
try:
with util.uncache('_temp'):
loader = self.machinery.SourceFileLoader('_temp', file_path)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
mod = loader.load_module('_temp')
self.assertEqual(file_path, mod.__file__)
self.assertEqual(self.util.cache_from_source(file_path),
mod.__cached__)
finally:
os.unlink(file_path)
pycache = os.path.dirname(self.util.cache_from_source(file_path))
if os.path.exists(pycache):
shutil.rmtree(pycache)
@unittest.skip("TODO: RUSTPYTHON")
@util.writes_bytecode_files
def test_timestamp_overflow(self):
# When a modification timestamp is larger than 2**32, it should be
# truncated rather than raise an OverflowError.
with util.create_modules('_temp') as mapping:
source = mapping['_temp']
compiled = self.util.cache_from_source(source)
with open(source, 'w') as f:
f.write("x = 5")
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
loader = self.machinery.SourceFileLoader('_temp', mapping['_temp'])
# PEP 451
module = types.ModuleType('_temp')
module.__spec__ = self.util.spec_from_loader('_temp', loader)
loader.exec_module(module)
self.assertEqual(module.x, 5)
self.assertTrue(os.path.exists(compiled))
os.unlink(compiled)
# PEP 302
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
mod = loader.load_module('_temp')
# Sanity checks.
self.assertEqual(mod.__cached__, compiled)
self.assertEqual(mod.x, 5)
# The pyc file was created.
self.assertTrue(os.path.exists(compiled))
def test_unloadable(self):
loader = self.machinery.SourceFileLoader('good name', {})
module = types.ModuleType('bad name')
module.__spec__ = self.machinery.ModuleSpec('bad name', loader)
with self.assertRaises(ImportError):
loader.exec_module(module)
with self.assertRaises(ImportError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
loader.load_module('bad name')
@unittest.skip("TODO: RUSTPYTHON")
@util.writes_bytecode_files
def test_checked_hash_based_pyc(self):
with util.create_modules('_temp') as mapping:
source = mapping['_temp']
pyc = self.util.cache_from_source(source)
with open(source, 'wb') as fp:
fp.write(b'state = "old"')
os.utime(source, (50, 50))
py_compile.compile(
source,
invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
)
loader = self.machinery.SourceFileLoader('_temp', source)
mod = types.ModuleType('_temp')
mod.__spec__ = self.util.spec_from_loader('_temp', loader)
loader.exec_module(mod)
self.assertEqual(mod.state, 'old')
# Write a new source with the same mtime and size as before.
with open(source, 'wb') as fp:
fp.write(b'state = "new"')
os.utime(source, (50, 50))
loader.exec_module(mod)
self.assertEqual(mod.state, 'new')
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11)
self.assertEqual(
self.util.source_hash(b'state = "new"'),
data[8:16],
)
@unittest.skip("TODO: RUSTPYTHON")
@util.writes_bytecode_files
def test_overridden_checked_hash_based_pyc(self):
with util.create_modules('_temp') as mapping, \
unittest.mock.patch('_imp.check_hash_based_pycs', 'never'):
source = mapping['_temp']
pyc = self.util.cache_from_source(source)
with open(source, 'wb') as fp:
fp.write(b'state = "old"')
os.utime(source, (50, 50))
py_compile.compile(
source,
invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
)
loader = self.machinery.SourceFileLoader('_temp', source)
mod = types.ModuleType('_temp')
mod.__spec__ = self.util.spec_from_loader('_temp', loader)
loader.exec_module(mod)
self.assertEqual(mod.state, 'old')
# Write a new source with the same mtime and size as before.
with open(source, 'wb') as fp:
fp.write(b'state = "new"')
os.utime(source, (50, 50))
loader.exec_module(mod)
self.assertEqual(mod.state, 'old')
@unittest.skip("TODO: RUSTPYTHON")
@util.writes_bytecode_files
def test_unchecked_hash_based_pyc(self):
with util.create_modules('_temp') as mapping:
source = mapping['_temp']
pyc = self.util.cache_from_source(source)
with open(source, 'wb') as fp:
fp.write(b'state = "old"')
os.utime(source, (50, 50))
py_compile.compile(
source,
invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH,
)
loader = self.machinery.SourceFileLoader('_temp', source)
mod = types.ModuleType('_temp')
mod.__spec__ = self.util.spec_from_loader('_temp', loader)
loader.exec_module(mod)
self.assertEqual(mod.state, 'old')
# Update the source file, which should be ignored.
with open(source, 'wb') as fp:
fp.write(b'state = "new"')
loader.exec_module(mod)
self.assertEqual(mod.state, 'old')
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b1)
self.assertEqual(
self.util.source_hash(b'state = "old"'),
data[8:16],
)
@unittest.skip("TODO: RUSTPYTHON")
@util.writes_bytecode_files
    def test_overridden_unchecked_hash_based_pyc(self):
with util.create_modules('_temp') as mapping, \
unittest.mock.patch('_imp.check_hash_based_pycs', 'always'):
source = mapping['_temp']
pyc = self.util.cache_from_source(source)
with open(source, 'wb') as fp:
fp.write(b'state = "old"')
os.utime(source, (50, 50))
py_compile.compile(
source,
invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH,
)
loader = self.machinery.SourceFileLoader('_temp', source)
mod = types.ModuleType('_temp')
mod.__spec__ = self.util.spec_from_loader('_temp', loader)
loader.exec_module(mod)
self.assertEqual(mod.state, 'old')
# Update the source file, which should be ignored.
with open(source, 'wb') as fp:
fp.write(b'state = "new"')
loader.exec_module(mod)
self.assertEqual(mod.state, 'new')
with open(pyc, 'rb') as fp:
data = fp.read()
self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b1)
self.assertEqual(
self.util.source_hash(b'state = "new"'),
data[8:16],
)
(Frozen_SimpleTest,
Source_SimpleTest
) = util.test_both(SimpleTest, importlib=importlib, machinery=machinery,
abc=importlib_abc, util=importlib_util)
class SourceDateEpochTestMeta(SourceDateEpochTestMeta,
type(Source_SimpleTest)):
pass
class SourceDateEpoch_SimpleTest(Source_SimpleTest,
metaclass=SourceDateEpochTestMeta,
source_date_epoch=True):
pass
class BadBytecodeTest:
def import_(self, file, module_name):
raise NotImplementedError
def manipulate_bytecode(self,
name, mapping, manipulator, *,
del_source=False,
invalidation_mode=py_compile.PycInvalidationMode.TIMESTAMP):
"""Manipulate the bytecode of a module by passing it into a callable
that returns what to use as the new bytecode."""
try:
del sys.modules['_temp']
except KeyError:
pass
py_compile.compile(mapping[name], invalidation_mode=invalidation_mode)
if not del_source:
bytecode_path = self.util.cache_from_source(mapping[name])
else:
os.unlink(mapping[name])
bytecode_path = make_legacy_pyc(mapping[name])
if manipulator:
with open(bytecode_path, 'rb') as file:
bc = file.read()
new_bc = manipulator(bc)
with open(bytecode_path, 'wb') as file:
if new_bc is not None:
file.write(new_bc)
return bytecode_path
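    # Note for the helpers below (summary added for clarity, not part of the
    # original test suite): a .pyc starts with a 16-byte header -- 4-byte magic
    # number, 4-byte flags word, then either a 4-byte source mtime + 4-byte
    # source size (timestamp-based pycs) or an 8-byte source hash (hash-based
    # pycs, see PEP 552). The bc[:N] slices used here truncate at those
    # boundaries.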
def _test_empty_file(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'',
del_source=del_source)
test('_temp', mapping, bc_path)
@util.writes_bytecode_files
def _test_partial_magic(self, test, *, del_source=False):
        # When there are fewer than 4 bytes to a .pyc, regenerate it if
# possible, else raise ImportError.
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:3],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_magic_only(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:4],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_flags(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:7],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_hash(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode(
'_temp',
mapping,
lambda bc: bc[:13],
del_source=del_source,
invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH,
)
test('_temp', mapping, bc_path)
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode(
'_temp',
mapping,
lambda bc: bc[:13],
del_source=del_source,
invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH,
)
test('_temp', mapping, bc_path)
def _test_partial_timestamp(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:11],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_partial_size(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:15],
del_source=del_source)
test('_temp', mapping, bc_path)
def _test_no_marshal(self, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:16],
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bc_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_non_code_marshal(self, *, del_source=False):
with util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:16] + marshal.dumps(b'abcd'),
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(ImportError) as cm:
self.import_(file_path, '_temp')
self.assertEqual(cm.exception.name, '_temp')
self.assertEqual(cm.exception.path, bytecode_path)
def _test_bad_marshal(self, *, del_source=False):
with util.create_modules('_temp') as mapping:
bytecode_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: bc[:16] + b'<test>',
del_source=del_source)
file_path = mapping['_temp'] if not del_source else bytecode_path
with self.assertRaises(EOFError):
self.import_(file_path, '_temp')
def _test_bad_magic(self, test, *, del_source=False):
with util.create_modules('_temp') as mapping:
bc_path = self.manipulate_bytecode('_temp', mapping,
lambda bc: b'\x00\x00\x00\x00' + bc[4:])
test('_temp', mapping, bc_path)
class BadBytecodeTestPEP451(BadBytecodeTest):
def import_(self, file, module_name):
loader = self.loader(module_name, file)
module = types.ModuleType(module_name)
module.__spec__ = self.util.spec_from_loader(module_name, loader)
loader.exec_module(module)
class BadBytecodeTestPEP302(BadBytecodeTest):
def import_(self, file, module_name):
loader = self.loader(module_name, file)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = loader.load_module(module_name)
self.assertIn(module_name, sys.modules)
class SourceLoaderBadBytecodeTest:
@classmethod
def setUpClass(cls):
cls.loader = cls.machinery.SourceFileLoader
@util.writes_bytecode_files
def test_empty_file(self):
# When a .pyc is empty, regenerate it if possible, else raise
# ImportError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_empty_file(test)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_partial_magic(test)
@util.writes_bytecode_files
def test_magic_only(self):
# When there is only the magic number, regenerate the .pyc if possible,
# else raise EOFError.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_magic_only(test)
@util.writes_bytecode_files
def test_bad_magic(self):
# When the magic number is different, the bytecode should be
# regenerated.
def test(name, mapping, bytecode_path):
self.import_(mapping[name], name)
with open(bytecode_path, 'rb') as bytecode_file:
self.assertEqual(bytecode_file.read(4),
self.util.MAGIC_NUMBER)
self._test_bad_magic(test)
@util.writes_bytecode_files
def test_partial_timestamp(self):
# When the timestamp is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_partial_timestamp(test)
@util.writes_bytecode_files
def test_partial_flags(self):
# When the flags is partial, regenerate the .pyc, else raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_partial_flags(test)
@util.writes_bytecode_files
def test_partial_hash(self):
# When the hash is partial, regenerate the .pyc, else raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_partial_hash(test)
@util.writes_bytecode_files
def test_partial_size(self):
# When the size is partial, regenerate the .pyc, else
# raise EOFError.
def test(name, mapping, bc_path):
self.import_(mapping[name], name)
with open(bc_path, 'rb') as file:
self.assertGreater(len(file.read()), 16)
self._test_partial_size(test)
@util.writes_bytecode_files
def test_no_marshal(self):
# When there is only the magic number and timestamp, raise EOFError.
self._test_no_marshal()
@util.writes_bytecode_files
def test_non_code_marshal(self):
self._test_non_code_marshal()
# XXX ImportError when sourceless
# [bad marshal]
@util.writes_bytecode_files
def test_bad_marshal(self):
# Bad marshal data should raise a ValueError.
self._test_bad_marshal()
# [bad timestamp]
@util.writes_bytecode_files
@without_source_date_epoch
def test_old_timestamp(self):
# When the timestamp is older than the source, bytecode should be
# regenerated.
zeros = b'\x00\x00\x00\x00'
with util.create_modules('_temp') as mapping:
py_compile.compile(mapping['_temp'])
bytecode_path = self.util.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(8)
bytecode_file.write(zeros)
self.import_(mapping['_temp'], '_temp')
source_mtime = os.path.getmtime(mapping['_temp'])
source_timestamp = self.importlib._pack_uint32(source_mtime)
with open(bytecode_path, 'rb') as bytecode_file:
bytecode_file.seek(8)
self.assertEqual(bytecode_file.read(4), source_timestamp)
# [bytecode read-only]
@util.writes_bytecode_files
def test_read_only_bytecode(self):
# When bytecode is read-only but should be rewritten, fail silently.
with util.create_modules('_temp') as mapping:
# Create bytecode that will need to be re-created.
py_compile.compile(mapping['_temp'])
bytecode_path = self.util.cache_from_source(mapping['_temp'])
with open(bytecode_path, 'r+b') as bytecode_file:
bytecode_file.seek(0)
bytecode_file.write(b'\x00\x00\x00\x00')
# Make the bytecode read-only.
os.chmod(bytecode_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
try:
# Should not raise OSError!
self.import_(mapping['_temp'], '_temp')
finally:
# Make writable for eventual clean-up.
os.chmod(bytecode_path, stat.S_IWUSR)
# TODO: RustPython
# class SourceLoaderBadBytecodeTestPEP451(
# SourceLoaderBadBytecodeTest, BadBytecodeTestPEP451):
# pass
# (Frozen_SourceBadBytecodePEP451,
# Source_SourceBadBytecodePEP451
# ) = util.test_both(SourceLoaderBadBytecodeTestPEP451, importlib=importlib,
# machinery=machinery, abc=importlib_abc,
# util=importlib_util)
# class SourceLoaderBadBytecodeTestPEP302(
# SourceLoaderBadBytecodeTest, BadBytecodeTestPEP302):
# pass
# (Frozen_SourceBadBytecodePEP302,
# Source_SourceBadBytecodePEP302
# ) = util.test_both(SourceLoaderBadBytecodeTestPEP302, importlib=importlib,
# machinery=machinery, abc=importlib_abc,
# util=importlib_util)
class SourcelessLoaderBadBytecodeTest:
@classmethod
def setUpClass(cls):
cls.loader = cls.machinery.SourcelessFileLoader
def test_empty_file(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_empty_file(test, del_source=True)
def test_partial_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_partial_magic(test, del_source=True)
def test_magic_only(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_magic_only(test, del_source=True)
def test_bad_magic(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(ImportError) as cm:
self.import_(bytecode_path, name)
self.assertEqual(cm.exception.name, name)
self.assertEqual(cm.exception.path, bytecode_path)
self._test_bad_magic(test, del_source=True)
def test_partial_timestamp(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_timestamp(test, del_source=True)
def test_partial_flags(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_flags(test, del_source=True)
def test_partial_hash(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_hash(test, del_source=True)
def test_partial_size(self):
def test(name, mapping, bytecode_path):
with self.assertRaises(EOFError):
self.import_(bytecode_path, name)
self._test_partial_size(test, del_source=True)
def test_no_marshal(self):
self._test_no_marshal(del_source=True)
def test_non_code_marshal(self):
self._test_non_code_marshal(del_source=True)
# TODO: RustPython
# class SourcelessLoaderBadBytecodeTestPEP451(SourcelessLoaderBadBytecodeTest,
# BadBytecodeTestPEP451):
# pass
# (Frozen_SourcelessBadBytecodePEP451,
# Source_SourcelessBadBytecodePEP451
# ) = util.test_both(SourcelessLoaderBadBytecodeTestPEP451, importlib=importlib,
# machinery=machinery, abc=importlib_abc,
# util=importlib_util)
# class SourcelessLoaderBadBytecodeTestPEP302(SourcelessLoaderBadBytecodeTest,
# BadBytecodeTestPEP302):
# pass
# (Frozen_SourcelessBadBytecodePEP302,
# Source_SourcelessBadBytecodePEP302
# ) = util.test_both(SourcelessLoaderBadBytecodeTestPEP302, importlib=importlib,
# machinery=machinery, abc=importlib_abc,
# util=importlib_util)
if __name__ == '__main__':
unittest.main()
| 40.679503 | 93 | 0.595658 | [
"MIT"
] | Ma233/RustPython | Lib/test/test_importlib/source/test_file_loader.py | 32,747 | Python |
import logging
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic import RedirectView
from mayan.apps.acls.models import AccessControlList
from mayan.apps.documents.models import Document, DocumentType
from mayan.apps.documents.permissions import permission_document_type_edit
from mayan.apps.views.generics import (
AddRemoveView, SingleObjectCreateView, SingleObjectDeleteView,
SingleObjectEditView, SingleObjectListView
)
from mayan.apps.views.mixins import ExternalObjectViewMixin
from .events import event_web_link_edited
from .forms import WebLinkForm
from .icons import icon_web_link_setup
from .links import link_web_link_create
from .models import ResolvedWebLink, WebLink
from .permissions import (
permission_web_link_create, permission_web_link_delete,
permission_web_link_edit, permission_web_link_instance_view,
permission_web_link_view
)
logger = logging.getLogger(name=__name__)
class DocumentTypeWebLinksView(AddRemoveView):
main_object_permission = permission_document_type_edit
main_object_model = DocumentType
main_object_pk_url_kwarg = 'document_type_id'
secondary_object_model = WebLink
secondary_object_permission = permission_web_link_edit
list_available_title = _('Available web links')
list_added_title = _('Web links enabled')
related_field = 'web_links'
def action_add(self, queryset, _event_actor):
for obj in queryset:
self.main_object.web_links.add(obj)
event_web_link_edited.commit(
actor=_event_actor, action_object=self.main_object, target=obj
)
def action_remove(self, queryset, _event_actor):
for obj in queryset:
self.main_object.web_links.remove(obj)
event_web_link_edited.commit(
actor=_event_actor, action_object=self.main_object, target=obj
)
def get_actions_extra_kwargs(self):
return {'_event_actor': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Web links to enable for document type: %s'
) % self.main_object,
}
class ResolvedWebLinkView(ExternalObjectViewMixin, RedirectView):
external_object_pk_url_kwarg = 'document_id'
external_object_permission = permission_web_link_instance_view
external_object_queryset = Document.valid.all()
def get_redirect_url(self, *args, **kwargs):
return self.get_web_link().get_redirect(
document=self.external_object, user=self.request.user
).url
def get_web_link(self):
return get_object_or_404(
klass=self.get_web_link_queryset(), pk=self.kwargs['web_link_id']
)
def get_web_link_queryset(self):
queryset = ResolvedWebLink.objects.get_for(
document=self.external_object, user=self.request.user
)
return AccessControlList.objects.restrict_queryset(
permission=permission_web_link_instance_view, queryset=queryset,
user=self.request.user
)
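    # Flow recap (descriptive note, not from the upstream docstrings): the view
    # resolves the web link template against the document for the requesting
    # user and redirects to the resulting URL; access is checked twice, once
    # against the document (external_object_permission) and once against the
    # web link queryset restricted here via the ACL manager.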
class WebLinkCreateView(SingleObjectCreateView):
extra_context = {'title': _('Create new web link')}
form_class = WebLinkForm
post_action_redirect = reverse_lazy(
viewname='web_links:web_link_list'
)
view_permission = permission_web_link_create
def get_instance_extra_data(self):
return {'_event_actor': self.request.user}
class WebLinkDeleteView(SingleObjectDeleteView):
model = WebLink
object_permission = permission_web_link_delete
pk_url_kwarg = 'web_link_id'
post_action_redirect = reverse_lazy(
viewname='web_links:web_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Delete web link: %s') % self.object
}
class WebLinkDocumentTypesView(AddRemoveView):
main_object_method_add_name = 'document_types_add'
main_object_method_remove_name = 'document_types_remove'
main_object_permission = permission_web_link_edit
main_object_model = WebLink
main_object_pk_url_kwarg = 'web_link_id'
secondary_object_model = DocumentType
secondary_object_permission = permission_document_type_edit
list_available_title = _('Available document types')
list_added_title = _('Document types enabled')
related_field = 'document_types'
def get_actions_extra_kwargs(self):
return {'_event_actor': self.request.user}
def get_extra_context(self):
return {
'object': self.main_object,
'title': _(
'Document type for which to enable web link: %s'
) % self.main_object,
}
class WebLinkEditView(SingleObjectEditView):
form_class = WebLinkForm
model = WebLink
object_permission = permission_web_link_edit
pk_url_kwarg = 'web_link_id'
post_action_redirect = reverse_lazy(
viewname='web_links:web_link_list'
)
def get_extra_context(self):
return {
'object': self.object,
'title': _('Edit web link: %s') % self.object
}
def get_instance_extra_data(self):
return {'_event_actor': self.request.user}
class WebLinkListView(SingleObjectListView):
object_permission = permission_web_link_view
def get_extra_context(self):
return {
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_web_link_setup,
'no_results_main_link': link_web_link_create.resolve(
context=RequestContext(request=self.request)
),
'no_results_text': _(
'Web links allow generating HTTP links from documents to '
                'external resources. The link URLs can contain document '
                'property values.'
),
'no_results_title': _(
'There are no web links'
),
'title': _('Web links'),
}
def get_source_queryset(self):
return self.get_web_link_queryset()
def get_web_link_queryset(self):
return WebLink.objects.all()
class DocumentWebLinkListView(ExternalObjectViewMixin, WebLinkListView):
external_object_permission = permission_web_link_instance_view
external_object_pk_url_kwarg = 'document_id'
external_object_queryset = Document.valid.all()
object_permission = permission_web_link_instance_view
def get_extra_context(self):
return {
'document': self.external_object,
'hide_link': True,
'hide_object': True,
'no_results_icon': icon_web_link_setup,
'no_results_text': _(
'Web links allow generating HTTP links from documents to '
                'external resources. The link URLs can contain document '
                'property values.'
),
'no_results_title': _(
'There are no web links for this document'
),
'object': self.external_object,
'title': _('Web links for document: %s') % self.external_object,
}
def get_web_link_queryset(self):
return ResolvedWebLink.objects.get_for(
document=self.external_object, user=self.request.user
)
| 34.027273 | 78 | 0.687016 | [
"Apache-2.0"
] | Bank5323/Mayan-EDMS | mayan/apps/web_links/views.py | 7,486 | Python |
"""
pycmark.inlineparser.link_processors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Link processor classes for InlineParser.
:copyright: Copyright 2017-2019 by Takeshi KOMIYA
:license: Apache License 2.0, see LICENSE for details.
"""
import re
from typing import Tuple
from docutils import nodes
from docutils.nodes import Element, Text
from pycmark import addnodes
from pycmark.inlineparser import PatternInlineProcessor, backtrack_onerror
from pycmark.readers import TextReader
from pycmark.utils import entitytrans, normalize_uri
from pycmark.utils import (
ESCAPED_CHARS, escaped_chars_pattern, get_root_document, normalize_link_label, unescape, transplant_nodes
)
LABEL_NOT_MATCHED = object()
# 6.5 Links
# 6.6 Images
class LinkOpenerProcessor(PatternInlineProcessor):
pattern = re.compile(r'\!?\[')
def run(self, reader: TextReader, document: Element) -> bool:
marker = reader.consume(self.pattern).group(0)
document += addnodes.bracket(marker=marker, can_open=True, active=True, position=reader.position)
return True
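# Illustrative inputs these processors cooperate on (markdown examples chosen
# for this note, not taken from the package's tests):
#
#     [text](http://example.com "title")   -> nodes.reference
#     ![alt text](image.png "title")       -> nodes.image
#     [text][label] / [text][] / [text]    -> resolved via link reference targets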
class LinkCloserProcessor(PatternInlineProcessor):
pattern = re.compile(r'\]')
def run(self, reader: TextReader, document: Element) -> bool:
reader.step(1)
document += addnodes.bracket(marker="]", can_open=False, position=reader.position - 1)
self.process_link_or_image(reader, document)
return True
@backtrack_onerror
def process_link_or_image(self, reader: TextReader, document: Element) -> bool:
brackets = list(n for n in document.children if isinstance(n, addnodes.bracket))
openers = list(d for d in brackets if d['can_open'])
if len(openers) == 0:
return True
opener = openers.pop()
closer = brackets.pop()
if not opener['active']:
opener.replace_self(Text(opener['marker']))
closer.replace_self(Text(closer['marker']))
return True
try:
if reader.remain.startswith('('):
# link destination + link title (optional)
# [...](<.+> ".+")
# [...](.+ ".+")
destination, title = self.parse_link_destination(reader, document)
elif reader.remain.startswith('['):
# link label
# [...][.+]
# [...][]
destination, title = self.parse_link_label(reader, document, opener=opener, closer=closer)
else:
destination = None
title = None
except (TypeError, ValueError):
destination = None
title = None
if destination is None:
# shortcut reference link
# [...]
refid = reader[opener['position']:closer['position']]
target = self.lookup_target(document, refid)
if target:
destination = target.get('refuri')
title = target.get('title')
else:
# deactivate brackets because no trailing link destination or link-label
opener.replace_self(Text(opener['marker']))
closer.replace_self(Text(closer['marker']))
raise
elif destination == LABEL_NOT_MATCHED:
opener.replace_self(Text(opener['marker']))
closer.replace_self(Text(closer['marker']))
raise
node: Element = None
if opener['marker'] == '![':
from pycmark.transforms import EmphasisConverter # lazy loading
para = transplant_nodes(document, nodes.paragraph(), start=opener, end=closer)
EmphasisConverter(para).apply()
node = nodes.image('', uri=destination, alt=para.astext())
if title:
node['title'] = title
else:
node = nodes.reference('', refuri=destination)
transplant_nodes(document, node, start=opener, end=closer)
if title:
node['reftitle'] = title
# deactivate all left brackets before the link
for n in openers:
if n['marker'] == '[':
n['active'] = False
document += node
document.remove(opener)
document.remove(closer)
return True
@backtrack_onerror
def parse_link_destination(self, reader: TextReader, document: Element) -> Tuple[str, str]:
reader.step()
destination = LinkDestinationParser().parse(reader, document)
title = LinkTitleParser().parse(reader, document)
assert reader.consume(re.compile(r'\s*\)'))
return destination, title
@backtrack_onerror
def parse_link_label(self, reader: TextReader, document: Element, opener: Element = None, closer: Element = None) -> Tuple[object, str]: # NOQA
reader.step()
refname = LinkLabelParser().parse(reader, document)
if refname == '':
# collapsed reference link
# [...][]
refname = reader[opener['position']:closer['position']]
target = self.lookup_target(document, refname)
if target:
destination = target.get('refuri')
title = target.get('title')
return destination, title
else:
return LABEL_NOT_MATCHED, None
def lookup_target(self, node: Element, refname: str) -> nodes.Element:
document = get_root_document(node)
refname = normalize_link_label(refname)
node_id = document.nameids.get(refname)
if node_id is None:
return None
return document.ids.get(node_id)
class LinkDestinationParser:
pattern = re.compile(r'\s*<((?:[^<>\n\\]|' + ESCAPED_CHARS + r')*)>', re.S)
def parse(self, reader: TextReader, document: Element) -> str:
if re.match(r'^\s*<', reader.remain):
matched = reader.consume(self.pattern)
if not matched:
return ''
else:
return self.normalize_link_destination(matched.group(1))
else:
return self.parseBareLinkDestination(reader, document)
def normalize_link_destination(self, s: str) -> str:
s = entitytrans._unescape(s)
s = unescape(s)
s = normalize_uri(s)
return s
def parseBareLinkDestination(self, reader: TextReader, document: Element) -> str:
assert reader.consume(re.compile(r'[ \n]*'))
if reader.remain == '': # must be empty line!
return None
parens = 0
start = reader.position
while reader.remain:
c = reader.remain[0]
if c in (' ', '\n'):
break
elif c == '(':
parens += 1
elif c == ')':
parens -= 1
if parens < 0:
break
elif escaped_chars_pattern.match(reader.remain):
reader.step() # one more step for escaping
reader.step()
end = reader.position
return self.normalize_link_destination(reader[start:end])
class LinkTitleParser:
pattern = re.compile(r'\s*("(' + ESCAPED_CHARS + r'|[^"])*"|' +
r"'(" + ESCAPED_CHARS + r"|[^'])*'|" +
r"\((" + ESCAPED_CHARS + r"|[^)])*\))")
def parse(self, reader: TextReader, document: Element) -> str:
matched = reader.consume(self.pattern)
if matched:
return unescape(entitytrans._unescape(matched.group(1)[1:-1]))
else:
return None
class LinkLabelParser:
pattern = re.compile(r'(?:[^\[\]\\]|' + ESCAPED_CHARS + r'|\\){0,1000}\]')
def parse(self, reader: TextReader, document: Element) -> str:
matched = reader.consume(self.pattern)
if matched:
return matched.group(0)[:-1]
else:
return None
| 34.758772 | 148 | 0.575521 | [
"BSD-2-Clause"
] | pycmark/pycmark | pycmark/inlineparser/link_processors.py | 7,925 | Python |
import re
import collections
from slugify import slugify
from .utils import limpa_conteudo, cria_hash_do_movimento
PADRAO_MOV = re.compile(r'numMov=(\d+)')
def parse_metadados(linhas_de_dados, numero_processo, inicio_metadados,
fim_metadados):
metadados = {
'status': [''],
'comarca': [''],
'endereco': [''],
'bairro': [''],
'cidade': [''],
'acao': [''],
'assunto': [''],
'classe': [''],
'livro': [''],
'folha': [''],
'numero-do-tombo': [''],
'aviso-ao-advogado': [''],
'autor': [''],
'requerido': [''],
'requerente': [''],
'advogado-s': ['']
}
    # Narrow the process rows down to the metadata region
linhas_com_metadados = linhas_de_dados[inicio_metadados:fim_metadados]
metadados['numero-processo'] = numero_processo
metadados['status'] = limpa_conteudo(
linhas_com_metadados[0].find_all('td')[0].get_text()
)
    # Delete the lines already used
del linhas_com_metadados[:2]
comarcas = []
comecou_comarca = False
for tr in list(linhas_com_metadados):
linhas_com_metadados.pop(0)
colunas = tr.find_all('td')
dados = ''.join([c.get_text() for c in colunas])
if 'Comarca' in dados or \
'Regional' in dados:
comecou_comarca = True
if comecou_comarca:
comarcas += extrai_dados_colunas(colunas)
if len(colunas) == 1 and comecou_comarca:
break
metadados['comarca'] = comarcas
for tr in list(linhas_com_metadados):
linhas_com_metadados.pop(0)
linha = []
colunas = tr.find_all('td')
linha = extrai_dados_colunas(colunas)
if linha:
metadados[slugify(linha[0])] = linha[1:]
return metadados
def estripa(texto):
return ' '.join(limpa_conteudo(texto).split("\n")).strip()
def atribui(chave, item, valor):
valor = estripa(valor)
if valor:
item[chave].append(valor)
def parse_processo_apensado(cols, item, campo):
dados = cols[1].find_all('a')
if dados:
item[campo] = [estripa(link.get_text()) for link in dados]
def parse_descricao(cols, item, campo):
for link in cols[1].find_all('a'):
if 'onclick' in link.attrs:
conteudo_escondido = link.attrs['onclick']
inteiro_teor = PADRAO_MOV.findall(
conteudo_escondido)
if inteiro_teor:
item['inteiro-teor'] = inteiro_teor
atribui(campo, item, next(cols[1].descendants))
METODOS_PARSING = {
'processo-s-apensado-s': parse_processo_apensado,
'processo-s-no-tribunal-de-justica': parse_processo_apensado,
'descricao': parse_descricao,
}
def parse_itens(soup, numero_processo, inicio_itens):
    # Cut out the area containing the items
itens = {}
itens['numero-processo'] = numero_processo
lista_de_itens = []
linhas_de_dados = soup.find_all(attrs={'name': 'formResultado'})[0]\
.find_all('tr')
linhas_com_itens = linhas_de_dados[inicio_itens:]
for indice, linha in enumerate(list(linhas_com_itens)):
if linha.attrs == {'class': ['tipoMovimento']}:
item = collections.defaultdict(list)
colunas = linha.find_all('td')
            # Item headers without any text may exist, such as the
            # "Mandado de Pagamento" (payment order).
            # In those cases we record the block title as the
            # movement type.
if len(colunas) == 1:
texto = colunas[0].get_text().strip()
chave = 'tipo-do-movimento'
else:
texto = limpa_conteudo(
colunas[1].get_text()
)
chave = slugify(colunas[0].get_text())
item[chave] = texto
info = linhas_com_itens[indice + 1:]
cont = 0
while cont < len(info) and\
info[cont].attrs != {'class': ['tipoMovimento']}:
cols = info[cont].find_all('td')
if len(cols) > 1:
campo = slugify(cols[0].get_text())
if campo == 'tipo-do-movimento':
campo = 'sub-tipo-do-movimento'
if campo in METODOS_PARSING:
METODOS_PARSING[campo](cols, item, campo)
else:
atribui(campo, item, cols[1].get_text())
else:
cont += 1
continue
cont += 1
lista_de_itens.append(item)
for item in lista_de_itens:
if 'inteiro-teor' in item:
item['inteiro-teor'] = soup.find(
'input', {
'type': 'HIDDEN',
'name': 'descMov{0}'.format(item['inteiro-teor'][0])
}).attrs['value']
for item in lista_de_itens:
item['hash'] = cria_hash_do_movimento(item)
itens['itens'] = lista_de_itens
return itens
def area_dos_metadados(linhas_de_dados):
    # Apparently this value is fixed
inicio = 0
atributos_inicio_metadados = {'align': 'center',
'class': ['negrito'],
'colspan': '2'}
for indice, linha in enumerate(linhas_de_dados):
coluna = linha.find('td')
if not inicio and coluna.attrs == atributos_inicio_metadados:
inicio = indice
if 'Tipo do Movimento:' in linha.get_text():
fim = indice - 1
break
return inicio, fim
def extrai_dados_colunas(colunas):
linha = []
for td in colunas:
linha += list(
filter(None, [limpa_conteudo(td.get_text()) if td else ''])
)
return linha
| 29.472081 | 74 | 0.550982 | [
"MIT"
] | MinisterioPublicoRJ/robotj | extrator/crawler/parser.py | 5,808 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualNetworkTapResult',
'AwaitableGetVirtualNetworkTapResult',
'get_virtual_network_tap',
]
@pulumi.output_type
class GetVirtualNetworkTapResult:
"""
Virtual Network Tap resource.
"""
def __init__(__self__, destination_load_balancer_front_end_ip_configuration=None, destination_network_interface_ip_configuration=None, destination_port=None, etag=None, id=None, location=None, name=None, network_interface_tap_configurations=None, provisioning_state=None, resource_guid=None, tags=None, type=None):
if destination_load_balancer_front_end_ip_configuration and not isinstance(destination_load_balancer_front_end_ip_configuration, dict):
raise TypeError("Expected argument 'destination_load_balancer_front_end_ip_configuration' to be a dict")
pulumi.set(__self__, "destination_load_balancer_front_end_ip_configuration", destination_load_balancer_front_end_ip_configuration)
if destination_network_interface_ip_configuration and not isinstance(destination_network_interface_ip_configuration, dict):
raise TypeError("Expected argument 'destination_network_interface_ip_configuration' to be a dict")
pulumi.set(__self__, "destination_network_interface_ip_configuration", destination_network_interface_ip_configuration)
if destination_port and not isinstance(destination_port, int):
raise TypeError("Expected argument 'destination_port' to be a int")
pulumi.set(__self__, "destination_port", destination_port)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_interface_tap_configurations and not isinstance(network_interface_tap_configurations, list):
raise TypeError("Expected argument 'network_interface_tap_configurations' to be a list")
pulumi.set(__self__, "network_interface_tap_configurations", network_interface_tap_configurations)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> Optional['outputs.FrontendIPConfigurationResponse']:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap.
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> Optional['outputs.NetworkInterfaceIPConfigurationResponse']:
"""
The reference to the private IP Address of the collector nic that will receive the tap.
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> Optional[int]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaceTapConfigurations")
def network_interface_tap_configurations(self) -> Sequence['outputs.NetworkInterfaceTapConfigurationResponse']:
"""
Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
"""
return pulumi.get(self, "network_interface_tap_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the virtual network tap resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the virtual network tap resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetVirtualNetworkTapResult(GetVirtualNetworkTapResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkTapResult(
destination_load_balancer_front_end_ip_configuration=self.destination_load_balancer_front_end_ip_configuration,
destination_network_interface_ip_configuration=self.destination_network_interface_ip_configuration,
destination_port=self.destination_port,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
network_interface_tap_configurations=self.network_interface_tap_configurations,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type)
def get_virtual_network_tap(resource_group_name: Optional[str] = None,
tap_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkTapResult:
"""
Virtual Network Tap resource.
:param str resource_group_name: The name of the resource group.
:param str tap_name: The name of virtual network tap.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['tapName'] = tap_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200701:getVirtualNetworkTap', __args__, opts=opts, typ=GetVirtualNetworkTapResult).value
return AwaitableGetVirtualNetworkTapResult(
destination_load_balancer_front_end_ip_configuration=__ret__.destination_load_balancer_front_end_ip_configuration,
destination_network_interface_ip_configuration=__ret__.destination_network_interface_ip_configuration,
destination_port=__ret__.destination_port,
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
network_interface_tap_configurations=__ret__.network_interface_tap_configurations,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type)
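# Illustrative usage sketch (not part of the generated SDK): inside a Pulumi
# program the function above can be called directly; the resource group and tap
# names below are placeholders.
#
#   import pulumi
#   from pulumi_azure_native.network.v20200701 import get_virtual_network_tap
#
#   tap = get_virtual_network_tap(resource_group_name="example-rg",
#                                 tap_name="example-tap")
#   pulumi.export("tapProvisioningState", tap.provisioning_state)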
| 42.628571 | 318 | 0.699173 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/network/v20200701/get_virtual_network_tap.py | 8,952 | Python |
# Copyright 2019 Alastair Pharo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy
import os
import scipy
from numpy.testing import assert_allclose
from infsocsol.helpers import matrix
@pytest.fixture(scope="module", params=[
# engine states time_step start steps steady steady_accuracy optim_accuracy
( 'matlab', 10, 1, (100.0, 0.5), 100, True, 0.01, 0.009 ),
( 'matlab', 20, 0.5, (600.0, 0.6), 200, True, 0.01, 0.015 ),
( 'matlab', 40, 0.25, (60.0, 0.1), 300, True, 0.01, 0.018 ),
( 'matlab', 10, 1, (600.0, 1.0), 200, False, 0.001, None ),
( 'octave', 10, 1, (100.0, 0.5), 100, True, 0.001, 0.009 ),
( 'octave', 20, 0.5, (600.0, 0.6), 200, True, 0.001, 0.015 )
])
def fisheries_scenario(request):
return request.param
def test_fisheries_det_basic(engines, fisheries_scenario):
_engine, states, time_step, _start, steps, steady, steady_accuracy, optim_accuracy = fisheries_scenario
engine = engines[_engine]
start = matrix(engine, _start)
engine.cd(os.path.join(os.path.dirname(__file__), "fisheries_det_basic"))
engine.solve(float(states), float(time_step), nargout=0)
final = numpy.array(engine.sim_final(start, steps))
    # This is determined by setting ds/dt = 0, which solves to 1 = x/L + (q/r)*e
steady_one = numpy.dot(final, [1/600, 5/4])
if steady:
assert_allclose(steady_one, 1, atol=steady_accuracy)
# This is the most profitable steady state -- x = L/2 + c/2pq
profit_max_steady = numpy.array([[302.5, 0.39667]])
assert_allclose(final, profit_max_steady, rtol=optim_accuracy)
else:
assert steady_one > 1 + steady_accuracy
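# Note on the steady-state check above: `final` is (stock x, effort e), so the dot
# product with [1/600, 5/4] evaluates x/L + (q/r)*e with L = 600 and q/r = 5/4
# implied by those coefficients; it equals 1 exactly when ds/dt = 0.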
| 43.603774 | 107 | 0.648637 | [
"Apache-2.0"
] | socsol/infsocsol | tests/test_fisheries_det_basic.py | 2,311 | Python |
addrs = {
"140": "[2401:db00:21:604c:face:0:5:0]",
"216": "[2401:db00:21:604c:face:0:7:0]",
"199": "[2401:db00:21:604f:face:0:5:0]",
"194": "[2401:db00:21:605b:face:0:7:0]",
"213": "[2401:db00:21:6046:face:0:7:0]",
"145": "[2401:db00:21:6043:face:0:1:0]",
"212": "[2401:db00:21:6046:face:0:5:0]",
"198": "[2401:db00:21:604f:face:0:3:0]",
"157": "[2401:db00:21:6059:face:0:7:0]",
# These are for FAIR clusters, add additional nodes and their
# IP addresses here
"learnfair100": "[100.97.17.10]",
"learnfair101": "[100.97.17.11]",
"learnfair128": "[100.97.17.78]",
"learnfair102": "[100.97.17.12]",
}
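# Example lookup (illustrative): addrs["140"] returns the bracketed IPv6 literal
# "[2401:db00:21:604c:face:0:5:0]", so it can be dropped straight into a
# host:port style address string.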
| 36.611111 | 65 | 0.559939 | [
"BSD-3-Clause"
] | zchen0211/ELF_inf | experimental/server_addrs.py | 659 | Python |
#!/usr/bin/env python2.7
# coding=utf-8
from __future__ import print_function
import sys, os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append("../../amalgamation/python/")
sys.path.append("../../python/")
from mxnet_predict import Predictor
import mxnet as mx
import numpy as np
import cv2
import os
class lstm_ocr_model(object):
    # Keep zero index for blank (CTC requires it).
CONST_CHAR='0123456789'
def __init__(self, path_of_json, path_of_params):
super(lstm_ocr_model, self).__init__()
self.path_of_json = path_of_json
self.path_of_params = path_of_params
self.predictor = None
self.__init_ocr()
def __init_ocr(self):
num_label = 4 # Set your max length of label, add one more for blank
batch_size = 1
num_hidden = 100
num_lstm_layer = 2
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]
init_states = init_c + init_h
all_shapes = [('data', (batch_size, 80 * 30))] + init_states + [('label', (batch_size, num_label))]
all_shapes_dict = {}
for _shape in all_shapes:
all_shapes_dict[_shape[0]] = _shape[1]
self.predictor = Predictor(open(self.path_of_json).read(),
open(self.path_of_params).read(),
all_shapes_dict)
def forward_ocr(self, img):
img = cv2.resize(img, (80, 30))
img = img.transpose(1, 0)
img = img.reshape((80 * 30))
img = np.multiply(img, 1/255.0)
self.predictor.forward(data=img)
prob = self.predictor.get_output(0)
label_list = []
for p in prob:
max_index = np.argsort(p)[::-1][0]
label_list.append(max_index)
return self.__get_string(label_list)
def __get_string(self, label_list):
# Do CTC label rule
# CTC cannot emit a repeated symbol on consecutive timesteps
ret = []
label_list2 = [0] + list(label_list)
for i in range(len(label_list)):
c1 = label_list2[i]
c2 = label_list2[i+1]
if c2 == 0 or c2 == c1:
continue
ret.append(c2)
# change to ascii
s = ''
for l in ret:
if l > 0 and l < (len(lstm_ocr_model.CONST_CHAR)+1):
c = lstm_ocr_model.CONST_CHAR[l-1]
else:
c = ''
s += c
return s
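    # Worked example of the CTC collapse above (illustrative): raw argmax labels
    # [3, 3, 0, 5, 5] collapse repeats and drop blanks (0), leaving [3, 5];
    # mapping through CONST_CHAR (index l-1) yields the string "24".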
if __name__ == '__main__':
_lstm_ocr_model = lstm_ocr_model('ocr-symbol.json', 'ocr-0010.params')
img = cv2.imread('sample.jpg', 0)
_str = _lstm_ocr_model.forward_ocr(img)
print('Result: ', _str)
| 33.654762 | 107 | 0.58861 | [
"Apache-2.0"
] | YanLiang1102/dmlc-mxnet | example/warpctc/ocr_predict.py | 2,827 | Python |
import datetime
import more_itertools
from annoworkapi.actual_working_time import (
_create_actual_working_hours_dict,
create_actual_working_times_daily,
get_term_start_end_from_date_for_actual_working_time,
)
ACTUAL_WORKING_TIME_LIST = [
{
"job_id": "JOB_A",
"workspace_member_id": "alice",
"start_datetime": "2021-11-01T10:00:00.000Z",
"end_datetime": "2021-11-01T11:00:00.000Z",
},
{
"job_id": "JOB_B",
"workspace_member_id": "alice",
"start_datetime": "2021-11-01T12:00:00.000Z",
"end_datetime": "2021-11-01T14:00:00.000Z",
},
{
"job_id": "JOB_A",
"workspace_member_id": "alice",
"start_datetime": "2021-11-01T14:00:00.000Z",
"end_datetime": "2021-11-01T18:00:00.000Z",
},
]
class Test_get_term_start_end_from_date_for_actual_working_time:
def test_with_jtc(self):
actual = get_term_start_end_from_date_for_actual_working_time(
"2021-04-01", "2021-04-01", tzinfo=datetime.timezone(datetime.timedelta(hours=9))
)
assert actual[0] == "2021-03-31T15:00:00.000Z"
assert actual[1] == "2021-04-01T14:59:59.999Z"
def test_with_utc(self):
actual = get_term_start_end_from_date_for_actual_working_time(
"2021-04-01", "2021-04-01", tzinfo=datetime.timezone.utc
)
assert actual[0] == "2021-04-01T00:00:00.000Z"
assert actual[1] == "2021-04-01T23:59:59.999Z"
class Test__create_actual_working_hours_dict:
jtc_tzinfo = datetime.timezone(datetime.timedelta(hours=9))
def test_evening(self):
actual = _create_actual_working_hours_dict(ACTUAL_WORKING_TIME_LIST[0], tzinfo=self.jtc_tzinfo)
expected = {(datetime.date(2021, 11, 1), "alice", "JOB_A"): 1.0}
assert actual == expected
def test_midnight(self):
actual = _create_actual_working_hours_dict(ACTUAL_WORKING_TIME_LIST[2], tzinfo=self.jtc_tzinfo)
expected = {
(datetime.date(2021, 11, 1), "alice", "JOB_A"): 1.0,
(datetime.date(2021, 11, 2), "alice", "JOB_A"): 3.0,
}
assert actual == expected
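    # The split above follows from the +09:00 offset: 14:00Z-18:00Z on 2021-11-01
    # is 23:00 on Nov 1 through 03:00 on Nov 2 local time, i.e. 1 hour booked to
    # Nov 1 and 3 hours to Nov 2.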
class Test_create_actual_working_times_daily:
jtc_tzinfo = datetime.timezone(datetime.timedelta(hours=9))
def test_normal(self):
actual = create_actual_working_times_daily(ACTUAL_WORKING_TIME_LIST, tzinfo=self.jtc_tzinfo)
assert len(actual) == 3
assert (
more_itertools.first_true(actual, pred=lambda e: e["date"] == "2021-11-01" and e["job_id"] == "JOB_A")[
"actual_working_hours"
]
== 2.0
)
assert (
more_itertools.first_true(actual, pred=lambda e: e["date"] == "2021-11-01" and e["job_id"] == "JOB_B")[
"actual_working_hours"
]
== 2.0
)
assert (
more_itertools.first_true(actual, pred=lambda e: e["date"] == "2021-11-02" and e["job_id"] == "JOB_A")[
"actual_working_hours"
]
== 3.0
)
| 33.901099 | 115 | 0.618801 | [
"MIT"
] | kurusugawa-computer/annowork-api-python-client | tests/test_local_actual_working_time.py | 3,085 | Python |
import os
# globals
PIC_EXT_NAME = '.png'
DEFAULT_TARGET_NAME = 'DEFAULT_TARGET_NAME'
# use a set literal so the allowed key is the whole string, not its characters
ALLOWED_EXTRA_ARGS = {
    'mask_pic_path',
}
# arg parse
PORT_ENV_NAME = 'FINDIT_SERVER_PORT'
PIC_ROOT_ENV_NAME = 'FINDIT_SERVER_PIC_ROOT_PATH'
SERVER_PORT: int = int(os.environ.get(PORT_ENV_NAME, default=9410))
PIC_DIR_PATH: str = os.environ.get(PIC_ROOT_ENV_NAME, default='')
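# Illustrative invocation (assumed): export FINDIT_SERVER_PORT=9410 and
# FINDIT_SERVER_PIC_ROOT_PATH=/path/to/pics before starting the server; when the
# variables are unset the defaults above apply (port 9410, empty picture root).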
| 23.1875 | 67 | 0.778976 | [
"MIT"
] | yanzongzhen/hyper-find | findit/server/config.py | 371 | Python |
from setuptools import setup
from setuptools import find_packages
setup(name='Keras-bot-enforce-codeowners',
packages=find_packages())
| 21.285714 | 43 | 0.758389 | [
"MIT"
] | JoshuaRM/keras-bot | enforce_codeowners/setup.py | 149 | Python |
#!/usr/bin/python3
import glob
import os
from PIL import Image
# make sure the output folder exists before saving into it
os.makedirs("thumbs", exist_ok=True)
# get all the jpg files from the current folder
for infile in glob.glob("*.jpg"):
    # skip files that are already thumbnails
    if infile.startswith("T_"):
        continue
    im = Image.open(infile)
    # convert to thumbnail image (at most 500x500, aspect ratio preserved, in place)
    im.thumbnail((500, 500), Image.ANTIALIAS)
    # prefix thumbnail file with T_ and save it into thumbs/
    im.save("thumbs/T_" + infile, "JPEG")
| 27.428571 | 47 | 0.6875 | [
"BSD-3-Clause"
] | mdr78/mdr78.github.io | scripts/mk_thumbs.py | 384 | Python |
# MIT License
#
# Copyright (c) 2019 Meyers Tom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import fileinput
import runner.config
def exclude(list, value=["-v", "-vv", "-vvv"]):
"""
remove value from the list
"""
new = []
for item in list:
if not item in value:
new.append(item)
return new
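# Worked example (illustrative): exclude(["-v", "example.com"]) returns
# ["example.com"], since every item that appears in `value` is dropped.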
def help():
"""
print the help menu and exit
"""
print("Usage: {} [-h | --help] [-vv] domain".format(runner.config.NAME))
print("\t {} -h | --help \t\tprint this help menu".format(runner.config.NAME))
print("\t {} -v <domain> \t\tEnable debug messages".format(runner.config.NAME))
print("\t {} -vv <domain> \t\tEnable debug messages with more information".format(runner.config.NAME))
print("\t {} <domain> \t\tperform a scan on the given domain/URI or URL\n".format(runner.config.NAME))
print("Copyright Meyers Tom")
print("Licensed under the MIT License")
quit()
def getInput():
"""
    Check if any command-line input was given. If not, read the domain from stdin.
    Returns the input it got, e.g. a URL, URI or domain, followed by two booleans
    indicating whether -v and -vv debug output are enabled.
"""
if len(sys.argv) == 1:
        # no CLI arguments: read the first line from stdin (or from piped files)
        return next(fileinput.input()), False, False
if "-h" in sys.argv or "--help" in sys.argv:
help()
domain = "".join(exclude(sys.argv[1:], ["-v", "-vv"]))
if domain == "":
print("Wrong input formation\n")
help()
    return domain, "-v" in sys.argv, "-vv" in sys.argv
| 38.984615 | 106 | 0.679163 | [
"MIT"
] | F0xedb/IS-HTTP-HEADER | runner/input.py | 2,534 | Python |
from git import Repo
import logging
from wily.archivers import BaseArchiver, Revision
logger = logging.getLogger(__name__)
class DirtyGitRepositoryError(Exception):
def __init__(self, untracked_files):
self.untracked_files = untracked_files
self.message = "Dirty repository, make sure you commit/stash files first"
class GitArchiver(BaseArchiver):
name = "git"
def __init__(self, config):
self.repo = Repo(config.path)
self.config = config
self.current_branch = self.repo.active_branch
assert not self.repo.bare, "Not a Git repository"
def revisions(self, path, max_revisions):
if self.repo.is_dirty():
raise DirtyGitRepositoryError(self.repo.untracked_files)
revisions = []
for commit in self.repo.iter_commits(
self.current_branch, max_count=self.config.max_revisions
):
rev = Revision(
key=commit.name_rev.split(" ")[0],
author_name=commit.author.name,
author_email=commit.author.email,
revision_date=commit.committed_date,
message=commit.message,
)
revisions.append(rev)
return revisions
def checkout(self, revision, options):
rev = revision.key
self.repo.git.checkout(rev)
def finish(self):
# Make sure you checkout HEAD on the original branch when finishing
self.repo.git.checkout(self.current_branch)
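# Illustrative usage sketch (assumes a config object exposing the `path` and
# `max_revisions` attributes used above):
#
#   archiver = GitArchiver(config)
#   for rev in archiver.revisions(config.path, config.max_revisions):
#       archiver.checkout(rev, options={})
#       ...  # analyse the working tree at this revision
#   archiver.finish()  # always restore the original branch afterwards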
| 30.673469 | 81 | 0.646707 | [
"Apache-2.0"
] | wcooley/wily | wily/archivers/git.py | 1,503 | Python |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
import datetime
from odoo import api, fields, models, _
from odoo.tools.safe_eval import safe_eval
class Team(models.Model):
_name = 'crm.team'
_inherit = ['mail.alias.mixin', 'crm.team']
_description = 'Sales Team'
use_leads = fields.Boolean('Leads', help="Check this box to filter and qualify incoming requests as leads before converting them into opportunities and assigning them to a salesperson.")
use_opportunities = fields.Boolean('Pipeline', default=True, help="Check this box to manage a presales process with opportunities.")
alias_id = fields.Many2one(
'mail.alias', string='Alias', ondelete="restrict", required=True,
help="The email address associated with this channel. New emails received will automatically create new leads assigned to the channel.")
# statistics about leads / opportunities / both
lead_unassigned_count = fields.Integer(
string='# Unassigned Leads', compute='_compute_lead_unassigned_count')
lead_all_assigned_month_count = fields.Integer(
string='# Leads/Opps assigned this month', compute='_compute_lead_all_assigned_month_count',
help="Number of leads and opportunities assigned this last month.")
opportunities_count = fields.Integer(
string='# Opportunities', compute='_compute_opportunities_data')
opportunities_amount = fields.Monetary(
string='Opportunities Revenues', compute='_compute_opportunities_data')
opportunities_overdue_count = fields.Integer(
string='# Overdue Opportunities', compute='_compute_opportunities_overdue_data')
opportunities_overdue_amount = fields.Monetary(
string='Overdue Opportunities Revenues', compute='_compute_opportunities_overdue_data',)
# alias: improve fields coming from _inherits, use inherited to avoid replacing them
alias_user_id = fields.Many2one(
'res.users', related='alias_id.alias_user_id', inherited=True,
domain=lambda self: [('groups_id', 'in', self.env.ref('sales_team.group_sale_salesman_all_leads').id)])
def _compute_lead_unassigned_count(self):
leads_data = self.env['crm.lead'].read_group([
('team_id', 'in', self.ids),
('type', '=', 'lead'),
('user_id', '=', False),
], ['team_id'], ['team_id'])
counts = {datum['team_id'][0]: datum['team_id_count'] for datum in leads_data}
for team in self:
team.lead_unassigned_count = counts.get(team.id, 0)
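    # Illustrative shape of the read_group() result consumed above (values made
    # up): [{'team_id': (1, 'Europe'), 'team_id_count': 3, ...}], so `counts`
    # maps team id -> number of unassigned leads, defaulting to 0 per team.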
def _compute_lead_all_assigned_month_count(self):
limit_date = datetime.datetime.now() - datetime.timedelta(days=30)
leads_data = self.env['crm.lead'].read_group([
('team_id', 'in', self.ids),
('date_open', '>=', fields.Datetime.to_string(limit_date)),
('user_id', '!=', False),
], ['team_id'], ['team_id'])
counts = {datum['team_id'][0]: datum['team_id_count'] for datum in leads_data}
for team in self:
team.lead_all_assigned_month_count = counts.get(team.id, 0)
def _compute_opportunities_data(self):
opportunity_data = self.env['crm.lead'].read_group([
('team_id', 'in', self.ids),
('probability', '<', 100),
('type', '=', 'opportunity'),
], ['expected_revenue:sum', 'team_id'], ['team_id'])
counts = {datum['team_id'][0]: datum['team_id_count'] for datum in opportunity_data}
amounts = {datum['team_id'][0]: datum['expected_revenue'] for datum in opportunity_data}
for team in self:
team.opportunities_count = counts.get(team.id, 0)
team.opportunities_amount = amounts.get(team.id, 0)
def _compute_opportunities_overdue_data(self):
opportunity_data = self.env['crm.lead'].read_group([
('team_id', 'in', self.ids),
('probability', '<', 100),
('type', '=', 'opportunity'),
('date_deadline', '<', fields.Date.to_string(fields.Datetime.now()))
], ['expected_revenue', 'team_id'], ['team_id'])
counts = {datum['team_id'][0]: datum['team_id_count'] for datum in opportunity_data}
amounts = {datum['team_id'][0]: (datum['expected_revenue']) for datum in opportunity_data}
for team in self:
team.opportunities_overdue_count = counts.get(team.id, 0)
team.opportunities_overdue_amount = amounts.get(team.id, 0)
@api.onchange('use_leads', 'use_opportunities')
def _onchange_use_leads_opportunities(self):
if not self.use_leads and not self.use_opportunities:
self.alias_name = False
# ------------------------------------------------------------
# ORM
# ------------------------------------------------------------
def write(self, vals):
result = super(Team, self).write(vals)
if 'use_leads' in vals or 'use_opportunities' in vals:
for team in self:
alias_vals = team._alias_get_creation_values()
team.write({
'alias_name': alias_vals.get('alias_name', team.alias_name),
'alias_defaults': alias_vals.get('alias_defaults'),
})
return result
# ------------------------------------------------------------
# MESSAGING
# ------------------------------------------------------------
def _alias_get_creation_values(self):
values = super(Team, self)._alias_get_creation_values()
values['alias_model_id'] = self.env['ir.model']._get('crm.lead').id
if self.id:
if not self.use_leads and not self.use_opportunities:
values['alias_name'] = False
values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}")
has_group_use_lead = self.env.user.has_group('crm.group_use_lead')
defaults['type'] = 'lead' if has_group_use_lead and self.use_leads else 'opportunity'
defaults['team_id'] = self.id
return values
# ------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------
#TODO JEM : refactor this stuff with xml action, proper customization,
@api.model
def action_your_pipeline(self):
action = self.env["ir.actions.actions"]._for_xml_id("crm.crm_lead_action_pipeline")
user_team_id = self.env.user.sale_team_id.id
if user_team_id:
# To ensure that the team is readable in multi company
user_team_id = self.search([('id', '=', user_team_id)], limit=1).id
else:
user_team_id = self.search([], limit=1).id
action['help'] = _("""<p class='o_view_nocontent_smiling_face'>Add new opportunities</p><p>
Looks like you are not a member of a Sales Team. You should add yourself
as a member of one of the Sales Team.
</p>""")
if user_team_id:
action['help'] += _("<p>As you don't belong to any Sales Team, Odoo opens the first one by default.</p>")
action_context = safe_eval(action['context'], {'uid': self.env.uid})
if user_team_id:
action_context['default_team_id'] = user_team_id
action['context'] = action_context
return action
def _compute_dashboard_button_name(self):
super(Team, self)._compute_dashboard_button_name()
team_with_pipelines = self.filtered(lambda el: el.use_opportunities)
team_with_pipelines.update({'dashboard_button_name': _("Pipeline")})
def action_primary_channel_button(self):
if self.use_opportunities:
return self.env["ir.actions.actions"]._for_xml_id("crm.crm_case_form_view_salesteams_opportunity")
return super(Team,self).action_primary_channel_button()
def _graph_get_model(self):
if self.use_opportunities:
return 'crm.lead'
return super(Team,self)._graph_get_model()
def _graph_date_column(self):
if self.use_opportunities:
return 'create_date'
return super(Team,self)._graph_date_column()
def _graph_y_query(self):
if self.use_opportunities:
return 'count(*)'
return super(Team,self)._graph_y_query()
def _extra_sql_conditions(self):
if self.use_opportunities:
return "AND type LIKE 'opportunity'"
return super(Team,self)._extra_sql_conditions()
def _graph_title_and_key(self):
if self.use_opportunities:
return ['', _('New Opportunities')] # no more title
return super(Team,self)._graph_title_and_key()
| 47.407609 | 190 | 0.623983 | [
"Apache-2.0"
] | SHIVJITH/Odoo_Machine_Test | addons/crm/models/crm_team.py | 8,723 | Python |
import random
# ToDo: With the shuffle_ganon option, prevent Ganons Tower from linking to an exit-only location through a 2-entrance cave.
def link_entrances(world, player):
connect_two_way(world, 'Links House', 'Links House Exit', player) # unshuffled. For now
connect_exit(world, 'Chris Houlihan Room Exit', 'Links House', player) # should always match link's house, except for plandos
Dungeon_Exits = Dungeon_Exits_Base.copy()
Cave_Exits = Cave_Exits_Base.copy()
Old_Man_House = Old_Man_House_Base.copy()
Cave_Three_Exits = Cave_Three_Exits_Base.copy()
unbias_some_entrances(Dungeon_Exits, Cave_Exits, Old_Man_House, Cave_Three_Exits)
# setup mandatory connections
for exitname, regionname in mandatory_connections:
connect_simple(world, exitname, regionname, player)
# if we do not shuffle, set default connections
if world.shuffle[player] == 'vanilla':
for exitname, regionname in default_connections:
connect_simple(world, exitname, regionname, player)
for exitname, regionname in default_dungeon_connections:
connect_simple(world, exitname, regionname, player)
elif world.shuffle[player] == 'dungeonssimple':
for exitname, regionname in default_connections:
connect_simple(world, exitname, regionname, player)
simple_shuffle_dungeons(world, player)
elif world.shuffle[player] == 'dungeonsfull':
for exitname, regionname in default_connections:
connect_simple(world, exitname, regionname, player)
skull_woods_shuffle(world, player)
dungeon_exits = list(Dungeon_Exits)
lw_entrances = list(LW_Dungeon_Entrances)
dw_entrances = list(DW_Dungeon_Entrances)
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
elif world.doorShuffle[player] != 'vanilla':
lw_entrances.append('Hyrule Castle Entrance (South)')
else:
dungeon_exits.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
else:
dw_entrances.append('Ganons Tower')
dungeon_exits.append('Ganons Tower Exit')
if world.mode[player] == 'standard':
# rest of hyrule castle must be in light world, so it has to be the one connected to east exit of desert
connect_mandatory_exits(world, lw_entrances, [('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)')], list(LW_Dungeon_Entrances_Must_Exit), player)
elif world.doorShuffle[player] != 'vanilla':
# sanc is in light world, so must all of HC if door shuffle is on
connect_mandatory_exits(world, lw_entrances,
[('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)', 'Hyrule Castle Exit (South)')],
list(LW_Dungeon_Entrances_Must_Exit), player)
else:
connect_mandatory_exits(world, lw_entrances, dungeon_exits, list(LW_Dungeon_Entrances_Must_Exit), player)
connect_mandatory_exits(world, dw_entrances, dungeon_exits, list(DW_Dungeon_Entrances_Must_Exit), player)
connect_caves(world, lw_entrances, dw_entrances, dungeon_exits, player)
elif world.shuffle[player] == 'simple':
simple_shuffle_dungeons(world, player)
old_man_entrances = list(Old_Man_Entrances)
caves = list(Cave_Exits)
three_exit_caves = list(Cave_Three_Exits)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# we shuffle all 2 entrance caves as pairs as a start
# start with the ones that need to be directed
two_door_caves = list(Two_Door_Caves_Directional)
random.shuffle(two_door_caves)
random.shuffle(caves)
while two_door_caves:
entrance1, entrance2 = two_door_caves.pop()
exit1, exit2 = caves.pop()
connect_two_way(world, entrance1, exit1, player)
connect_two_way(world, entrance2, exit2, player)
# now the remaining pairs
two_door_caves = list(Two_Door_Caves)
random.shuffle(two_door_caves)
while two_door_caves:
entrance1, entrance2 = two_door_caves.pop()
exit1, exit2 = caves.pop()
connect_two_way(world, entrance1, exit1, player)
connect_two_way(world, entrance2, exit2, player)
# at this point only Light World death mountain entrances remain
# place old man, has limited options
remaining_entrances = ['Old Man Cave (West)', 'Old Man House (Bottom)', 'Death Mountain Return Cave (West)', 'Paradox Cave (Bottom)', 'Paradox Cave (Middle)', 'Paradox Cave (Top)',
'Fairy Ascension Cave (Bottom)', 'Fairy Ascension Cave (Top)', 'Spiral Cave', 'Spiral Cave (Bottom)']
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
remaining_entrances.extend(old_man_entrances)
random.shuffle(remaining_entrances)
old_man_entrance = remaining_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
# add old man house to ensure it is always somewhere on light death mountain
caves.extend(list(Old_Man_House))
caves.extend(list(three_exit_caves))
# connect rest
connect_caves(world, remaining_entrances, [], caves, player)
# scramble holes
scramble_holes(world, player)
# place blacksmith, has limited options
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
elif world.shuffle[player] == 'restricted':
simple_shuffle_dungeons(world, player)
lw_entrances = list(LW_Entrances + LW_Single_Cave_Doors + Old_Man_Entrances)
dw_entrances = list(DW_Entrances + DW_Single_Cave_Doors)
dw_must_exits = list(DW_Entrances_Must_Exit)
old_man_entrances = list(Old_Man_Entrances)
caves = list(Cave_Exits + Cave_Three_Exits)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors + Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# in restricted, the only mandatory exits are in dark world
connect_mandatory_exits(world, dw_entrances, caves, dw_must_exits, player)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in lw_entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
lw_entrances.remove(old_man_exit)
# place blacksmith, has limited options
all_entrances = lw_entrances + dw_entrances
        # cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in all_entrances]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
if blacksmith_hut in lw_entrances:
lw_entrances.remove(blacksmith_hut)
if blacksmith_hut in dw_entrances:
dw_entrances.remove(blacksmith_hut)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
all_entrances = lw_entrances + dw_entrances
        # cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in all_entrances]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
if bomb_shop in lw_entrances:
lw_entrances.remove(bomb_shop)
if bomb_shop in dw_entrances:
dw_entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere in the light world
random.shuffle(lw_entrances)
old_man_entrance = lw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# place Old Man House in Light World
connect_caves(world, lw_entrances, [], list(Old_Man_House), player) #for multiple seeds
# now scramble the rest
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_holes(world, player)
doors = lw_entrances + dw_entrances
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'restricted_legacy':
simple_shuffle_dungeons(world, player)
lw_entrances = list(LW_Entrances)
dw_entrances = list(DW_Entrances)
dw_must_exits = list(DW_Entrances_Must_Exit)
old_man_entrances = list(Old_Man_Entrances)
caves = list(Cave_Exits)
three_exit_caves = list(Cave_Three_Exits)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# only use two exit caves to do mandatory dw connections
connect_mandatory_exits(world, dw_entrances, caves, dw_must_exits, player)
# add three exit doors to pool for remainder
caves.extend(three_exit_caves)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
lw_entrances.extend(old_man_entrances)
random.shuffle(lw_entrances)
old_man_entrance = lw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
# place Old Man House in Light World
connect_caves(world, lw_entrances, [], Old_Man_House, player)
# connect rest. There's 2 dw entrances remaining, so we will not run into parity issue placing caves
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_holes(world, player)
# place blacksmith, has limited options
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place dam and pyramid fairy, have limited options
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
elif world.shuffle[player] == 'full':
skull_woods_shuffle(world, player)
lw_entrances = list(LW_Entrances + LW_Dungeon_Entrances + LW_Single_Cave_Doors + Old_Man_Entrances)
dw_entrances = list(DW_Entrances + DW_Dungeon_Entrances + DW_Single_Cave_Doors)
dw_must_exits = list(DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit)
lw_must_exits = list(LW_Dungeon_Entrances_Must_Exit)
old_man_entrances = list(Old_Man_Entrances + ['Tower of Hera'])
caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits) # don't need to consider three exit caves, have one exit caves to avoid parity issues
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors + Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Single_Cave_Targets)
old_man_house = list(Old_Man_House)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
elif world.doorShuffle[player] != 'vanilla':
lw_entrances.append('Hyrule Castle Entrance (South)')
else:
caves.append(tuple(random.sample(['Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'],3)))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
else:
dw_entrances.append('Ganons Tower')
caves.append('Ganons Tower Exit')
# we randomize which world requirements we fulfill first so we get better dungeon distribution
        # we also place the Old Man House at this time to make sure it can be connected to the desert one way
if random.randint(0, 1) == 0:
caves += old_man_house
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
try:
caves.remove(old_man_house[0])
except ValueError:
pass
else: #if the cave wasn't placed we get here
connect_caves(world, lw_entrances, [], old_man_house, player)
connect_mandatory_exits(world, dw_entrances, caves, dw_must_exits, player)
else:
connect_mandatory_exits(world, dw_entrances, caves, dw_must_exits, player)
caves += old_man_house
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
try:
caves.remove(old_man_house[0])
except ValueError:
pass
else: #if the cave wasn't placed we get here
connect_caves(world, lw_entrances, [], old_man_house, player)
if world.mode[player] == 'standard':
# rest of hyrule castle must be in light world
connect_caves(world, lw_entrances, [], [('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)')], player)
# in full, Sanc must be in light world, so must all of HC if door shuffle is on
elif world.doorShuffle[player] != 'vanilla':
connect_caves(world, lw_entrances, [], [('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)', 'Hyrule Castle Exit (South)')], player)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in lw_entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
lw_entrances.remove(old_man_exit)
# place blacksmith, has limited options
all_entrances = lw_entrances + dw_entrances
        # cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in all_entrances]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
if blacksmith_hut in lw_entrances:
lw_entrances.remove(blacksmith_hut)
if blacksmith_hut in dw_entrances:
dw_entrances.remove(blacksmith_hut)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
all_entrances = lw_entrances + dw_entrances
        # cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in all_entrances]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
if bomb_shop in lw_entrances:
lw_entrances.remove(bomb_shop)
if bomb_shop in dw_entrances:
dw_entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere in the light world
old_man_entrance = lw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# now scramble the rest
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_holes(world, player)
doors = lw_entrances + dw_entrances
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'crossed':
skull_woods_shuffle(world, player)
entrances = list(LW_Entrances + LW_Dungeon_Entrances + LW_Single_Cave_Doors + Old_Man_Entrances + DW_Entrances + DW_Dungeon_Entrances + DW_Single_Cave_Doors)
must_exits = list(DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + LW_Dungeon_Entrances_Must_Exit)
old_man_entrances = list(Old_Man_Entrances + ['Tower of Hera'])
caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits + Old_Man_House) # don't need to consider three exit caves, have one exit caves to avoid parity issues
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors + Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
else:
caves.append(tuple(random.sample(['Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'],3)))
entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
else:
entrances.append('Ganons Tower')
caves.append('Ganons Tower Exit')
#place must-exit caves
connect_mandatory_exits(world, entrances, caves, must_exits, player)
if world.mode[player] == 'standard':
# rest of hyrule castle must be dealt with
connect_caves(world, entrances, [], [('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)')], player)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
entrances.remove(old_man_exit)
# place blacksmith, has limited options
        # cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in entrances]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
entrances.remove(blacksmith_hut)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
        # cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in entrances]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere
random.shuffle(entrances)
old_man_entrance = entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# now scramble the rest
connect_caves(world, entrances, [], caves, player)
# scramble holes
scramble_holes(world, player)
# place remaining doors
connect_doors(world, entrances, door_targets, player)
elif world.shuffle[player] == 'full_legacy':
skull_woods_shuffle(world, player)
lw_entrances = list(LW_Entrances + LW_Dungeon_Entrances + Old_Man_Entrances)
dw_entrances = list(DW_Entrances + DW_Dungeon_Entrances)
dw_must_exits = list(DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit)
lw_must_exits = list(LW_Dungeon_Entrances_Must_Exit)
old_man_entrances = list(Old_Man_Entrances + ['Tower of Hera'])
caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits) # don't need to consider three exit caves, have one exit caves to avoid parity issues
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Single_Cave_Targets)
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
else:
caves.append(tuple(random.sample(['Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'],3)))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
else:
dw_entrances.append('Ganons Tower')
caves.append('Ganons Tower Exit')
# we randomize which world requirements we fulfill first so we get better dungeon distribution
if random.randint(0, 1) == 0:
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
connect_mandatory_exits(world, dw_entrances, caves, dw_must_exits, player)
else:
connect_mandatory_exits(world, dw_entrances, caves, dw_must_exits, player)
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
if world.mode[player] == 'standard':
# rest of hyrule castle must be in light world
connect_caves(world, lw_entrances, [], [('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)')], player)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in lw_entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
lw_entrances.remove(old_man_exit)
random.shuffle(lw_entrances)
old_man_entrance = lw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
# place Old Man House in Light World
connect_caves(world, lw_entrances, [], list(Old_Man_House), player) #need this to avoid badness with multiple seeds
# now scramble the rest
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_holes(world, player)
# place blacksmith, has limited options
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
elif world.shuffle[player] == 'madness_legacy':
# here lie dragons, connections are no longer two way
lw_entrances = list(LW_Entrances + LW_Dungeon_Entrances + Old_Man_Entrances)
dw_entrances = list(DW_Entrances + DW_Dungeon_Entrances)
dw_entrances_must_exits = list(DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit)
lw_doors = list(LW_Entrances + LW_Dungeon_Entrances + LW_Dungeon_Entrances_Must_Exit) + ['Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump',
'Lumberjack Tree Cave'] + list(Old_Man_Entrances)
dw_doors = list(DW_Entrances + DW_Dungeon_Entrances + DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit) + ['Skull Woods First Section Door', 'Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)']
random.shuffle(lw_doors)
random.shuffle(dw_doors)
dw_entrances_must_exits.append('Skull Woods Second Section Door (West)')
dw_entrances.append('Skull Woods Second Section Door (East)')
dw_entrances.append('Skull Woods First Section Door')
lw_entrances.extend(['Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave'])
lw_entrances_must_exits = list(LW_Dungeon_Entrances_Must_Exit)
old_man_entrances = list(Old_Man_Entrances) + ['Tower of Hera']
mandatory_light_world = ['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)']
mandatory_dark_world = []
caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits)
# shuffle up holes
lw_hole_entrances = ['Kakariko Well Drop', 'Bat Cave Drop', 'North Fairy Cave Drop', 'Lost Woods Hideout Drop', 'Lumberjack Tree Tree', 'Sanctuary Grave']
dw_hole_entrances = ['Skull Woods First Section Hole (East)', 'Skull Woods First Section Hole (West)', 'Skull Woods First Section Hole (North)', 'Skull Woods Second Section Hole']
hole_targets = [('Kakariko Well Exit', 'Kakariko Well (top)'),
('Bat Cave Exit', 'Bat Cave (right)'),
('North Fairy Cave Exit', 'North Fairy Cave'),
('Lost Woods Hideout Exit', 'Lost Woods Hideout (top)'),
('Lumberjack Tree Exit', 'Lumberjack Tree (top)'),
(('Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)'), 'Skull Back Drop')]
if world.mode[player] == 'standard':
# cannot move uncle cave
connect_entrance(world, 'Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance', player)
connect_exit(world, 'Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance Stairs', player)
connect_entrance(world, 'Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Exit', player)
else:
lw_hole_entrances.append('Hyrule Castle Secret Entrance Drop')
hole_targets.append(('Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance'))
lw_doors.append('Hyrule Castle Secret Entrance Stairs')
lw_entrances.append('Hyrule Castle Secret Entrance Stairs')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
connect_two_way(world, 'Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Pyramid Hole', 'Pyramid', player)
else:
dw_entrances.append('Ganons Tower')
caves.append('Ganons Tower Exit')
dw_hole_entrances.append('Pyramid Hole')
hole_targets.append(('Pyramid Exit', 'Pyramid'))
dw_entrances_must_exits.append('Pyramid Entrance')
dw_doors.extend(['Ganons Tower', 'Pyramid Entrance'])
random.shuffle(lw_hole_entrances)
random.shuffle(dw_hole_entrances)
random.shuffle(hole_targets)
# decide if skull woods first section should be in light or dark world
sw_light = random.randint(0, 1) == 0
if sw_light:
sw_hole_pool = lw_hole_entrances
mandatory_light_world.append('Skull Woods First Section Exit')
else:
sw_hole_pool = dw_hole_entrances
mandatory_dark_world.append('Skull Woods First Section Exit')
for target in ['Skull Left Drop', 'Skull Pinball', 'Skull Pot Circle']:
connect_entrance(world, sw_hole_pool.pop(), target, player)
# sanctuary has to be in light world
connect_entrance(world, lw_hole_entrances.pop(), 'Sewer Drop', player)
mandatory_light_world.append('Sanctuary Exit')
# fill up remaining holes
for hole in dw_hole_entrances:
exits, target = hole_targets.pop()
mandatory_dark_world.append(exits)
connect_entrance(world, hole, target, player)
for hole in lw_hole_entrances:
exits, target = hole_targets.pop()
mandatory_light_world.append(exits)
connect_entrance(world, hole, target, player)
# hyrule castle handling
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_entrance(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_exit(world, 'Hyrule Castle Exit (South)', 'Hyrule Castle Entrance (South)', player)
mandatory_light_world.append(('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
else:
lw_doors.append('Hyrule Castle Entrance (South)')
lw_entrances.append('Hyrule Castle Entrance (South)')
caves.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
# now let's deal with mandatory reachable stuff
def extract_reachable_exit(cavelist):
random.shuffle(cavelist)
candidate = None
for cave in cavelist:
if isinstance(cave, tuple) and len(cave) > 1:
                    # special handling: TRock and Spectacle Rock Cave have two entries that we should consider entrance only
# ToDo this should be handled in a more sensible manner
if cave[0] in ['Turtle Rock Exit (Front)', 'Spectacle Rock Cave Exit (Peak)'] and len(cave) == 2:
continue
candidate = cave
break
if candidate is None:
raise RuntimeError('No suitable cave.')
cavelist.remove(candidate)
return candidate
def connect_reachable_exit(entrance, general, worldspecific, worldoors):
# select which one is the primary option
if random.randint(0, 1) == 0:
primary = general
secondary = worldspecific
else:
primary = worldspecific
secondary = general
try:
cave = extract_reachable_exit(primary)
except RuntimeError:
cave = extract_reachable_exit(secondary)
exit = cave[-1]
cave = cave[:-1]
connect_exit(world, exit, entrance, player)
connect_entrance(world, worldoors.pop(), exit, player)
# rest of cave now is forced to be in this world
worldspecific.append(cave)
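        # Illustrative trace (cave names hypothetical): given a must-exit entrance
        # and a cave tuple like ('Cave Exit (Bottom)', 'Cave Exit (Top)'), the last
        # exit is wired outward to that entrance so it stays reachable, a free door
        # from this world is pointed into it, and the remaining exits are appended
        # to the world-specific pool so they are placed in the same world later.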
# we randomize which world requirements we fulfill first so we get better dungeon distribution
if random.randint(0, 1) == 0:
for entrance in lw_entrances_must_exits:
connect_reachable_exit(entrance, caves, mandatory_light_world, lw_doors)
for entrance in dw_entrances_must_exits:
connect_reachable_exit(entrance, caves, mandatory_dark_world, dw_doors)
else:
for entrance in dw_entrances_must_exits:
connect_reachable_exit(entrance, caves, mandatory_dark_world, dw_doors)
for entrance in lw_entrances_must_exits:
connect_reachable_exit(entrance, caves, mandatory_light_world, lw_doors)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [entrance for entrance in old_man_entrances if entrance in lw_entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
lw_entrances.remove(old_man_exit)
connect_exit(world, 'Old Man Cave Exit (East)', old_man_exit, player)
connect_entrance(world, lw_doors.pop(), 'Old Man Cave Exit (East)', player)
mandatory_light_world.append('Old Man Cave Exit (West)')
# we connect up the mandatory associations we have found
for mandatory in mandatory_light_world:
if not isinstance(mandatory, tuple):
mandatory = (mandatory,)
for exit in mandatory:
# point out somewhere
connect_exit(world, exit, lw_entrances.pop(), player)
# point in from somewhere
connect_entrance(world, lw_doors.pop(), exit, player)
for mandatory in mandatory_dark_world:
if not isinstance(mandatory, tuple):
mandatory = (mandatory,)
for exit in mandatory:
# point out somewhere
connect_exit(world, exit, dw_entrances.pop(), player)
# point in from somewhere
connect_entrance(world, dw_doors.pop(), exit, player)
# handle remaining caves
while caves:
            # connect highest exit count caves first, prevent issue where we have 2 or 3 exits across worlds left to fill
cave_candidate = (None, 0)
for i, cave in enumerate(caves):
if isinstance(cave, str):
cave = (cave,)
if len(cave) > cave_candidate[1]:
cave_candidate = (i, len(cave))
cave = caves.pop(cave_candidate[0])
place_lightworld = random.randint(0, 1) == 0
if place_lightworld:
target_doors = lw_doors
target_entrances = lw_entrances
else:
target_doors = dw_doors
target_entrances = dw_entrances
if isinstance(cave, str):
cave = (cave,)
# check if we can still fit the cave into our target group
if len(target_doors) < len(cave):
if not place_lightworld:
target_doors = lw_doors
target_entrances = lw_entrances
else:
target_doors = dw_doors
target_entrances = dw_entrances
for exit in cave:
connect_exit(world, exit, target_entrances.pop(), player)
connect_entrance(world, target_doors.pop(), exit, player)
# handle simple doors
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# place blacksmith, has limited options
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place dam and pyramid fairy, have limited options
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
elif world.shuffle[player] == 'insanity':
# beware ye who enter here
entrances = LW_Entrances + LW_Dungeon_Entrances + DW_Entrances + DW_Dungeon_Entrances + Old_Man_Entrances + ['Skull Woods Second Section Door (East)', 'Skull Woods First Section Door', 'Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave']
entrances_must_exits = DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + LW_Dungeon_Entrances_Must_Exit + ['Skull Woods Second Section Door (West)']
doors = LW_Entrances + LW_Dungeon_Entrances + LW_Dungeon_Entrances_Must_Exit + ['Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave'] + Old_Man_Entrances +\
DW_Entrances + DW_Dungeon_Entrances + DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + ['Skull Woods First Section Door', 'Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)'] +\
LW_Single_Cave_Doors + DW_Single_Cave_Doors
# TODO: there are other possible entrances we could support here by way of exiting from a connector,
        # and re-entering to find the bomb shop. However, the appended list here is all those that we currently have
# bomb shop logic for.
# Specifically we could potentially add: 'Dark Death Mountain Ledge (East)' and doors associated with pits
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors + Bomb_Shop_Multi_Cave_Doors+['Desert Palace Entrance (East)', 'Turtle Rock Isolated Ledge Entrance', 'Bumper Cave (Top)', 'Hookshot Cave Back Entrance'])
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Single_Cave_Targets)
random.shuffle(doors)
old_man_entrances = list(Old_Man_Entrances) + ['Tower of Hera']
caves = Cave_Exits + Dungeon_Exits + Cave_Three_Exits + ['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)', 'Skull Woods First Section Exit', 'Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)',
'Kakariko Well Exit', 'Bat Cave Exit', 'North Fairy Cave Exit', 'Lost Woods Hideout Exit', 'Lumberjack Tree Exit', 'Sanctuary Exit']
# shuffle up holes
hole_entrances = ['Kakariko Well Drop', 'Bat Cave Drop', 'North Fairy Cave Drop', 'Lost Woods Hideout Drop', 'Lumberjack Tree Tree', 'Sanctuary Grave',
'Skull Woods First Section Hole (East)', 'Skull Woods First Section Hole (West)', 'Skull Woods First Section Hole (North)', 'Skull Woods Second Section Hole']
hole_targets = ['Kakariko Well (top)', 'Bat Cave (right)', 'North Fairy Cave', 'Lost Woods Hideout (top)', 'Lumberjack Tree (top)', 'Sewer Drop', 'Skull Back Drop',
'Skull Left Drop', 'Skull Pinball', 'Skull Pot Circle']
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
if world.mode[player] == 'standard':
# cannot move uncle cave
connect_entrance(world, 'Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance', player)
connect_exit(world, 'Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance Stairs', player)
connect_entrance(world, 'Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Exit', player)
else:
hole_entrances.append('Hyrule Castle Secret Entrance Drop')
hole_targets.append('Hyrule Castle Secret Entrance')
doors.append('Hyrule Castle Secret Entrance Stairs')
entrances.append('Hyrule Castle Secret Entrance Stairs')
caves.append('Hyrule Castle Secret Entrance Exit')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
connect_two_way(world, 'Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Pyramid Hole', 'Pyramid', player)
else:
entrances.append('Ganons Tower')
caves.extend(['Ganons Tower Exit', 'Pyramid Exit'])
hole_entrances.append('Pyramid Hole')
hole_targets.append('Pyramid')
entrances_must_exits.append('Pyramid Entrance')
doors.extend(['Ganons Tower', 'Pyramid Entrance'])
random.shuffle(hole_entrances)
random.shuffle(hole_targets)
random.shuffle(entrances)
# fill up holes
for hole in hole_entrances:
connect_entrance(world, hole, hole_targets.pop(), player)
# hyrule castle handling
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_entrance(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_exit(world, 'Hyrule Castle Exit (South)', 'Hyrule Castle Entrance (South)', player)
caves.append(('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
else:
doors.append('Hyrule Castle Entrance (South)')
entrances.append('Hyrule Castle Entrance (South)')
caves.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
# now let's deal with mandatory reachable stuff
def extract_reachable_exit(cavelist):
random.shuffle(cavelist)
candidate = None
for cave in cavelist:
if isinstance(cave, tuple) and len(cave) > 1:
# special handling: TRock has two entries that we should consider entrance only
# ToDo this should be handled in a more sensible manner
if cave[0] in ['Turtle Rock Exit (Front)', 'Spectacle Rock Cave Exit (Peak)'] and len(cave) == 2:
continue
candidate = cave
break
if candidate is None:
raise RuntimeError('No suitable cave.')
cavelist.remove(candidate)
return candidate
def connect_reachable_exit(entrance, caves, doors):
cave = extract_reachable_exit(caves)
exit = cave[-1]
cave = cave[:-1]
connect_exit(world, exit, entrance, player)
connect_entrance(world, doors.pop(), exit, player)
            # the rest of the cave is now forced to be in this world
caves.append(cave)
# connect mandatory exits
for entrance in entrances_must_exits:
connect_reachable_exit(entrance, caves, doors)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [entrance for entrance in old_man_entrances if entrance in entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
entrances.remove(old_man_exit)
connect_exit(world, 'Old Man Cave Exit (East)', old_man_exit, player)
connect_entrance(world, doors.pop(), 'Old Man Cave Exit (East)', player)
caves.append('Old Man Cave Exit (West)')
# place blacksmith, has limited options
blacksmith_doors = [door for door in blacksmith_doors if door in doors]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
doors.remove(blacksmith_hut)
# place dam and pyramid fairy, have limited options
bomb_shop_doors = [door for door in bomb_shop_doors if door in doors]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
doors.remove(bomb_shop)
# handle remaining caves
for cave in caves:
if isinstance(cave, str):
cave = (cave,)
for exit in cave:
connect_exit(world, exit, entrances.pop(), player)
connect_entrance(world, doors.pop(), exit, player)
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'insanity_legacy':
world.fix_fake_world[player] = False
# beware ye who enter here
entrances = LW_Entrances + LW_Dungeon_Entrances + DW_Entrances + DW_Dungeon_Entrances + Old_Man_Entrances + ['Skull Woods Second Section Door (East)', 'Skull Woods First Section Door', 'Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave']
entrances_must_exits = DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + LW_Dungeon_Entrances_Must_Exit + ['Skull Woods Second Section Door (West)']
doors = LW_Entrances + LW_Dungeon_Entrances + LW_Dungeon_Entrances_Must_Exit + ['Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave'] + Old_Man_Entrances +\
DW_Entrances + DW_Dungeon_Entrances + DW_Entrances_Must_Exit + DW_Dungeon_Entrances_Must_Exit + ['Skull Woods First Section Door', 'Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)']
random.shuffle(doors)
old_man_entrances = list(Old_Man_Entrances) + ['Tower of Hera']
caves = Cave_Exits + Dungeon_Exits + Cave_Three_Exits + ['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)', 'Skull Woods First Section Exit', 'Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)',
'Kakariko Well Exit', 'Bat Cave Exit', 'North Fairy Cave Exit', 'Lost Woods Hideout Exit', 'Lumberjack Tree Exit', 'Sanctuary Exit']
# shuffle up holes
hole_entrances = ['Kakariko Well Drop', 'Bat Cave Drop', 'North Fairy Cave Drop', 'Lost Woods Hideout Drop', 'Lumberjack Tree Tree', 'Sanctuary Grave',
'Skull Woods First Section Hole (East)', 'Skull Woods First Section Hole (West)', 'Skull Woods First Section Hole (North)', 'Skull Woods Second Section Hole']
hole_targets = ['Kakariko Well (top)', 'Bat Cave (right)', 'North Fairy Cave', 'Lost Woods Hideout (top)', 'Lumberjack Tree (top)', 'Sewer Drop', 'Skull Back Drop',
'Skull Left Drop', 'Skull Pinball', 'Skull Pot Circle']
if world.mode[player] == 'standard':
# cannot move uncle cave
connect_entrance(world, 'Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance', player)
connect_exit(world, 'Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance Stairs', player)
connect_entrance(world, 'Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Exit', player)
else:
hole_entrances.append('Hyrule Castle Secret Entrance Drop')
hole_targets.append('Hyrule Castle Secret Entrance')
doors.append('Hyrule Castle Secret Entrance Stairs')
entrances.append('Hyrule Castle Secret Entrance Stairs')
caves.append('Hyrule Castle Secret Entrance Exit')
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
connect_two_way(world, 'Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Pyramid Hole', 'Pyramid', player)
else:
entrances.append('Ganons Tower')
caves.extend(['Ganons Tower Exit', 'Pyramid Exit'])
hole_entrances.append('Pyramid Hole')
hole_targets.append('Pyramid')
entrances_must_exits.append('Pyramid Entrance')
doors.extend(['Ganons Tower', 'Pyramid Entrance'])
random.shuffle(hole_entrances)
random.shuffle(hole_targets)
random.shuffle(entrances)
# fill up holes
for hole in hole_entrances:
connect_entrance(world, hole, hole_targets.pop(), player)
# hyrule castle handling
if world.mode[player] == 'standard':
# must connect front of hyrule castle to do escape
connect_entrance(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_exit(world, 'Hyrule Castle Exit (South)', 'Hyrule Castle Entrance (South)', player)
caves.append(('Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
else:
doors.append('Hyrule Castle Entrance (South)')
entrances.append('Hyrule Castle Entrance (South)')
caves.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
# now let's deal with mandatory reachable stuff
def extract_reachable_exit(cavelist):
random.shuffle(cavelist)
candidate = None
for cave in cavelist:
if isinstance(cave, tuple) and len(cave) > 1:
# special handling: TRock has two entries that we should consider entrance only
# ToDo this should be handled in a more sensible manner
if cave[0] in ['Turtle Rock Exit (Front)', 'Spectacle Rock Cave Exit (Peak)'] and len(cave) == 2:
continue
candidate = cave
break
if candidate is None:
raise RuntimeError('No suitable cave.')
cavelist.remove(candidate)
return candidate
def connect_reachable_exit(entrance, caves, doors):
cave = extract_reachable_exit(caves)
exit = cave[-1]
cave = cave[:-1]
connect_exit(world, exit, entrance, player)
connect_entrance(world, doors.pop(), exit, player)
            # the rest of the cave is now forced to be in this world
caves.append(cave)
# connect mandatory exits
for entrance in entrances_must_exits:
connect_reachable_exit(entrance, caves, doors)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [entrance for entrance in old_man_entrances if entrance in entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
entrances.remove(old_man_exit)
connect_exit(world, 'Old Man Cave Exit (East)', old_man_exit, player)
connect_entrance(world, doors.pop(), 'Old Man Cave Exit (East)', player)
caves.append('Old Man Cave Exit (West)')
# handle remaining caves
for cave in caves:
if isinstance(cave, str):
cave = (cave,)
for exit in cave:
connect_exit(world, exit, entrances.pop(), player)
connect_entrance(world, doors.pop(), exit, player)
# handle simple doors
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Single_Cave_Targets)
# place blacksmith, has limited options
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place dam and pyramid fairy, have limited options
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
else:
raise NotImplementedError('Shuffling not supported yet')
# check for swamp palace fix
if world.get_entrance('Dam', player).connected_region.name != 'Dam' or world.get_entrance('Swamp Palace', player).connected_region.name != 'Swamp Lobby':
world.swamp_patch_required[player] = True
# check for potion shop location
if world.get_entrance('Potion Shop', player).connected_region.name != 'Potion Shop':
world.powder_patch_required[player] = True
# check for ganon location
if world.get_entrance('Pyramid Hole', player).connected_region.name != 'Pyramid':
world.ganon_at_pyramid[player] = False
# check for Ganon's Tower location
if world.get_entrance('Ganons Tower', player).connected_region.name != 'GT Lobby':
world.ganonstower_vanilla[player] = False
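# entrance shuffle for inverted mode: wires doors, exits and drop targets according to
# world.shuffle[player], mirroring the logic above but drawing on the Inverted_* pools and,
# in the shuffled modes, also placing Link's House and the Dark Sanctuary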
def link_inverted_entrances(world, player):
# Link's house shuffled freely, Houlihan set in mandatory_connections
Dungeon_Exits = Inverted_Dungeon_Exits_Base.copy()
Cave_Exits = Cave_Exits_Base.copy()
Old_Man_House = Old_Man_House_Base.copy()
Cave_Three_Exits = Cave_Three_Exits_Base.copy()
unbias_some_entrances(Dungeon_Exits, Cave_Exits, Old_Man_House, Cave_Three_Exits)
# setup mandatory connections
for exitname, regionname in inverted_mandatory_connections:
connect_simple(world, exitname, regionname, player)
# if we do not shuffle, set default connections
if world.shuffle[player] == 'vanilla':
for exitname, regionname in inverted_default_connections:
connect_simple(world, exitname, regionname, player)
for exitname, regionname in inverted_default_dungeon_connections:
connect_simple(world, exitname, regionname, player)
elif world.shuffle[player] == 'dungeonssimple':
for exitname, regionname in inverted_default_connections:
connect_simple(world, exitname, regionname, player)
simple_shuffle_dungeons(world, player)
elif world.shuffle[player] == 'dungeonsfull':
for exitname, regionname in inverted_default_connections:
connect_simple(world, exitname, regionname, player)
skull_woods_shuffle(world, player)
dungeon_exits = list(Dungeon_Exits)
lw_entrances = list(Inverted_LW_Dungeon_Entrances)
lw_dungeon_entrances_must_exit = list(Inverted_LW_Dungeon_Entrances_Must_Exit)
dw_entrances = list(Inverted_DW_Dungeon_Entrances)
# randomize which desert ledge door is a must-exit
if random.randint(0, 1) == 0:
lw_dungeon_entrances_must_exit.append('Desert Palace Entrance (North)')
dp_must_exit = 'Desert Palace Entrance (North)'
lw_entrances.append('Desert Palace Entrance (West)')
else:
lw_dungeon_entrances_must_exit.append('Desert Palace Entrance (West)')
dp_must_exit = 'Desert Palace Entrance (West)'
lw_entrances.append('Desert Palace Entrance (North)')
dungeon_exits.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)']
else:
lw_entrances.append('Inverted Ganons Tower')
dungeon_exits.append('Inverted Ganons Tower Exit')
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)', 'Inverted Ganons Tower']
        # shuffle the aga door first. If it's on the HC ledge, a remaining HC ledge door must be a must-exit
all_entrances_aga = lw_entrances + dw_entrances
aga_doors = [i for i in all_entrances_aga]
random.shuffle(aga_doors)
aga_door = aga_doors.pop()
if aga_door in hc_ledge_entrances:
lw_entrances.remove(aga_door)
hc_ledge_entrances.remove(aga_door)
random.shuffle(hc_ledge_entrances)
hc_ledge_must_exit = hc_ledge_entrances.pop()
lw_entrances.remove(hc_ledge_must_exit)
lw_dungeon_entrances_must_exit.append(hc_ledge_must_exit)
if aga_door in lw_entrances:
lw_entrances.remove(aga_door)
elif aga_door in dw_entrances:
dw_entrances.remove(aga_door)
connect_two_way(world, aga_door, 'Inverted Agahnims Tower Exit', player)
dungeon_exits.remove('Inverted Agahnims Tower Exit')
all_dungeon_entrances = dw_entrances + lw_entrances
connect_mandatory_exits(world, all_dungeon_entrances, dungeon_exits, lw_dungeon_entrances_must_exit, player, dp_must_exit)
remaining_dw_entrances = [i for i in all_dungeon_entrances if i in dw_entrances]
remaining_lw_entrances = [i for i in all_dungeon_entrances if i in lw_entrances]
connect_caves(world, remaining_lw_entrances, remaining_dw_entrances, dungeon_exits, player)
elif world.shuffle[player] == 'simple':
simple_shuffle_dungeons(world, player)
old_man_entrances = list(Inverted_Old_Man_Entrances)
caves = list(Cave_Exits)
three_exit_caves = list(Cave_Three_Exits)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
        # we shuffle all two-entrance caves as pairs to start with
# start with the ones that need to be directed
two_door_caves = list(Inverted_Two_Door_Caves_Directional)
random.shuffle(two_door_caves)
random.shuffle(caves)
while two_door_caves:
entrance1, entrance2 = two_door_caves.pop()
exit1, exit2 = caves.pop()
connect_two_way(world, entrance1, exit1, player)
connect_two_way(world, entrance2, exit2, player)
# now the remaining pairs
two_door_caves = list(Inverted_Two_Door_Caves)
random.shuffle(two_door_caves)
while two_door_caves:
entrance1, entrance2 = two_door_caves.pop()
exit1, exit2 = caves.pop()
connect_two_way(world, entrance1, exit1, player)
connect_two_way(world, entrance2, exit2, player)
# place links house
links_house_doors = [i for i in bomb_shop_doors + blacksmith_doors if i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in bomb_shop_doors:
bomb_shop_doors.remove(links_house)
if links_house in blacksmith_doors:
blacksmith_doors.remove(links_house)
if links_house in old_man_entrances:
old_man_entrances.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in bomb_shop_doors]
sanc_door = random.choice(sanc_doors)
bomb_shop_doors.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
lw_dm_entrances = ['Paradox Cave (Bottom)', 'Paradox Cave (Middle)', 'Paradox Cave (Top)', 'Old Man House (Bottom)',
'Fairy Ascension Cave (Bottom)', 'Fairy Ascension Cave (Top)', 'Spiral Cave (Bottom)', 'Old Man Cave (East)',
'Death Mountain Return Cave (East)', 'Spiral Cave', 'Old Man House (Top)', 'Spectacle Rock Cave',
'Spectacle Rock Cave Peak', 'Spectacle Rock Cave (Bottom)']
        # place the old man: Bumper Cave (Bottom) takes the west exit; the east exit goes to a DDM entrance not in the east bottom
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, 'Bumper Cave (Bottom)', 'Old Man Cave Exit (West)', player)
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
if old_man_exit == 'Spike Cave':
bomb_shop_doors.remove('Spike Cave')
bomb_shop_doors.extend(old_man_entrances)
# add old man house to ensure it is always somewhere on light death mountain
caves.extend(list(Old_Man_House))
caves.extend(list(three_exit_caves))
# connect rest
connect_caves(world, lw_dm_entrances, [], caves, player)
# scramble holes
scramble_inverted_holes(world, player)
# place blacksmith, has limited options
blacksmith_doors = [door for door in blacksmith_doors[:]]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
bomb_shop_doors = [door for door in bomb_shop_doors[:]]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
single_doors.extend(bomb_shop_doors)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place remaining doors
connect_doors(world, single_doors, door_targets, player)
elif world.shuffle[player] == 'restricted':
simple_shuffle_dungeons(world, player)
lw_entrances = list(Inverted_LW_Entrances + Inverted_LW_Single_Cave_Doors)
dw_entrances = list(Inverted_DW_Entrances + Inverted_DW_Single_Cave_Doors + Inverted_Old_Man_Entrances)
lw_must_exits = list(Inverted_LW_Entrances_Must_Exit)
old_man_entrances = list(Inverted_Old_Man_Entrances)
caves = list(Cave_Exits + Cave_Three_Exits + Old_Man_House)
single_doors = list(Single_Cave_Doors)
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors + Inverted_Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
# place links house
links_house_doors = [i for i in lw_entrances + dw_entrances + lw_must_exits if i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in lw_entrances:
lw_entrances.remove(links_house)
elif links_house in dw_entrances:
dw_entrances.remove(links_house)
elif links_house in lw_must_exits:
lw_must_exits.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in dw_entrances]
sanc_door = random.choice(sanc_doors)
dw_entrances.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
# place must exits
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in dw_entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
dw_entrances.remove(old_man_exit)
# place blacksmith, has limited options
all_entrances = lw_entrances + dw_entrances
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in all_entrances]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
if blacksmith_hut in lw_entrances:
lw_entrances.remove(blacksmith_hut)
if blacksmith_hut in dw_entrances:
dw_entrances.remove(blacksmith_hut)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
all_entrances = lw_entrances + dw_entrances
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in all_entrances]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
if bomb_shop in lw_entrances:
lw_entrances.remove(bomb_shop)
if bomb_shop in dw_entrances:
dw_entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere in the dark world
random.shuffle(dw_entrances)
old_man_entrance = dw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# now scramble the rest
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_inverted_holes(world, player)
doors = lw_entrances + dw_entrances
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'full':
skull_woods_shuffle(world, player)
lw_entrances = list(Inverted_LW_Entrances + Inverted_LW_Dungeon_Entrances + Inverted_LW_Single_Cave_Doors)
dw_entrances = list(Inverted_DW_Entrances + Inverted_DW_Dungeon_Entrances + Inverted_DW_Single_Cave_Doors + Inverted_Old_Man_Entrances)
lw_must_exits = list(Inverted_LW_Dungeon_Entrances_Must_Exit + Inverted_LW_Entrances_Must_Exit)
old_man_entrances = list(Inverted_Old_Man_Entrances + Old_Man_Entrances + ['Inverted Agahnims Tower', 'Tower of Hera'])
        caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits) # don't need to consider three-exit caves; we have one-exit caves to avoid parity issues
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors + Inverted_Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
old_man_house = list(Old_Man_House)
# randomize which desert ledge door is a must-exit
if random.randint(0, 1) == 0:
lw_must_exits.append('Desert Palace Entrance (North)')
dp_must_exit = 'Desert Palace Entrance (North)'
lw_entrances.append('Desert Palace Entrance (West)')
else:
lw_must_exits.append('Desert Palace Entrance (West)')
dp_must_exit = 'Desert Palace Entrance (West)'
lw_entrances.append('Desert Palace Entrance (North)')
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
caves.append(tuple(random.sample(['Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'],3)))
lw_entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)']
else:
lw_entrances.append('Inverted Ganons Tower')
caves.append('Inverted Ganons Tower Exit')
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)', 'Inverted Ganons Tower']
# shuffle aga door first. if it's on hc ledge, then one other hc ledge door has to be must_exit
all_entrances_aga = lw_entrances + dw_entrances
aga_doors = [i for i in all_entrances_aga]
random.shuffle(aga_doors)
aga_door = aga_doors.pop()
if aga_door in hc_ledge_entrances:
lw_entrances.remove(aga_door)
hc_ledge_entrances.remove(aga_door)
random.shuffle(hc_ledge_entrances)
hc_ledge_must_exit = hc_ledge_entrances.pop()
lw_entrances.remove(hc_ledge_must_exit)
lw_must_exits.append(hc_ledge_must_exit)
if aga_door in lw_entrances:
lw_entrances.remove(aga_door)
elif aga_door in dw_entrances:
dw_entrances.remove(aga_door)
connect_two_way(world, aga_door, 'Inverted Agahnims Tower Exit', player)
caves.remove('Inverted Agahnims Tower Exit')
# place links house
links_house_doors = [i for i in lw_entrances + dw_entrances + lw_must_exits if i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in lw_entrances:
lw_entrances.remove(links_house)
if links_house in dw_entrances:
dw_entrances.remove(links_house)
if links_house in lw_must_exits:
lw_must_exits.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in dw_entrances]
sanc_door = random.choice(sanc_doors)
dw_entrances.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
# place old man house
# no dw must exits in inverted, but we randomize whether cave is in light or dark world
if random.randint(0, 1) == 0:
caves += old_man_house
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player, dp_must_exit)
try:
caves.remove(old_man_house[0])
except ValueError:
pass
            else:  # if the cave wasn't placed by the mandatory exits, place it now
connect_caves(world, lw_entrances, [], old_man_house, player)
else:
connect_caves(world, dw_entrances, [], old_man_house, player)
connect_mandatory_exits(world, lw_entrances, caves, lw_must_exits, player, dp_must_exit)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in dw_entrances + lw_entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
if old_man_exit in dw_entrances:
dw_entrances.remove(old_man_exit)
old_man_world = 'dark'
elif old_man_exit in lw_entrances:
lw_entrances.remove(old_man_exit)
old_man_world = 'light'
# place blacksmith, has limited options
all_entrances = lw_entrances + dw_entrances
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in all_entrances]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
if blacksmith_hut in lw_entrances:
lw_entrances.remove(blacksmith_hut)
if blacksmith_hut in dw_entrances:
dw_entrances.remove(blacksmith_hut)
bomb_shop_doors.extend(blacksmith_doors)
# place bomb shop, has limited options
all_entrances = lw_entrances + dw_entrances
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in all_entrances]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
if bomb_shop in lw_entrances:
lw_entrances.remove(bomb_shop)
if bomb_shop in dw_entrances:
dw_entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere in the same world he'll exit from
if old_man_world == 'light':
random.shuffle(lw_entrances)
old_man_entrance = lw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
elif old_man_world == 'dark':
random.shuffle(dw_entrances)
old_man_entrance = dw_entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# now scramble the rest
connect_caves(world, lw_entrances, dw_entrances, caves, player)
# scramble holes
scramble_inverted_holes(world, player)
doors = lw_entrances + dw_entrances
# place remaining doors
connect_doors(world, doors, door_targets, player)
elif world.shuffle[player] == 'crossed':
skull_woods_shuffle(world, player)
entrances = list(Inverted_LW_Entrances + Inverted_LW_Dungeon_Entrances + Inverted_LW_Single_Cave_Doors + Inverted_Old_Man_Entrances + Inverted_DW_Entrances + Inverted_DW_Dungeon_Entrances + Inverted_DW_Single_Cave_Doors)
must_exits = list(Inverted_LW_Entrances_Must_Exit + Inverted_LW_Dungeon_Entrances_Must_Exit)
old_man_entrances = list(Inverted_Old_Man_Entrances + Old_Man_Entrances + ['Inverted Agahnims Tower', 'Tower of Hera'])
        caves = list(Cave_Exits + Dungeon_Exits + Cave_Three_Exits + Old_Man_House) # don't need to consider three-exit caves; we have one-exit caves to avoid parity issues
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors + Inverted_Bomb_Shop_Multi_Cave_Doors)
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
# randomize which desert ledge door is a must-exit
if random.randint(0, 1) == 0:
must_exits.append('Desert Palace Entrance (North)')
dp_must_exit = 'Desert Palace Entrance (North)'
entrances.append('Desert Palace Entrance (West)')
else:
must_exits.append('Desert Palace Entrance (West)')
dp_must_exit = 'Desert Palace Entrance (West)'
entrances.append('Desert Palace Entrance (North)')
caves.append(tuple(random.sample(['Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'],3)))
entrances.append('Hyrule Castle Entrance (South)')
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)']
else:
entrances.append('Inverted Ganons Tower')
caves.append('Inverted Ganons Tower Exit')
hc_ledge_entrances = ['Hyrule Castle Entrance (West)', 'Hyrule Castle Entrance (East)', 'Inverted Ganons Tower']
# shuffle aga door. if it's on hc ledge, then one other hc ledge door has to be must_exit
aga_door = random.choice(list(entrances))
if aga_door in hc_ledge_entrances:
hc_ledge_entrances.remove(aga_door)
random.shuffle(hc_ledge_entrances)
hc_ledge_must_exit = hc_ledge_entrances.pop()
entrances.remove(hc_ledge_must_exit)
must_exits.append(hc_ledge_must_exit)
entrances.remove(aga_door)
connect_two_way(world, aga_door, 'Inverted Agahnims Tower Exit', player)
caves.remove('Inverted Agahnims Tower Exit')
# place links house
links_house_doors = [i for i in entrances + must_exits if i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in entrances:
entrances.remove(links_house)
elif links_house in must_exits:
must_exits.remove(links_house)
# place dark sanc
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in entrances]
sanc_door = random.choice(sanc_doors)
entrances.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
        # place must-exit caves
connect_mandatory_exits(world, entrances, caves, must_exits, player, dp_must_exit)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [door for door in old_man_entrances if door in entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
connect_two_way(world, old_man_exit, 'Old Man Cave Exit (East)', player)
entrances.remove(old_man_exit)
# place blacksmith, has limited options
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
blacksmith_doors = [door for door in blacksmith_doors if door in entrances]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
entrances.remove(blacksmith_hut)
# place bomb shop, has limited options
# cannot place it anywhere already taken (or that are otherwise not eligible for placement)
bomb_shop_doors = [door for door in bomb_shop_doors if door in entrances]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
entrances.remove(bomb_shop)
# place the old man cave's entrance somewhere
random.shuffle(entrances)
old_man_entrance = entrances.pop()
connect_two_way(world, old_man_entrance, 'Old Man Cave Exit (West)', player)
# now scramble the rest
connect_caves(world, entrances, [], caves, player)
# scramble holes
scramble_inverted_holes(world, player)
# place remaining doors
connect_doors(world, entrances, door_targets, player)
elif world.shuffle[player] == 'insanity':
# beware ye who enter here
entrances = Inverted_LW_Entrances + Inverted_LW_Dungeon_Entrances + Inverted_DW_Entrances + Inverted_DW_Dungeon_Entrances + Inverted_Old_Man_Entrances + Old_Man_Entrances + ['Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)', 'Skull Woods First Section Door', 'Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave', 'Hyrule Castle Entrance (South)']
entrances_must_exits = Inverted_LW_Entrances_Must_Exit + Inverted_LW_Dungeon_Entrances_Must_Exit
doors = Inverted_LW_Entrances + Inverted_LW_Dungeon_Entrances + Inverted_LW_Entrances_Must_Exit + Inverted_LW_Dungeon_Entrances_Must_Exit + ['Kakariko Well Cave', 'Bat Cave Cave', 'North Fairy Cave', 'Sanctuary', 'Lost Woods Hideout Stump', 'Lumberjack Tree Cave', 'Hyrule Castle Secret Entrance Stairs'] + Inverted_Old_Man_Entrances +\
Inverted_DW_Entrances + Inverted_DW_Dungeon_Entrances + ['Skull Woods First Section Door', 'Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)'] +\
Inverted_LW_Single_Cave_Doors + Inverted_DW_Single_Cave_Doors + ['Desert Palace Entrance (West)', 'Desert Palace Entrance (North)']
# randomize which desert ledge door is a must-exit
if random.randint(0, 1) == 0:
entrances_must_exits.append('Desert Palace Entrance (North)')
entrances.append('Desert Palace Entrance (West)')
else:
entrances_must_exits.append('Desert Palace Entrance (West)')
entrances.append('Desert Palace Entrance (North)')
        # TODO: there are other possible entrances we could support here by way of exiting from a connector
        # and re-entering to find the bomb shop. However, the appended list here is all those that we
        # currently have bomb shop logic for.
# Specifically we could potentially add: 'Dark Death Mountain Ledge (East)' and doors associated with pits
bomb_shop_doors = list(Inverted_Bomb_Shop_Single_Cave_Doors + Inverted_Bomb_Shop_Multi_Cave_Doors + ['Turtle Rock Isolated Ledge Entrance', 'Bumper Cave (Top)', 'Hookshot Cave Back Entrance'])
blacksmith_doors = list(Blacksmith_Single_Cave_Doors + Blacksmith_Multi_Cave_Doors)
door_targets = list(Inverted_Single_Cave_Targets)
random.shuffle(doors)
old_man_entrances = list(Inverted_Old_Man_Entrances + Old_Man_Entrances) + ['Tower of Hera', 'Inverted Agahnims Tower']
caves = Cave_Exits + Dungeon_Exits + Cave_Three_Exits + ['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)', 'Skull Woods First Section Exit', 'Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)',
'Kakariko Well Exit', 'Bat Cave Exit', 'North Fairy Cave Exit', 'Lost Woods Hideout Exit', 'Lumberjack Tree Exit', 'Sanctuary Exit']
# shuffle up holes
hole_entrances = ['Kakariko Well Drop', 'Bat Cave Drop', 'North Fairy Cave Drop', 'Lost Woods Hideout Drop', 'Lumberjack Tree Tree', 'Sanctuary Grave',
'Skull Woods First Section Hole (East)', 'Skull Woods First Section Hole (West)', 'Skull Woods First Section Hole (North)', 'Skull Woods Second Section Hole']
hole_targets = ['Kakariko Well (top)', 'Bat Cave (right)', 'North Fairy Cave', 'Lost Woods Hideout (top)', 'Lumberjack Tree (top)', 'Sewer Drop', 'Skull Back Drop',
'Skull Left Drop', 'Skull Pinball', 'Skull Pot Circle']
# tavern back door cannot be shuffled yet
connect_doors(world, ['Tavern North'], ['Tavern'], player)
hole_entrances.append('Hyrule Castle Secret Entrance Drop')
hole_targets.append('Hyrule Castle Secret Entrance')
entrances.append('Hyrule Castle Secret Entrance Stairs')
caves.append('Hyrule Castle Secret Entrance Exit')
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
connect_two_way(world, 'Inverted Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Inverted Pyramid Hole', 'Pyramid', player)
else:
entrances.append('Inverted Ganons Tower')
caves.extend(['Inverted Ganons Tower Exit', 'Pyramid Exit'])
hole_entrances.append('Inverted Pyramid Hole')
hole_targets.append('Pyramid')
doors.extend(['Inverted Ganons Tower', 'Inverted Pyramid Entrance'])
random.shuffle(hole_entrances)
random.shuffle(hole_targets)
random.shuffle(entrances)
# fill up holes
for hole in hole_entrances:
connect_entrance(world, hole, hole_targets.pop(), player)
doors.append('Hyrule Castle Entrance (South)')
caves.append(('Hyrule Castle Exit (South)', 'Hyrule Castle Exit (West)', 'Hyrule Castle Exit (East)'))
# place links house and dark sanc
links_house_doors = [i for i in entrances + entrances_must_exits if i not in Inverted_Dark_Sanctuary_Doors + Isolated_LH_Doors]
links_house = random.choice(list(links_house_doors))
connect_two_way(world, links_house, 'Inverted Links House Exit', player)
if links_house in entrances:
entrances.remove(links_house)
elif links_house in entrances_must_exits:
entrances_must_exits.remove(links_house)
doors.remove(links_house)
sanc_doors = [door for door in Inverted_Dark_Sanctuary_Doors if door in entrances]
sanc_door = random.choice(sanc_doors)
entrances.remove(sanc_door)
doors.remove(sanc_door)
connect_entrance(world, sanc_door, 'Inverted Dark Sanctuary', player)
world.get_entrance('Inverted Dark Sanctuary Exit', player).connect(world.get_entrance(sanc_door, player).parent_region)
# now let's deal with mandatory reachable stuff
def extract_reachable_exit(cavelist):
random.shuffle(cavelist)
candidate = None
for cave in cavelist:
if isinstance(cave, tuple) and len(cave) > 1:
# special handling: TRock has two entries that we should consider entrance only
# ToDo this should be handled in a more sensible manner
if cave[0] in ['Turtle Rock Exit (Front)', 'Spectacle Rock Cave Exit (Peak)'] and len(cave) == 2:
continue
candidate = cave
break
if candidate is None:
raise RuntimeError('No suitable cave.')
cavelist.remove(candidate)
return candidate
def connect_reachable_exit(entrance, caves, doors):
cave = extract_reachable_exit(caves)
exit = cave[-1]
cave = cave[:-1]
connect_exit(world, exit, entrance, player)
connect_entrance(world, doors.pop(), exit, player)
            # the rest of the cave is now forced to be in this world
caves.append(cave)
# connect mandatory exits
for entrance in entrances_must_exits:
connect_reachable_exit(entrance, caves, doors)
# place old man, has limited options
# exit has to come from specific set of doors, the entrance is free to move about
old_man_entrances = [entrance for entrance in old_man_entrances if entrance in entrances]
random.shuffle(old_man_entrances)
old_man_exit = old_man_entrances.pop()
entrances.remove(old_man_exit)
connect_exit(world, 'Old Man Cave Exit (East)', old_man_exit, player)
connect_entrance(world, doors.pop(), 'Old Man Cave Exit (East)', player)
caves.append('Old Man Cave Exit (West)')
# place blacksmith, has limited options
blacksmith_doors = [door for door in blacksmith_doors if door in doors]
random.shuffle(blacksmith_doors)
blacksmith_hut = blacksmith_doors.pop()
connect_entrance(world, blacksmith_hut, 'Blacksmiths Hut', player)
doors.remove(blacksmith_hut)
# place dam and pyramid fairy, have limited options
bomb_shop_doors = [door for door in bomb_shop_doors if door in doors]
random.shuffle(bomb_shop_doors)
bomb_shop = bomb_shop_doors.pop()
connect_entrance(world, bomb_shop, 'Inverted Big Bomb Shop', player)
doors.remove(bomb_shop)
# handle remaining caves
for cave in caves:
if isinstance(cave, str):
cave = (cave,)
for exit in cave:
connect_exit(world, exit, entrances.pop(), player)
connect_entrance(world, doors.pop(), exit, player)
# place remaining doors
connect_doors(world, doors, door_targets, player)
else:
raise NotImplementedError('Shuffling not supported yet')
# check for swamp palace fix
if world.get_entrance('Dam', player).connected_region.name != 'Dam' or world.get_entrance('Swamp Palace', player).connected_region.name != 'Swamp Lobby':
world.swamp_patch_required[player] = True
# check for potion shop location
if world.get_entrance('Potion Shop', player).connected_region.name != 'Potion Shop':
world.powder_patch_required[player] = True
# check for ganon location
if world.get_entrance('Inverted Pyramid Hole', player).connected_region.name != 'Pyramid':
world.ganon_at_pyramid[player] = False
# check for Ganon's Tower location
if world.get_entrance('Inverted Ganons Tower', player).connected_region.name != 'GT Lobby':
world.ganonstower_vanilla[player] = False
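# connect an exit directly to a region by name, without touching door addresses or exit ids
# (used for the static/default connections)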
def connect_simple(world, exitname, regionname, player):
world.get_entrance(exitname, player).connect(world.get_region(regionname, player))
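# one-way connection of an outdoor entrance to a target (an interior exit or a region);
# records the door address and target exit id on the connection and logs it in the spoiler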
def connect_entrance(world, entrancename, exitname, player):
entrance = world.get_entrance(entrancename, player)
# check if we got an entrance or a region to connect to
try:
region = world.get_region(exitname, player)
exit = None
except RuntimeError:
exit = world.get_entrance(exitname, player)
region = exit.parent_region
# if this was already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
target = exit_ids[exit.name][0] if exit is not None else exit_ids.get(region.name, None)
addresses = door_addresses[entrance.name][0]
entrance.connect(region, addresses, target)
world.spoiler.set_entrance(entrance.name, exit.name if exit is not None else region.name, 'entrance', player)
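# one-way connection of an interior exit back to the outdoor region of the given entrance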
def connect_exit(world, exitname, entrancename, player):
entrance = world.get_entrance(entrancename, player)
exit = world.get_entrance(exitname, player)
# if this was already connected somewhere, remove the backreference
if exit.connected_region is not None:
exit.connected_region.entrances.remove(exit)
exit.connect(entrance.parent_region, door_addresses[entrance.name][1], exit_ids[exit.name][1])
world.spoiler.set_entrance(entrance.name, exit.name, 'exit', player)
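# connect an entrance and an interior exit in both directions (the normal case for most doors),
# e.g. connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player) as used above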
def connect_two_way(world, entrancename, exitname, player):
entrance = world.get_entrance(entrancename, player)
exit = world.get_entrance(exitname, player)
# if these were already connected somewhere, remove the backreference
if entrance.connected_region is not None:
entrance.connected_region.entrances.remove(entrance)
if exit.connected_region is not None:
exit.connected_region.entrances.remove(exit)
entrance.connect(exit.parent_region, door_addresses[entrance.name][0], exit_ids[exit.name][0])
exit.connect(entrance.parent_region, door_addresses[entrance.name][1], exit_ids[exit.name][1])
world.spoiler.set_entrance(entrance.name, exit.name, 'both', player)
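# shuffle the drop-down holes together with their cave doors: each (door, drop) pair is matched
# to a random (exit, drop target) pair, with special handling for the pyramid, the uncle cave in
# standard mode and the sanctuary/sewer drop restriction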
def scramble_holes(world, player):
hole_entrances = [('Kakariko Well Cave', 'Kakariko Well Drop'),
('Bat Cave Cave', 'Bat Cave Drop'),
('North Fairy Cave', 'North Fairy Cave Drop'),
('Lost Woods Hideout Stump', 'Lost Woods Hideout Drop'),
('Lumberjack Tree Cave', 'Lumberjack Tree Tree'),
('Sanctuary', 'Sanctuary Grave')]
hole_targets = [('Kakariko Well Exit', 'Kakariko Well (top)'),
('Bat Cave Exit', 'Bat Cave (right)'),
('North Fairy Cave Exit', 'North Fairy Cave'),
('Lost Woods Hideout Exit', 'Lost Woods Hideout (top)'),
('Lumberjack Tree Exit', 'Lumberjack Tree (top)')]
if not world.shuffle_ganon:
connect_two_way(world, 'Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Pyramid Hole', 'Pyramid', player)
else:
hole_targets.append(('Pyramid Exit', 'Pyramid'))
if world.mode[player] == 'standard':
# cannot move uncle cave
connect_two_way(world, 'Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Exit', player)
connect_entrance(world, 'Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance', player)
else:
hole_entrances.append(('Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Drop'))
hole_targets.append(('Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance'))
# do not shuffle sanctuary into pyramid hole unless shuffle is crossed
if world.shuffle[player] == 'crossed':
hole_targets.append(('Sanctuary Exit', 'Sewer Drop'))
if world.shuffle_ganon:
random.shuffle(hole_targets)
exit, target = hole_targets.pop()
connect_two_way(world, 'Pyramid Entrance', exit, player)
connect_entrance(world, 'Pyramid Hole', target, player)
if world.shuffle[player] != 'crossed':
hole_targets.append(('Sanctuary Exit', 'Sewer Drop'))
random.shuffle(hole_targets)
for entrance, drop in hole_entrances:
exit, target = hole_targets.pop()
connect_two_way(world, entrance, exit, player)
connect_entrance(world, drop, target, player)
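# inverted-mode variant of scramble_holes: same pairing logic, but uses the inverted pyramid
# entrances and always shuffles the hyrule castle secret entrance pair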
def scramble_inverted_holes(world, player):
hole_entrances = [('Kakariko Well Cave', 'Kakariko Well Drop'),
('Bat Cave Cave', 'Bat Cave Drop'),
('North Fairy Cave', 'North Fairy Cave Drop'),
('Lost Woods Hideout Stump', 'Lost Woods Hideout Drop'),
('Lumberjack Tree Cave', 'Lumberjack Tree Tree'),
('Sanctuary', 'Sanctuary Grave')]
hole_targets = [('Kakariko Well Exit', 'Kakariko Well (top)'),
('Bat Cave Exit', 'Bat Cave (right)'),
('North Fairy Cave Exit', 'North Fairy Cave'),
('Lost Woods Hideout Exit', 'Lost Woods Hideout (top)'),
('Lumberjack Tree Exit', 'Lumberjack Tree (top)')]
if not world.shuffle_ganon:
connect_two_way(world, 'Inverted Pyramid Entrance', 'Pyramid Exit', player)
connect_entrance(world, 'Inverted Pyramid Hole', 'Pyramid', player)
else:
hole_targets.append(('Pyramid Exit', 'Pyramid'))
hole_entrances.append(('Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance Drop'))
hole_targets.append(('Hyrule Castle Secret Entrance Exit', 'Hyrule Castle Secret Entrance'))
# do not shuffle sanctuary into pyramid hole unless shuffle is crossed
if world.shuffle[player] == 'crossed':
hole_targets.append(('Sanctuary Exit', 'Sewer Drop'))
if world.shuffle_ganon:
random.shuffle(hole_targets)
exit, target = hole_targets.pop()
connect_two_way(world, 'Inverted Pyramid Entrance', exit, player)
connect_entrance(world, 'Inverted Pyramid Hole', target, player)
if world.shuffle[player] != 'crossed':
hole_targets.append(('Sanctuary Exit', 'Sewer Drop'))
random.shuffle(hole_targets)
for entrance, drop in hole_entrances:
exit, target = hole_targets.pop()
connect_two_way(world, entrance, exit, player)
connect_entrance(world, drop, target, player)
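# pair each exit in exitlist with a random target from targetlist, one-way by default or
# two-way when requested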
def connect_random(world, exitlist, targetlist, player, two_way=False):
targetlist = list(targetlist)
random.shuffle(targetlist)
for exit, target in zip(exitlist, targetlist):
if two_way:
connect_two_way(world, exit, target, player)
else:
connect_entrance(world, exit, target, player)
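# guarantee that every must-exit entrance leads out through a multi-exit cave: each such entrance
# is attached to the always-reachable last exit of a cave, and the cave's remaining exits are then
# filled from the regular entrance pool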
def connect_mandatory_exits(world, entrances, caves, must_be_exits, player, dp_must_exit=None):
"""This works inplace"""
random.shuffle(entrances)
random.shuffle(caves)
used_caves = []
while must_be_exits:
exit = must_be_exits.pop()
# find multi exit cave
cave = None
for candidate in caves:
if not isinstance(candidate, str):
cave = candidate
break
if cave is None:
raise RuntimeError('No more caves left. Should not happen!')
# all caves are sorted so that the last exit is always reachable
connect_two_way(world, exit, cave[-1], player)
if len(cave) == 2:
entrance = entrances.pop()
            # ToDo: better solution, this is a hot fix. Do not connect both sides of the TRock/Desert ledge only to each other
if world.mode[player] != 'inverted' and entrance == 'Dark Death Mountain Ledge (West)':
new_entrance = entrances.pop()
entrances.append(entrance)
entrance = new_entrance
if world.mode[player] == 'inverted' and entrance == dp_must_exit:
new_entrance = entrances.pop()
entrances.append(entrance)
entrance = new_entrance
connect_two_way(world, entrance, cave[0], player)
        elif cave[-1] == 'Spectacle Rock Cave Exit':  # Spectacle Rock only has one exit
            for exit in cave[:-1]:
                connect_two_way(world, entrances.pop(), exit, player)
        else:  # save for later so we can connect to multiple exits
caves.append(cave[0:-1])
random.shuffle(caves)
used_caves.append(cave[0:-1])
caves.remove(cave)
for cave in used_caves:
        if cave in caves:  # check if we placed multiple entrances from this 3- or 4-exit cave
for exit in cave:
connect_two_way(world, entrances.pop(), exit, player)
caves.remove(cave)
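# distribute the remaining caves across the light- and dark-world entrance pools, placing the
# caves with the most exits first so neither pool runs out of room for a multi-exit cave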
def connect_caves(world, lw_entrances, dw_entrances, caves, player):
"""This works inplace"""
random.shuffle(lw_entrances)
random.shuffle(dw_entrances)
random.shuffle(caves)
while caves:
        # connect the highest exit count caves first, to prevent an issue where we have 2 or 3 exits across worlds left to fill
cave_candidate = (None, 0)
for i, cave in enumerate(caves):
if isinstance(cave, str):
cave = (cave,)
if len(cave) > cave_candidate[1]:
cave_candidate = (i, len(cave))
cave = caves.pop(cave_candidate[0])
target = lw_entrances if random.randint(0, 1) == 0 else dw_entrances
if isinstance(cave, str):
cave = (cave,)
# check if we can still fit the cave into our target group
if len(target) < len(cave):
# need to use other set
target = lw_entrances if target is dw_entrances else dw_entrances
for exit in cave:
connect_two_way(world, target.pop(), exit, player)
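# randomly pair the remaining doors with the remaining single-cave targets (one-way connections)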
def connect_doors(world, doors, targets, player):
"""This works inplace"""
random.shuffle(doors)
random.shuffle(targets)
while doors:
door = doors.pop()
target = targets.pop()
connect_entrance(world, door, target, player)
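# shuffle the skull woods holes and doors among their own drop targets and section exits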
def skull_woods_shuffle(world, player):
connect_random(world, ['Skull Woods First Section Hole (East)', 'Skull Woods First Section Hole (West)', 'Skull Woods First Section Hole (North)', 'Skull Woods Second Section Hole'],
['Skull Left Drop', 'Skull Pinball', 'Skull Pot Circle', 'Skull Back Drop'], player)
connect_random(world, ['Skull Woods First Section Door', 'Skull Woods Second Section Door (East)', 'Skull Woods Second Section Door (West)'],
['Skull Woods First Section Exit', 'Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)'], player, True)
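# simple dungeon shuffle: single-entrance dungeons swap among themselves, while the multi-entrance
# dungeons (Desert Palace, Turtle Rock and, in some modes, Hyrule Castle) are swapped as whole groups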
def simple_shuffle_dungeons(world, player):
skull_woods_shuffle(world, player)
dungeon_entrances = ['Eastern Palace', 'Tower of Hera', 'Thieves Town', 'Skull Woods Final Section', 'Palace of Darkness', 'Ice Palace', 'Misery Mire', 'Swamp Palace']
dungeon_exits = ['Eastern Palace Exit', 'Tower of Hera Exit', 'Thieves Town Exit', 'Skull Woods Final Section Exit', 'Palace of Darkness Exit', 'Ice Palace Exit', 'Misery Mire Exit', 'Swamp Palace Exit']
if world.mode[player] != 'inverted':
if not world.shuffle_ganon:
connect_two_way(world, 'Ganons Tower', 'Ganons Tower Exit', player)
else:
dungeon_entrances.append('Ganons Tower')
dungeon_exits.append('Ganons Tower Exit')
else:
dungeon_entrances.append('Inverted Agahnims Tower')
dungeon_exits.append('Inverted Agahnims Tower Exit')
# shuffle up single entrance dungeons
connect_random(world, dungeon_entrances, dungeon_exits, player, True)
# mix up 4 door dungeons
multi_dungeons = ['Desert', 'Turtle Rock']
if world.mode[player] == 'open' or (world.mode[player] == 'inverted' and world.shuffle_ganon):
multi_dungeons.append('Hyrule Castle')
random.shuffle(multi_dungeons)
dp_target = multi_dungeons[0]
tr_target = multi_dungeons[1]
if world.mode[player] not in ['open', 'inverted'] or (world.mode[player] == 'inverted' and world.shuffle_ganon is False):
# place hyrule castle as intended
hc_target = 'Hyrule Castle'
else:
hc_target = multi_dungeons[2]
# door shuffle should restrict hyrule castle to the light world due to sanc being limited to the LW
if world.doorShuffle[player] != 'vanilla' and hc_target == 'Turtle Rock':
swap_w_dp = random.choice([True, False])
if swap_w_dp:
hc_target, dp_target = dp_target, hc_target
else:
hc_target, tr_target = tr_target, hc_target
# ToDo improve this?
if world.mode[player] != 'inverted':
if hc_target == 'Hyrule Castle':
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_two_way(world, 'Hyrule Castle Entrance (East)', 'Hyrule Castle Exit (East)', player)
connect_two_way(world, 'Hyrule Castle Entrance (West)', 'Hyrule Castle Exit (West)', player)
connect_two_way(world, 'Agahnims Tower', 'Agahnims Tower Exit', player)
elif hc_target == 'Desert':
connect_two_way(world, 'Desert Palace Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_two_way(world, 'Desert Palace Entrance (East)', 'Hyrule Castle Exit (East)', player)
connect_two_way(world, 'Desert Palace Entrance (West)', 'Hyrule Castle Exit (West)', player)
connect_two_way(world, 'Desert Palace Entrance (North)', 'Agahnims Tower Exit', player)
elif hc_target == 'Turtle Rock':
connect_two_way(world, 'Turtle Rock', 'Hyrule Castle Exit (South)', player)
connect_two_way(world, 'Turtle Rock Isolated Ledge Entrance', 'Hyrule Castle Exit (East)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (West)', 'Hyrule Castle Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Agahnims Tower Exit', player)
if dp_target == 'Hyrule Castle':
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Desert Palace Exit (South)', player)
connect_two_way(world, 'Hyrule Castle Entrance (East)', 'Desert Palace Exit (East)', player)
connect_two_way(world, 'Hyrule Castle Entrance (West)', 'Desert Palace Exit (West)', player)
connect_two_way(world, 'Agahnims Tower', 'Desert Palace Exit (North)', player)
elif dp_target == 'Desert':
connect_two_way(world, 'Desert Palace Entrance (South)', 'Desert Palace Exit (South)', player)
connect_two_way(world, 'Desert Palace Entrance (East)', 'Desert Palace Exit (East)', player)
connect_two_way(world, 'Desert Palace Entrance (West)', 'Desert Palace Exit (West)', player)
connect_two_way(world, 'Desert Palace Entrance (North)', 'Desert Palace Exit (North)', player)
elif dp_target == 'Turtle Rock':
connect_two_way(world, 'Turtle Rock', 'Desert Palace Exit (South)', player)
connect_two_way(world, 'Turtle Rock Isolated Ledge Entrance', 'Desert Palace Exit (East)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (West)', 'Desert Palace Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Desert Palace Exit (North)', player)
if tr_target == 'Hyrule Castle':
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Turtle Rock Exit (Front)', player)
connect_two_way(world, 'Hyrule Castle Entrance (East)', 'Turtle Rock Ledge Exit (East)', player)
connect_two_way(world, 'Hyrule Castle Entrance (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Agahnims Tower', 'Turtle Rock Isolated Ledge Exit', player)
elif tr_target == 'Desert':
connect_two_way(world, 'Desert Palace Entrance (South)', 'Turtle Rock Exit (Front)', player)
connect_two_way(world, 'Desert Palace Entrance (North)', 'Turtle Rock Ledge Exit (East)', player)
connect_two_way(world, 'Desert Palace Entrance (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Desert Palace Entrance (East)', 'Turtle Rock Isolated Ledge Exit', player)
elif tr_target == 'Turtle Rock':
connect_two_way(world, 'Turtle Rock', 'Turtle Rock Exit (Front)', player)
connect_two_way(world, 'Turtle Rock Isolated Ledge Entrance', 'Turtle Rock Isolated Ledge Exit', player)
connect_two_way(world, 'Dark Death Mountain Ledge (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Turtle Rock Ledge Exit (East)', player)
else:
if hc_target == 'Hyrule Castle':
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_two_way(world, 'Hyrule Castle Entrance (East)', 'Hyrule Castle Exit (East)', player)
connect_two_way(world, 'Hyrule Castle Entrance (West)', 'Hyrule Castle Exit (West)', player)
connect_two_way(world, 'Inverted Ganons Tower', 'Inverted Ganons Tower Exit', player)
elif hc_target == 'Desert':
connect_two_way(world, 'Desert Palace Entrance (South)', 'Hyrule Castle Exit (South)', player)
connect_two_way(world, 'Desert Palace Entrance (East)', 'Hyrule Castle Exit (East)', player)
connect_two_way(world, 'Desert Palace Entrance (West)', 'Hyrule Castle Exit (West)', player)
connect_two_way(world, 'Desert Palace Entrance (North)', 'Inverted Ganons Tower Exit', player)
elif hc_target == 'Turtle Rock':
connect_two_way(world, 'Turtle Rock', 'Hyrule Castle Exit (South)', player)
connect_two_way(world, 'Turtle Rock Isolated Ledge Entrance', 'Inverted Ganons Tower Exit', player)
connect_two_way(world, 'Dark Death Mountain Ledge (West)', 'Hyrule Castle Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Hyrule Castle Exit (East)', player)
if dp_target == 'Hyrule Castle':
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Desert Palace Exit (South)', player)
connect_two_way(world, 'Hyrule Castle Entrance (East)', 'Desert Palace Exit (East)', player)
connect_two_way(world, 'Hyrule Castle Entrance (West)', 'Desert Palace Exit (West)', player)
connect_two_way(world, 'Inverted Ganons Tower', 'Desert Palace Exit (North)', player)
elif dp_target == 'Desert':
connect_two_way(world, 'Desert Palace Entrance (South)', 'Desert Palace Exit (South)', player)
connect_two_way(world, 'Desert Palace Entrance (East)', 'Desert Palace Exit (East)', player)
connect_two_way(world, 'Desert Palace Entrance (West)', 'Desert Palace Exit (West)', player)
connect_two_way(world, 'Desert Palace Entrance (North)', 'Desert Palace Exit (North)', player)
elif dp_target == 'Turtle Rock':
connect_two_way(world, 'Turtle Rock', 'Desert Palace Exit (South)', player)
connect_two_way(world, 'Turtle Rock Isolated Ledge Entrance', 'Desert Palace Exit (East)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (West)', 'Desert Palace Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Desert Palace Exit (North)', player)
if tr_target == 'Hyrule Castle':
connect_two_way(world, 'Hyrule Castle Entrance (South)', 'Turtle Rock Exit (Front)', player)
connect_two_way(world, 'Hyrule Castle Entrance (East)', 'Turtle Rock Ledge Exit (East)', player)
connect_two_way(world, 'Hyrule Castle Entrance (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Inverted Ganons Tower', 'Turtle Rock Isolated Ledge Exit', player)
elif tr_target == 'Desert':
connect_two_way(world, 'Desert Palace Entrance (South)', 'Turtle Rock Exit (Front)', player)
connect_two_way(world, 'Desert Palace Entrance (North)', 'Turtle Rock Ledge Exit (East)', player)
connect_two_way(world, 'Desert Palace Entrance (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Desert Palace Entrance (East)', 'Turtle Rock Isolated Ledge Exit', player)
elif tr_target == 'Turtle Rock':
connect_two_way(world, 'Turtle Rock', 'Turtle Rock Exit (Front)', player)
connect_two_way(world, 'Turtle Rock Isolated Ledge Entrance', 'Turtle Rock Isolated Ledge Exit', player)
connect_two_way(world, 'Dark Death Mountain Ledge (West)', 'Turtle Rock Ledge Exit (West)', player)
connect_two_way(world, 'Dark Death Mountain Ledge (East)', 'Turtle Rock Ledge Exit (East)', player)
def unbias_some_entrances(Dungeon_Exits, Cave_Exits, Old_Man_House, Cave_Three_Exits):
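    """Randomly reorder each list-typed exit group in the given tables in place
    (tuple-typed groups keep their fixed order), apply small fixups to the Paradox
    Cave and Turtle Rock groups, then freeze every list group as a tuple."""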
def shuffle_lists_in_list(ls):
for i, item in enumerate(ls):
if isinstance(item, list):
ls[i] = random.sample(item, len(item))
def tuplize_lists_in_list(ls):
for i, item in enumerate(ls):
if isinstance(item, list):
ls[i] = tuple(item)
shuffle_lists_in_list(Dungeon_Exits)
shuffle_lists_in_list(Cave_Exits)
shuffle_lists_in_list(Old_Man_House)
shuffle_lists_in_list(Cave_Three_Exits)
    # paradox fixup: if 'Paradox Cave Exit (Bottom)' landed in the first slot of its
    # shuffled three-exit group, swap it with a random one of the other two exits
    if Cave_Three_Exits[1][0] == "Paradox Cave Exit (Bottom)":
        i = random.randint(1, 2)
        Cave_Three_Exits[1][0] = Cave_Three_Exits[1][i]
        Cave_Three_Exits[1][i] = "Paradox Cave Exit (Bottom)"
    # TR fixup: make sure 'Turtle Rock Ledge Exit (East)' ends up first in the Turtle Rock exit group
tr_fixup = False
for i, item in enumerate(Dungeon_Exits[-1]):
if 'Turtle Rock Ledge Exit (East)' == item:
tr_fixup = True
if 0 != i:
Dungeon_Exits[-1][i] = Dungeon_Exits[-1][0]
Dungeon_Exits[-1][0] = 'Turtle Rock Ledge Exit (East)'
break
    if not tr_fixup:
        raise RuntimeError("TR entrance shuffle fixup didn't happen")
tuplize_lists_in_list(Dungeon_Exits)
tuplize_lists_in_list(Cave_Exits)
tuplize_lists_in_list(Old_Man_House)
tuplize_lists_in_list(Cave_Three_Exits)
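

# A minimal, illustrative sketch of one plausible way to drive unbias_some_entrances:
# hand it fresh copies of the *_Base tables defined below so the module-level constants
# are never mutated. The _unbias_example name and the use of copy.deepcopy are
# assumptions for this sketch only; nothing in this module calls it, and it does not
# claim to match how the rest of the randomizer invokes the function.
def _unbias_example():
    import copy
    dungeon_exits = copy.deepcopy(Dungeon_Exits_Base)
    cave_exits = copy.deepcopy(Cave_Exits_Base)
    old_man_house = copy.deepcopy(Old_Man_House_Base)
    cave_three_exits = copy.deepcopy(Cave_Three_Exits_Base)
    unbias_some_entrances(dungeon_exits, cave_exits, old_man_house, cave_three_exits)
    # list-typed groups come back re-shuffled and frozen into tuples, tuple-typed groups
    # keep their fixed order, and the fixups above guarantee e.g. that
    # 'Turtle Rock Ledge Exit (East)' sits first in the Turtle Rock exit group
    return dungeon_exits, cave_exits, old_man_house, cave_three_exits
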
LW_Dungeon_Entrances = ['Desert Palace Entrance (South)',
'Desert Palace Entrance (West)',
'Desert Palace Entrance (North)',
'Eastern Palace',
'Tower of Hera',
'Hyrule Castle Entrance (West)',
'Hyrule Castle Entrance (East)',
'Agahnims Tower']
LW_Dungeon_Entrances_Must_Exit = ['Desert Palace Entrance (East)']
DW_Dungeon_Entrances = ['Thieves Town',
'Skull Woods Final Section',
'Ice Palace',
'Misery Mire',
'Palace of Darkness',
'Swamp Palace',
'Turtle Rock',
'Dark Death Mountain Ledge (West)']
DW_Dungeon_Entrances_Must_Exit = ['Dark Death Mountain Ledge (East)',
'Turtle Rock Isolated Ledge Entrance']
Dungeon_Exits_Base = [['Desert Palace Exit (South)', 'Desert Palace Exit (West)', 'Desert Palace Exit (East)'],
'Desert Palace Exit (North)',
'Eastern Palace Exit',
'Tower of Hera Exit',
'Thieves Town Exit',
'Skull Woods Final Section Exit',
'Ice Palace Exit',
'Misery Mire Exit',
'Palace of Darkness Exit',
'Swamp Palace Exit',
'Agahnims Tower Exit',
['Turtle Rock Ledge Exit (East)',
'Turtle Rock Exit (Front)', 'Turtle Rock Ledge Exit (West)', 'Turtle Rock Isolated Ledge Exit']]
DW_Entrances_Must_Exit = ['Bumper Cave (Top)', 'Hookshot Cave Back Entrance']
Two_Door_Caves_Directional = [('Bumper Cave (Bottom)', 'Bumper Cave (Top)'),
('Hookshot Cave', 'Hookshot Cave Back Entrance')]
Two_Door_Caves = [('Elder House (East)', 'Elder House (West)'),
('Two Brothers House (East)', 'Two Brothers House (West)'),
('Superbunny Cave (Bottom)', 'Superbunny Cave (Top)')]
Old_Man_Entrances = ['Old Man Cave (East)',
'Old Man House (Top)',
'Death Mountain Return Cave (East)',
'Spectacle Rock Cave',
'Spectacle Rock Cave Peak',
'Spectacle Rock Cave (Bottom)']
Old_Man_House_Base = [['Old Man House Exit (Bottom)', 'Old Man House Exit (Top)']]
Cave_Exits_Base = [['Elder House Exit (East)', 'Elder House Exit (West)'],
['Two Brothers House Exit (East)', 'Two Brothers House Exit (West)'],
['Death Mountain Return Cave Exit (West)', 'Death Mountain Return Cave Exit (East)'],
['Fairy Ascension Cave Exit (Bottom)', 'Fairy Ascension Cave Exit (Top)'],
['Bumper Cave Exit (Top)', 'Bumper Cave Exit (Bottom)'],
['Hookshot Cave Exit (South)', 'Hookshot Cave Exit (North)']]
Cave_Exits_Base += [('Superbunny Cave Exit (Bottom)', 'Superbunny Cave Exit (Top)'),
('Spiral Cave Exit (Top)', 'Spiral Cave Exit')]
Cave_Three_Exits_Base = [('Spectacle Rock Cave Exit (Peak)', 'Spectacle Rock Cave Exit (Top)',
'Spectacle Rock Cave Exit'),
                         ['Paradox Cave Exit (Top)', 'Paradox Cave Exit (Middle)', 'Paradox Cave Exit (Bottom)']]
LW_Entrances = ['Elder House (East)',
'Elder House (West)',
'Two Brothers House (East)',
'Two Brothers House (West)',
'Old Man Cave (West)',
'Old Man House (Bottom)',
'Death Mountain Return Cave (West)',
'Paradox Cave (Bottom)',
'Paradox Cave (Middle)',
'Paradox Cave (Top)',
'Fairy Ascension Cave (Bottom)',
'Fairy Ascension Cave (Top)',
'Spiral Cave',
'Spiral Cave (Bottom)']
DW_Entrances = ['Bumper Cave (Bottom)',
'Superbunny Cave (Top)',
'Superbunny Cave (Bottom)',
'Hookshot Cave']
Bomb_Shop_Multi_Cave_Doors = ['Hyrule Castle Entrance (South)',
'Misery Mire',
'Thieves Town',
'Bumper Cave (Bottom)',
'Swamp Palace',
'Hyrule Castle Secret Entrance Stairs',
'Skull Woods First Section Door',
'Skull Woods Second Section Door (East)',
'Skull Woods Second Section Door (West)',
'Skull Woods Final Section',
'Ice Palace',
'Turtle Rock',
'Dark Death Mountain Ledge (West)',
'Dark Death Mountain Ledge (East)',
'Superbunny Cave (Top)',
'Superbunny Cave (Bottom)',
'Hookshot Cave',
'Ganons Tower',
'Desert Palace Entrance (South)',
'Tower of Hera',
'Two Brothers House (West)',
'Old Man Cave (East)',
'Old Man House (Bottom)',
'Old Man House (Top)',
'Death Mountain Return Cave (East)',
'Death Mountain Return Cave (West)',
'Spectacle Rock Cave Peak',
'Spectacle Rock Cave',
'Spectacle Rock Cave (Bottom)',
'Paradox Cave (Bottom)',
'Paradox Cave (Middle)',
'Paradox Cave (Top)',
'Fairy Ascension Cave (Bottom)',
'Fairy Ascension Cave (Top)',
'Spiral Cave',
'Spiral Cave (Bottom)',
'Palace of Darkness',
'Hyrule Castle Entrance (West)',
'Hyrule Castle Entrance (East)',
'Agahnims Tower',
'Desert Palace Entrance (West)',
'Desert Palace Entrance (North)'
# all entrances below this line would be possible for blacksmith_hut
# if it were not for dwarf checking multi-entrance caves
]
Blacksmith_Multi_Cave_Doors = ['Eastern Palace',
'Elder House (East)',
'Elder House (West)',
'Two Brothers House (East)',
'Old Man Cave (West)',
'Sanctuary',
'Lumberjack Tree Cave',
'Lost Woods Hideout Stump',
'North Fairy Cave',
'Bat Cave Cave',
'Kakariko Well Cave']
LW_Single_Cave_Doors = ['Blinds Hideout',
'Lake Hylia Fairy',
'Light Hype Fairy',
'Desert Fairy',
'Chicken House',
'Aginahs Cave',
'Sahasrahlas Hut',
'Cave Shop (Lake Hylia)',
'Blacksmiths Hut',
'Sick Kids House',
'Lost Woods Gamble',
'Fortune Teller (Light)',
'Snitch Lady (East)',
'Snitch Lady (West)',
'Bush Covered House',
'Tavern (Front)',
'Light World Bomb Hut',
'Kakariko Shop',
'Mini Moldorm Cave',
'Long Fairy Cave',
'Good Bee Cave',
'20 Rupee Cave',
'50 Rupee Cave',
'Ice Rod Cave',
'Library',
'Potion Shop',
'Dam',
'Lumberjack House',
'Lake Hylia Fortune Teller',
'Kakariko Gamble Game',
'Waterfall of Wishing',
'Capacity Upgrade',
'Bonk Rock Cave',
'Graveyard Cave',
'Checkerboard Cave',
'Cave 45',
'Kings Grave',
'Bonk Fairy (Light)',
'Hookshot Fairy',
'Mimic Cave']
DW_Single_Cave_Doors = ['Bonk Fairy (Dark)',
'Dark Sanctuary Hint',
'Dark Lake Hylia Fairy',
'C-Shaped House',
'Big Bomb Shop',
'Dark Death Mountain Fairy',
'Dark Lake Hylia Shop',
'Dark World Shop',
'Red Shield Shop',
'Mire Shed',
'East Dark World Hint',
'Dark Desert Hint',
'Spike Cave',
'Palace of Darkness Hint',
'Dark Lake Hylia Ledge Spike Cave',
'Cave Shop (Dark Death Mountain)',
'Dark World Potion Shop',
'Pyramid Fairy',
'Archery Game',
'Dark World Lumberjack Shop',
'Hype Cave',
'Brewery',
'Dark Lake Hylia Ledge Hint',
'Chest Game',
'Dark Desert Fairy',
'Dark Lake Hylia Ledge Fairy',
'Fortune Teller (Dark)',
'Dark World Hammer Peg Cave']
Blacksmith_Single_Cave_Doors = ['Blinds Hideout',
'Lake Hylia Fairy',
'Light Hype Fairy',
'Desert Fairy',
'Chicken House',
'Aginahs Cave',
'Sahasrahlas Hut',
'Cave Shop (Lake Hylia)',
'Blacksmiths Hut',
'Sick Kids House',
'Lost Woods Gamble',
'Fortune Teller (Light)',
'Snitch Lady (East)',
'Snitch Lady (West)',
'Bush Covered House',
'Tavern (Front)',
'Light World Bomb Hut',
'Kakariko Shop',
'Mini Moldorm Cave',
'Long Fairy Cave',
'Good Bee Cave',
'20 Rupee Cave',
'50 Rupee Cave',
'Ice Rod Cave',
'Library',
'Potion Shop',
'Dam',
'Lumberjack House',
'Lake Hylia Fortune Teller',
'Kakariko Gamble Game']
Bomb_Shop_Single_Cave_Doors = ['Waterfall of Wishing',
'Capacity Upgrade',
'Bonk Rock Cave',
'Graveyard Cave',
'Checkerboard Cave',
'Cave 45',
'Kings Grave',
'Bonk Fairy (Light)',
'Hookshot Fairy',
'East Dark World Hint',
'Palace of Darkness Hint',
'Dark Lake Hylia Fairy',
'Dark Lake Hylia Ledge Fairy',
'Dark Lake Hylia Ledge Spike Cave',
'Dark Lake Hylia Ledge Hint',
'Hype Cave',
'Bonk Fairy (Dark)',
'Brewery',
'C-Shaped House',
'Chest Game',
'Dark World Hammer Peg Cave',
'Red Shield Shop',
'Dark Sanctuary Hint',
'Fortune Teller (Dark)',
'Dark World Shop',
'Dark World Lumberjack Shop',
'Dark World Potion Shop',
'Archery Game',
'Mire Shed',
'Dark Desert Hint',
'Dark Desert Fairy',
'Spike Cave',
'Cave Shop (Dark Death Mountain)',
'Dark Death Mountain Fairy',
'Mimic Cave',
'Big Bomb Shop',
'Dark Lake Hylia Shop']
Single_Cave_Doors = ['Pyramid Fairy']
Single_Cave_Targets = ['Blinds Hideout',
'Bonk Fairy (Light)',
'Lake Hylia Healer Fairy',
'Swamp Healer Fairy',
'Desert Healer Fairy',
'Kings Grave',
'Chicken House',
'Aginahs Cave',
'Sahasrahlas Hut',
'Cave Shop (Lake Hylia)',
'Sick Kids House',
'Lost Woods Gamble',
'Fortune Teller (Light)',
'Snitch Lady (East)',
'Snitch Lady (West)',
'Bush Covered House',
'Tavern (Front)',
'Light World Bomb Hut',
'Kakariko Shop',
'Cave 45',
'Graveyard Cave',
'Checkerboard Cave',
'Mini Moldorm Cave',
'Long Fairy Cave',
'Good Bee Cave',
'20 Rupee Cave',
'50 Rupee Cave',
'Ice Rod Cave',
'Bonk Rock Cave',
'Library',
'Potion Shop',
'Hookshot Fairy',
'Waterfall of Wishing',
'Capacity Upgrade',
'Pyramid Fairy',
'East Dark World Hint',
'Palace of Darkness Hint',
'Dark Lake Hylia Healer Fairy',
'Dark Lake Hylia Ledge Healer Fairy',
'Dark Lake Hylia Ledge Spike Cave',
'Dark Lake Hylia Ledge Hint',
'Hype Cave',
'Bonk Fairy (Dark)',
'Brewery',
'C-Shaped House',
'Chest Game',
'Dark World Hammer Peg Cave',
'Red Shield Shop',
'Dark Sanctuary Hint',
'Fortune Teller (Dark)',
'Village of Outcasts Shop',
'Dark Lake Hylia Shop',
'Dark World Lumberjack Shop',
'Archery Game',
'Mire Shed',
'Dark Desert Hint',
'Dark Desert Healer Fairy',
'Spike Cave',
'Cave Shop (Dark Death Mountain)',
'Dark Death Mountain Healer Fairy',
'Mimic Cave',
'Dark World Potion Shop',
'Lumberjack House',
'Lake Hylia Fortune Teller',
'Kakariko Gamble Game',
'Dam']
Inverted_LW_Dungeon_Entrances = ['Desert Palace Entrance (South)',
'Eastern Palace',
'Tower of Hera',
'Hyrule Castle Entrance (West)',
'Hyrule Castle Entrance (East)']
Inverted_DW_Dungeon_Entrances = ['Thieves Town',
'Skull Woods Final Section',
'Ice Palace',
'Misery Mire',
'Palace of Darkness',
'Swamp Palace',
'Turtle Rock',
'Dark Death Mountain Ledge (West)',
'Dark Death Mountain Ledge (East)',
'Turtle Rock Isolated Ledge Entrance',
'Inverted Agahnims Tower']
Inverted_LW_Dungeon_Entrances_Must_Exit = ['Desert Palace Entrance (East)']
Inverted_Dungeon_Exits_Base = [['Desert Palace Exit (South)', 'Desert Palace Exit (West)', 'Desert Palace Exit (East)'],
'Desert Palace Exit (North)',
'Eastern Palace Exit',
'Tower of Hera Exit',
'Thieves Town Exit',
'Skull Woods Final Section Exit',
'Ice Palace Exit',
'Misery Mire Exit',
'Palace of Darkness Exit',
'Swamp Palace Exit',
'Inverted Agahnims Tower Exit',
['Turtle Rock Ledge Exit (East)',
'Turtle Rock Exit (Front)', 'Turtle Rock Ledge Exit (West)', 'Turtle Rock Isolated Ledge Exit']]
Inverted_LW_Entrances_Must_Exit = ['Death Mountain Return Cave (West)',
'Two Brothers House (West)']
Inverted_Two_Door_Caves_Directional = [('Old Man Cave (West)', 'Death Mountain Return Cave (West)'),
('Two Brothers House (East)', 'Two Brothers House (West)')]
Inverted_Two_Door_Caves = [('Elder House (East)', 'Elder House (West)'),
('Superbunny Cave (Bottom)', 'Superbunny Cave (Top)'),
('Hookshot Cave', 'Hookshot Cave Back Entrance')]
Inverted_Old_Man_Entrances = ['Dark Death Mountain Fairy',
'Spike Cave']
Inverted_LW_Entrances = ['Elder House (East)',
'Elder House (West)',
'Two Brothers House (East)',
'Old Man Cave (East)',
'Old Man Cave (West)',
'Old Man House (Bottom)',
'Old Man House (Top)',
'Death Mountain Return Cave (East)',
'Paradox Cave (Bottom)',
'Paradox Cave (Middle)',
'Paradox Cave (Top)',
'Spectacle Rock Cave',
'Spectacle Rock Cave Peak',
'Spectacle Rock Cave (Bottom)',
'Fairy Ascension Cave (Bottom)',
'Fairy Ascension Cave (Top)',
'Spiral Cave',
'Spiral Cave (Bottom)']
Inverted_DW_Entrances = ['Bumper Cave (Bottom)',
'Superbunny Cave (Top)',
'Superbunny Cave (Bottom)',
'Hookshot Cave',
'Hookshot Cave Back Entrance']
Inverted_Bomb_Shop_Multi_Cave_Doors = ['Hyrule Castle Entrance (South)',
'Misery Mire',
'Thieves Town',
'Bumper Cave (Bottom)',
'Swamp Palace',
'Hyrule Castle Secret Entrance Stairs',
'Skull Woods First Section Door',
'Skull Woods Second Section Door (East)',
'Skull Woods Second Section Door (West)',
'Skull Woods Final Section',
'Ice Palace',
'Turtle Rock',
'Dark Death Mountain Ledge (West)',
'Dark Death Mountain Ledge (East)',
'Superbunny Cave (Top)',
'Superbunny Cave (Bottom)',
'Hookshot Cave',
'Inverted Agahnims Tower',
'Desert Palace Entrance (South)',
'Tower of Hera',
'Two Brothers House (West)',
'Old Man Cave (East)',
'Old Man House (Bottom)',
'Old Man House (Top)',
'Death Mountain Return Cave (East)',
'Death Mountain Return Cave (West)',
'Spectacle Rock Cave Peak',
'Paradox Cave (Bottom)',
'Paradox Cave (Middle)',
'Paradox Cave (Top)',
'Fairy Ascension Cave (Bottom)',
'Fairy Ascension Cave (Top)',
'Spiral Cave',
'Spiral Cave (Bottom)',
'Palace of Darkness',
'Hyrule Castle Entrance (West)',
'Hyrule Castle Entrance (East)',
'Inverted Ganons Tower',
'Desert Palace Entrance (West)',
'Desert Palace Entrance (North)']
Inverted_LW_Single_Cave_Doors = LW_Single_Cave_Doors + ['Inverted Big Bomb Shop']
Inverted_DW_Single_Cave_Doors = ['Bonk Fairy (Dark)',
'Inverted Dark Sanctuary',
'Inverted Links House',
'Dark Lake Hylia Fairy',
'C-Shaped House',
'Bumper Cave (Top)',
'Dark Lake Hylia Shop',
'Dark World Shop',
'Red Shield Shop',
'Mire Shed',
'East Dark World Hint',
'Dark Desert Hint',
'Palace of Darkness Hint',
'Dark Lake Hylia Ledge Spike Cave',
'Cave Shop (Dark Death Mountain)',
'Dark World Potion Shop',
'Pyramid Fairy',
'Archery Game',
'Dark World Lumberjack Shop',
'Hype Cave',
'Brewery',
'Dark Lake Hylia Ledge Hint',
'Chest Game',
'Dark Desert Fairy',
'Dark Lake Hylia Ledge Fairy',
'Fortune Teller (Dark)',
'Dark World Hammer Peg Cave']
Inverted_Bomb_Shop_Single_Cave_Doors = ['Waterfall of Wishing',
'Capacity Upgrade',
'Bonk Rock Cave',
'Graveyard Cave',
'Checkerboard Cave',
'Cave 45',
'Kings Grave',
'Bonk Fairy (Light)',
'Hookshot Fairy',
'East Dark World Hint',
'Palace of Darkness Hint',
'Dark Lake Hylia Fairy',
'Dark Lake Hylia Ledge Fairy',
'Dark Lake Hylia Ledge Spike Cave',
'Dark Lake Hylia Ledge Hint',
'Hype Cave',
'Bonk Fairy (Dark)',
'Brewery',
'C-Shaped House',
'Chest Game',
'Dark World Hammer Peg Cave',
'Red Shield Shop',
'Inverted Dark Sanctuary',
'Fortune Teller (Dark)',
'Dark World Shop',
'Dark World Lumberjack Shop',
'Dark World Potion Shop',
'Archery Game',
'Mire Shed',
'Dark Desert Hint',
'Dark Desert Fairy',
'Spike Cave',
'Cave Shop (Dark Death Mountain)',
'Bumper Cave (Top)',
'Mimic Cave',
'Dark Lake Hylia Shop',
'Inverted Links House',
'Inverted Big Bomb Shop']
Inverted_Single_Cave_Targets = ['Blinds Hideout',
'Bonk Fairy (Light)',
'Lake Hylia Healer Fairy',
'Swamp Healer Fairy',
'Desert Healer Fairy',
'Kings Grave',
'Chicken House',
'Aginahs Cave',
'Sahasrahlas Hut',
'Cave Shop (Lake Hylia)',
'Sick Kids House',
'Lost Woods Gamble',
'Fortune Teller (Light)',
'Snitch Lady (East)',
'Snitch Lady (West)',
'Bush Covered House',
'Tavern (Front)',
'Light World Bomb Hut',
'Kakariko Shop',
'Cave 45',
'Graveyard Cave',
'Checkerboard Cave',
'Mini Moldorm Cave',
'Long Fairy Cave',
'Good Bee Cave',
'20 Rupee Cave',
'50 Rupee Cave',
'Ice Rod Cave',
'Bonk Rock Cave',
'Library',
'Potion Shop',
'Hookshot Fairy',
'Waterfall of Wishing',
'Capacity Upgrade',
'Pyramid Fairy',
'East Dark World Hint',
'Palace of Darkness Hint',
'Dark Lake Hylia Healer Fairy',
'Dark Lake Hylia Ledge Healer Fairy',
'Dark Lake Hylia Ledge Spike Cave',
'Dark Lake Hylia Ledge Hint',
'Hype Cave',
'Bonk Fairy (Dark)',
'Brewery',
'C-Shaped House',
'Chest Game',
'Dark World Hammer Peg Cave',
'Red Shield Shop',
'Fortune Teller (Dark)',
'Village of Outcasts Shop',
'Dark Lake Hylia Shop',
'Dark World Lumberjack Shop',
'Archery Game',
'Mire Shed',
'Dark Desert Hint',
'Dark Desert Healer Fairy',
'Spike Cave',
'Cave Shop (Dark Death Mountain)',
'Dark Death Mountain Healer Fairy',
'Mimic Cave',
'Dark World Potion Shop',
'Lumberjack House',
'Lake Hylia Fortune Teller',
'Kakariko Gamble Game',
'Dam']
# in inverted we put dark sanctuary in west dark world for now
Inverted_Dark_Sanctuary_Doors = ['Inverted Dark Sanctuary',
'Fortune Teller (Dark)',
'Brewery',
'C-Shaped House',
'Chest Game',
'Dark World Lumberjack Shop',
'Red Shield Shop',
'Bumper Cave (Bottom)',
'Bumper Cave (Top)',
'Thieves Town']
Isolated_LH_Doors = ['Kings Grave',
'Waterfall of Wishing',
'Desert Palace Entrance (South)',
'Desert Palace Entrance (North)',
'Capacity Upgrade',
'Ice Palace',
'Skull Woods Final Section',
'Dark World Hammer Peg Cave',
'Turtle Rock Isolated Ledge Entrance']
# these are connections that cannot be shuffled and always exist; they link together the separate
# parts of the world that we divide into regions (a sketch of how these pairs might be consumed follows the list)
mandatory_connections = [('Links House S&Q', 'Links House'),
('Sanctuary S&Q', 'Sanctuary'),
('Old Man S&Q', 'Old Man House'),
('Lake Hylia Central Island Pier', 'Lake Hylia Central Island'),
('Lake Hylia Central Island Teleporter', 'Dark Lake Hylia Central Island'),
('Zoras River', 'Zoras River'),
('Kings Grave Outer Rocks', 'Kings Grave Area'),
('Kings Grave Inner Rocks', 'Light World'),
('Kings Grave Mirror Spot', 'Kings Grave Area'),
('Kakariko Well (top to bottom)', 'Kakariko Well (bottom)'),
('Master Sword Meadow', 'Master Sword Meadow'),
('Hobo Bridge', 'Hobo Bridge'),
('Bat Cave Drop Ledge', 'Bat Cave Drop Ledge'),
('Bat Cave Door', 'Bat Cave (left)'),
('Lost Woods Hideout (top to bottom)', 'Lost Woods Hideout (bottom)'),
('Lumberjack Tree (top to bottom)', 'Lumberjack Tree (bottom)'),
('Desert Palace Stairs', 'Desert Palace Stairs'),
('Desert Palace Stairs Drop', 'Light World'),
('Desert Palace Entrance (North) Rocks', 'Desert Palace Entrance (North) Spot'),
('Desert Ledge Return Rocks', 'Desert Ledge'),
('Hyrule Castle Ledge Courtyard Drop', 'Hyrule Castle Courtyard'),
('Hyrule Castle Main Gate', 'Hyrule Castle Courtyard'),
('Sewer Drop', 'Sewers Rat Path'),
('Flute Spot 1', 'Death Mountain'),
('Death Mountain Entrance Rock', 'Death Mountain Entrance'),
('Death Mountain Entrance Drop', 'Light World'),
('Spectacle Rock Cave Drop', 'Spectacle Rock Cave (Bottom)'),
('Spectacle Rock Cave Peak Drop', 'Spectacle Rock Cave (Bottom)'),
('Death Mountain Return Ledge Drop', 'Light World'),
('Old Man House Front to Back', 'Old Man House Back'),
('Old Man House Back to Front', 'Old Man House'),
('Broken Bridge (West)', 'East Death Mountain (Bottom)'),
('Broken Bridge (East)', 'Death Mountain'),
('East Death Mountain Drop', 'East Death Mountain (Bottom)'),
('Spiral Cave Ledge Access', 'Spiral Cave Ledge'),
('Spiral Cave Ledge Drop', 'East Death Mountain (Bottom)'),
('Spiral Cave (top to bottom)', 'Spiral Cave (Bottom)'),
('East Death Mountain (Top)', 'East Death Mountain (Top)'),
('Death Mountain (Top)', 'Death Mountain (Top)'),
('Death Mountain Drop', 'Death Mountain'),
('Spectacle Rock Drop', 'Death Mountain (Top)'),
('Top of Pyramid', 'East Dark World'),
('Dark Lake Hylia Drop (East)', 'Dark Lake Hylia'),
('Dark Lake Hylia Drop (South)', 'Dark Lake Hylia'),
('Dark Lake Hylia Teleporter', 'Dark Lake Hylia'),
('Dark Lake Hylia Ledge', 'Dark Lake Hylia Ledge'),
('Dark Lake Hylia Ledge Drop', 'Dark Lake Hylia'),
('East Dark World Pier', 'East Dark World'),
('Lake Hylia Island Mirror Spot', 'Lake Hylia Island'),
('Lake Hylia Central Island Mirror Spot', 'Lake Hylia Central Island'),
('Hyrule Castle Ledge Mirror Spot', 'Hyrule Castle Ledge'),
('South Dark World Bridge', 'South Dark World'),
('East Dark World Bridge', 'East Dark World'),
('Maze Race Mirror Spot', 'Maze Race Ledge'),
('Village of Outcasts Heavy Rock', 'West Dark World'),
('Village of Outcasts Drop', 'South Dark World'),
('Village of Outcasts Eastern Rocks', 'Hammer Peg Area'),
('Village of Outcasts Pegs', 'Dark Grassy Lawn'),
('Peg Area Rocks', 'West Dark World'),
('Grassy Lawn Pegs', 'West Dark World'),
('Bat Cave Drop Ledge Mirror Spot', 'Bat Cave Drop Ledge'),
('East Dark World River Pier', 'East Dark World'),
('West Dark World Gap', 'West Dark World'),
('East Dark World Broken Bridge Pass', 'East Dark World'),
('Northeast Dark World Broken Bridge Pass', 'Northeast Dark World'),
('Bumper Cave Entrance Rock', 'Bumper Cave Entrance'),
('Bumper Cave Entrance Drop', 'West Dark World'),
('Bumper Cave Entrance Mirror Spot', 'Death Mountain Entrance'),
('Bumper Cave Ledge Drop', 'West Dark World'),
('Bumper Cave Ledge Mirror Spot', 'Death Mountain Return Ledge'),
('Skull Woods Forest', 'Skull Woods Forest'),
('Desert Ledge Mirror Spot', 'Desert Ledge'),
('Desert Ledge (Northeast) Mirror Spot', 'Desert Ledge (Northeast)'),
('Desert Palace Entrance (North) Mirror Spot', 'Desert Palace Entrance (North) Spot'),
('Dark Desert Teleporter', 'Dark Desert'),
('Desert Palace Stairs Mirror Spot', 'Desert Palace Stairs'),
('East Hyrule Teleporter', 'East Dark World'),
('South Hyrule Teleporter', 'South Dark World'),
('Kakariko Teleporter', 'West Dark World'),
('Death Mountain Teleporter', 'Dark Death Mountain (West Bottom)'),
('Paradox Cave Push Block Reverse', 'Paradox Cave Chest Area'),
('Paradox Cave Push Block', 'Paradox Cave Front'),
('Paradox Cave Bomb Jump', 'Paradox Cave'),
('Paradox Cave Drop', 'Paradox Cave Chest Area'),
('Light World Death Mountain Shop', 'Light World Death Mountain Shop'),
('Fairy Ascension Rocks', 'Fairy Ascension Plateau'),
('Fairy Ascension Mirror Spot', 'Fairy Ascension Plateau'),
('Fairy Ascension Drop', 'East Death Mountain (Bottom)'),
('Fairy Ascension Ledge Drop', 'Fairy Ascension Plateau'),
('Fairy Ascension Ledge', 'Fairy Ascension Ledge'),
('Fairy Ascension Cave Climb', 'Fairy Ascension Cave (Top)'),
('Fairy Ascension Cave Pots', 'Fairy Ascension Cave (Bottom)'),
('Fairy Ascension Cave Drop', 'Fairy Ascension Cave (Drop)'),
('Spectacle Rock Mirror Spot', 'Spectacle Rock'),
('Dark Death Mountain Drop (East)', 'Dark Death Mountain (East Bottom)'),
('Dark Death Mountain Drop (West)', 'Dark Death Mountain (West Bottom)'),
('East Death Mountain (Top) Mirror Spot', 'East Death Mountain (Top)'),
('Turtle Rock Teleporter', 'Turtle Rock (Top)'),
('Turtle Rock Drop', 'Dark Death Mountain (Top)'),
('Floating Island Drop', 'Dark Death Mountain (Top)'),
('Floating Island Mirror Spot', 'Death Mountain Floating Island (Light World)'),
('East Death Mountain Teleporter', 'Dark Death Mountain (East Bottom)'),
('Isolated Ledge Mirror Spot', 'Fairy Ascension Ledge'),
('Spiral Cave Mirror Spot', 'Spiral Cave Ledge'),
('Mimic Cave Mirror Spot', 'Mimic Cave Ledge'),
('Cave 45 Mirror Spot', 'Cave 45 Ledge'),
('Graveyard Ledge Mirror Spot', 'Graveyard Ledge'),
('Ganon Drop', 'Bottom of Pyramid'),
('Pyramid Drop', 'East Dark World')
]
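

# A minimal, hypothetical sketch of how the (exit name, region name) pairs in
# mandatory_connections could be consumed. The _apply_mandatory_connections name and the
# connect_region callback are assumptions for this illustration only; they stand in for
# whatever helper the rest of the randomizer actually uses to wire an exit to a region,
# and nothing in this module calls this function.
def _apply_mandatory_connections(world, player, connect_region):
    for exit_name, region_name in mandatory_connections:
        connect_region(world, exit_name, region_name, player)
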
inverted_mandatory_connections = [('Links House S&Q', 'Inverted Links House'),
('Dark Sanctuary S&Q', 'Inverted Dark Sanctuary'),
('Old Man S&Q', 'Old Man House'),
('Castle Ledge S&Q', 'Hyrule Castle Ledge'),
('Lake Hylia Central Island Pier', 'Lake Hylia Central Island'),
('Lake Hylia Island', 'Lake Hylia Island'),
('Zoras River', 'Zoras River'),
('Kings Grave Outer Rocks', 'Kings Grave Area'),
('Kings Grave Inner Rocks', 'Light World'),
('Kakariko Well (top to bottom)', 'Kakariko Well (bottom)'),
('Master Sword Meadow', 'Master Sword Meadow'),
('Hobo Bridge', 'Hobo Bridge'),
('Bat Cave Drop Ledge', 'Bat Cave Drop Ledge'),
('Bat Cave Door', 'Bat Cave (left)'),
('Lost Woods Hideout (top to bottom)', 'Lost Woods Hideout (bottom)'),
('Lumberjack Tree (top to bottom)', 'Lumberjack Tree (bottom)'),
('Desert Palace Stairs', 'Desert Palace Stairs'),
('Desert Palace Stairs Drop', 'Light World'),
('Desert Palace Entrance (North) Rocks', 'Desert Palace Entrance (North) Spot'),
('Desert Ledge Return Rocks', 'Desert Ledge'),
('Sewer Drop', 'Sewers Rat Path'),
('Death Mountain Entrance Rock', 'Death Mountain Entrance'),
('Death Mountain Entrance Drop', 'Light World'),
('Spectacle Rock Cave Drop', 'Spectacle Rock Cave (Bottom)'),
('Spectacle Rock Cave Peak Drop', 'Spectacle Rock Cave (Bottom)'),
('Death Mountain Return Ledge Drop', 'Light World'),
('Old Man House Front to Back', 'Old Man House Back'),
('Old Man House Back to Front', 'Old Man House'),
('Broken Bridge (West)', 'East Death Mountain (Bottom)'),
('Broken Bridge (East)', 'Death Mountain'),
('East Death Mountain Drop', 'East Death Mountain (Bottom)'),
('Spiral Cave Ledge Access', 'Spiral Cave Ledge'),
('Spiral Cave Ledge Drop', 'East Death Mountain (Bottom)'),
('Spiral Cave (top to bottom)', 'Spiral Cave (Bottom)'),
('East Death Mountain (Top)', 'East Death Mountain (Top)'),
('Death Mountain (Top)', 'Death Mountain (Top)'),
('Death Mountain Drop', 'Death Mountain'),
('Dark Lake Hylia Drop (East)', 'Dark Lake Hylia'),
('Dark Lake Hylia Drop (South)', 'Dark Lake Hylia'),
('Dark Lake Hylia Teleporter', 'Dark Lake Hylia'),
('Dark Lake Hylia Ledge Pier', 'Dark Lake Hylia Ledge'),
('Dark Lake Hylia Ledge Drop', 'Dark Lake Hylia'),
('East Dark World Pier', 'East Dark World'),
('South Dark World Bridge', 'South Dark World'),
('East Dark World Bridge', 'East Dark World'),
('Village of Outcasts Heavy Rock', 'West Dark World'),
('Village of Outcasts Drop', 'South Dark World'),
('Village of Outcasts Eastern Rocks', 'Hammer Peg Area'),
('Village of Outcasts Pegs', 'Dark Grassy Lawn'),
('Peg Area Rocks', 'West Dark World'),
('Grassy Lawn Pegs', 'West Dark World'),
('East Dark World River Pier', 'Northeast Dark World'),
('West Dark World Gap', 'West Dark World'),
('East Dark World Broken Bridge Pass', 'East Dark World'),
('Northeast Dark World Broken Bridge Pass', 'Northeast Dark World'),
('Bumper Cave Entrance Rock', 'Bumper Cave Entrance'),
('Bumper Cave Entrance Drop', 'West Dark World'),
('Bumper Cave Ledge Drop', 'West Dark World'),
('Skull Woods Forest', 'Skull Woods Forest'),
('Paradox Cave Push Block Reverse', 'Paradox Cave Chest Area'),
('Paradox Cave Push Block', 'Paradox Cave Front'),
('Paradox Cave Bomb Jump', 'Paradox Cave'),
('Paradox Cave Drop', 'Paradox Cave Chest Area'),
('Light World Death Mountain Shop', 'Light World Death Mountain Shop'),
('Fairy Ascension Rocks', 'Fairy Ascension Plateau'),
('Fairy Ascension Drop', 'East Death Mountain (Bottom)'),
('Fairy Ascension Ledge Drop', 'Fairy Ascension Plateau'),
('Fairy Ascension Ledge Access', 'Fairy Ascension Ledge'),
('Fairy Ascension Cave Climb', 'Fairy Ascension Cave (Top)'),
('Fairy Ascension Cave Pots', 'Fairy Ascension Cave (Bottom)'),
('Fairy Ascension Cave Drop', 'Fairy Ascension Cave (Drop)'),
('Dark Death Mountain Drop (East)', 'Dark Death Mountain (East Bottom)'),
('Ganon Drop', 'Bottom of Pyramid'),
('Pyramid Drop', 'East Dark World'),
('Post Aga Teleporter', 'Light World'),
('Secret Passage Inner Bushes', 'Light World'),
('Secret Passage Outer Bushes', 'Hyrule Castle Secret Entrance Area'),
('Potion Shop Inner Bushes', 'Light World'),
('Potion Shop Outer Bushes', 'Potion Shop Area'),
('Potion Shop Inner Rock', 'Northeast Light World'),
('Potion Shop Outer Rock', 'Potion Shop Area'),
('Potion Shop River Drop', 'River'),
('Graveyard Cave Inner Bushes', 'Light World'),
('Graveyard Cave Outer Bushes', 'Graveyard Cave Area'),
('Graveyard Cave Mirror Spot', 'West Dark World'),
('Light World River Drop', 'River'),
('Light World Pier', 'Light World'),
('Potion Shop Pier', 'Potion Shop Area'),
('Hyrule Castle Ledge Courtyard Drop', 'Light World'),
('Mimic Cave Ledge Access', 'Mimic Cave Ledge'),
('Mimic Cave Ledge Drop', 'East Death Mountain (Bottom)'),
('Turtle Rock Tail Drop', 'Turtle Rock (Top)'),
('Turtle Rock Drop', 'Dark Death Mountain'),
('Desert Ledge Drop', 'Light World'),
('Floating Island Drop', 'Dark Death Mountain'),
('Dark Lake Hylia Central Island Teleporter', 'Lake Hylia Central Island'),
('Dark Desert Teleporter', 'Light World'),
('East Dark World Teleporter', 'Light World'),
('South Dark World Teleporter', 'Light World'),
('West Dark World Teleporter', 'Light World'),
('Dark Death Mountain Teleporter (West)', 'Death Mountain'),
('Dark Death Mountain Teleporter (East)', 'East Death Mountain (Top)'),
('Dark Death Mountain Teleporter (East Bottom)', 'East Death Mountain (Bottom)'),
('Mire Mirror Spot', 'Dark Desert'),
('Dark Desert Drop', 'Dark Desert'),
('Desert Palace Stairs Mirror Spot', 'Dark Desert'),
('Desert Palace North Mirror Spot', 'Dark Desert'),
('Maze Race Mirror Spot', 'West Dark World'),
('Lake Hylia Central Island Mirror Spot', 'Dark Lake Hylia'),
('Hammer Peg Area Mirror Spot', 'Hammer Peg Area'),
('Bumper Cave Ledge Mirror Spot', 'Bumper Cave Ledge'),
('Bumper Cave Entrance Mirror Spot', 'Bumper Cave Entrance'),
('Death Mountain Mirror Spot', 'Dark Death Mountain'),
('East Death Mountain Mirror Spot (Top)', 'Dark Death Mountain'),
('East Death Mountain Mirror Spot (Bottom)', 'Dark Death Mountain (East Bottom)'),
('Death Mountain (Top) Mirror Spot', 'Dark Death Mountain'),
('Dark Death Mountain Ledge Mirror Spot (East)', 'Dark Death Mountain Ledge'),
('Dark Death Mountain Ledge Mirror Spot (West)', 'Dark Death Mountain Ledge'),
('Floating Island Mirror Spot', 'Death Mountain Floating Island (Dark World)'),
('Laser Bridge Mirror Spot', 'Dark Death Mountain Isolated Ledge'),
('East Dark World Mirror Spot', 'East Dark World'),
('West Dark World Mirror Spot', 'West Dark World'),
('South Dark World Mirror Spot', 'South Dark World'),
('Potion Shop Mirror Spot', 'Northeast Dark World'),
('Northeast Dark World Mirror Spot', 'Northeast Dark World'),
('Shopping Mall Mirror Spot', 'Dark Lake Hylia Ledge'),
('Skull Woods Mirror Spot', 'Skull Woods Forest (West)'),
('DDM Flute', 'The Sky'),
('DDM Landing', 'Dark Death Mountain'),
('NEDW Flute', 'The Sky'),
('NEDW Landing', 'Northeast Dark World'),
('WDW Flute', 'The Sky'),
('WDW Landing', 'West Dark World'),
('SDW Flute', 'The Sky'),
('SDW Landing', 'South Dark World'),
('EDW Flute', 'The Sky'),
('EDW Landing', 'East Dark World'),
('DLHL Flute', 'The Sky'),
('DLHL Landing', 'Dark Lake Hylia Ledge'),
('DD Flute', 'The Sky'),
('DD Landing', 'Dark Desert Ledge'),
('EDDM Flute', 'The Sky'),
('Dark Grassy Lawn Flute', 'The Sky'),
('Hammer Peg Area Flute', 'The Sky'),
('Chris Houlihan Room Exit', 'Pyramid Ledge'),
('Bush Covered Lawn Inner Bushes', 'Light World'),
('Bush Covered Lawn Outer Bushes', 'Bush Covered Lawn'),
('Bush Covered Lawn Mirror Spot', 'Dark Grassy Lawn'),
('Bomb Hut Inner Bushes', 'Light World'),
('Bomb Hut Outer Bushes', 'Bomb Hut Area'),
('Bomb Hut Mirror Spot', 'West Dark World')]
# non-shuffled entrance links
default_connections = [('Waterfall of Wishing', 'Waterfall of Wishing'),
("Blinds Hideout", "Blinds Hideout"),
('Dam', 'Dam'),
('Lumberjack House', 'Lumberjack House'),
("Hyrule Castle Secret Entrance Drop", "Hyrule Castle Secret Entrance"),
("Hyrule Castle Secret Entrance Stairs", "Hyrule Castle Secret Entrance"),
("Hyrule Castle Secret Entrance Exit", "Hyrule Castle Courtyard"),
('Bonk Fairy (Light)', 'Bonk Fairy (Light)'),
('Lake Hylia Fairy', 'Lake Hylia Healer Fairy'),
('Lake Hylia Fortune Teller', 'Lake Hylia Fortune Teller'),
('Light Hype Fairy', 'Swamp Healer Fairy'),
('Desert Fairy', 'Desert Healer Fairy'),
('Kings Grave', 'Kings Grave'),
('Tavern North', 'Tavern'),
('Chicken House', 'Chicken House'),
('Aginahs Cave', 'Aginahs Cave'),
('Sahasrahlas Hut', 'Sahasrahlas Hut'),
('Cave Shop (Lake Hylia)', 'Cave Shop (Lake Hylia)'),
('Capacity Upgrade', 'Capacity Upgrade'),
('Kakariko Well Drop', 'Kakariko Well (top)'),
('Kakariko Well Cave', 'Kakariko Well (bottom)'),
('Kakariko Well Exit', 'Light World'),
('Blacksmiths Hut', 'Blacksmiths Hut'),
('Bat Cave Drop', 'Bat Cave (right)'),
('Bat Cave Cave', 'Bat Cave (left)'),
('Bat Cave Exit', 'Light World'),
('Sick Kids House', 'Sick Kids House'),
('Elder House (East)', 'Elder House'),
('Elder House (West)', 'Elder House'),
('Elder House Exit (East)', 'Light World'),
('Elder House Exit (West)', 'Light World'),
('North Fairy Cave Drop', 'North Fairy Cave'),
('North Fairy Cave', 'North Fairy Cave'),
('North Fairy Cave Exit', 'Light World'),
('Lost Woods Gamble', 'Lost Woods Gamble'),
('Fortune Teller (Light)', 'Fortune Teller (Light)'),
('Snitch Lady (East)', 'Snitch Lady (East)'),
('Snitch Lady (West)', 'Snitch Lady (West)'),
('Bush Covered House', 'Bush Covered House'),
('Tavern (Front)', 'Tavern (Front)'),
('Light World Bomb Hut', 'Light World Bomb Hut'),
('Kakariko Shop', 'Kakariko Shop'),
('Lost Woods Hideout Drop', 'Lost Woods Hideout (top)'),
('Lost Woods Hideout Stump', 'Lost Woods Hideout (bottom)'),
('Lost Woods Hideout Exit', 'Light World'),
('Lumberjack Tree Tree', 'Lumberjack Tree (top)'),
('Lumberjack Tree Cave', 'Lumberjack Tree (bottom)'),
('Lumberjack Tree Exit', 'Light World'),
('Cave 45', 'Cave 45'),
('Graveyard Cave', 'Graveyard Cave'),
('Checkerboard Cave', 'Checkerboard Cave'),
('Mini Moldorm Cave', 'Mini Moldorm Cave'),
('Long Fairy Cave', 'Long Fairy Cave'), # near East Light World Teleporter
('Good Bee Cave', 'Good Bee Cave'),
('20 Rupee Cave', '20 Rupee Cave'),
('50 Rupee Cave', '50 Rupee Cave'),
('Ice Rod Cave', 'Ice Rod Cave'),
('Bonk Rock Cave', 'Bonk Rock Cave'),
('Library', 'Library'),
('Kakariko Gamble Game', 'Kakariko Gamble Game'),
('Potion Shop', 'Potion Shop'),
('Two Brothers House (East)', 'Two Brothers House'),
('Two Brothers House (West)', 'Two Brothers House'),
('Two Brothers House Exit (East)', 'Light World'),
('Two Brothers House Exit (West)', 'Maze Race Ledge'),
('Sanctuary', 'Sanctuary'),
('Sanctuary Grave', 'Sewer Drop'),
('Sanctuary Exit', 'Light World'),
('Old Man Cave (West)', 'Old Man Cave'),
('Old Man Cave (East)', 'Old Man Cave'),
('Old Man Cave Exit (West)', 'Light World'),
('Old Man Cave Exit (East)', 'Death Mountain'),
('Old Man House (Bottom)', 'Old Man House'),
('Old Man House Exit (Bottom)', 'Death Mountain'),
('Old Man House (Top)', 'Old Man House Back'),
('Old Man House Exit (Top)', 'Death Mountain'),
('Death Mountain Return Cave (East)', 'Death Mountain Return Cave'),
('Death Mountain Return Cave (West)', 'Death Mountain Return Cave'),
('Death Mountain Return Cave Exit (West)', 'Death Mountain Return Ledge'),
('Death Mountain Return Cave Exit (East)', 'Death Mountain'),
('Spectacle Rock Cave Peak', 'Spectacle Rock Cave (Peak)'),
('Spectacle Rock Cave (Bottom)', 'Spectacle Rock Cave (Bottom)'),
('Spectacle Rock Cave', 'Spectacle Rock Cave (Top)'),
('Spectacle Rock Cave Exit', 'Death Mountain'),
('Spectacle Rock Cave Exit (Top)', 'Death Mountain'),
('Spectacle Rock Cave Exit (Peak)', 'Death Mountain'),
('Paradox Cave (Bottom)', 'Paradox Cave Front'),
('Paradox Cave (Middle)', 'Paradox Cave'),
('Paradox Cave (Top)', 'Paradox Cave'),
('Paradox Cave Exit (Bottom)', 'East Death Mountain (Bottom)'),
('Paradox Cave Exit (Middle)', 'East Death Mountain (Bottom)'),
('Paradox Cave Exit (Top)', 'East Death Mountain (Top)'),
('Hookshot Fairy', 'Hookshot Fairy'),
('Fairy Ascension Cave (Bottom)', 'Fairy Ascension Cave (Bottom)'),
('Fairy Ascension Cave (Top)', 'Fairy Ascension Cave (Top)'),
('Fairy Ascension Cave Exit (Bottom)', 'Fairy Ascension Plateau'),
('Fairy Ascension Cave Exit (Top)', 'Fairy Ascension Ledge'),
('Spiral Cave', 'Spiral Cave (Top)'),
('Spiral Cave (Bottom)', 'Spiral Cave (Bottom)'),
('Spiral Cave Exit', 'East Death Mountain (Bottom)'),
('Spiral Cave Exit (Top)', 'Spiral Cave Ledge'),
('Pyramid Fairy', 'Pyramid Fairy'),
('East Dark World Hint', 'East Dark World Hint'),
('Palace of Darkness Hint', 'Palace of Darkness Hint'),
('Big Bomb Shop', 'Big Bomb Shop'),
('Dark Lake Hylia Shop', 'Dark Lake Hylia Shop'),
('Dark Lake Hylia Fairy', 'Dark Lake Hylia Healer Fairy'),
('Dark Lake Hylia Ledge Fairy', 'Dark Lake Hylia Ledge Healer Fairy'),
('Dark Lake Hylia Ledge Spike Cave', 'Dark Lake Hylia Ledge Spike Cave'),
('Dark Lake Hylia Ledge Hint', 'Dark Lake Hylia Ledge Hint'),
('Hype Cave', 'Hype Cave'),
('Bonk Fairy (Dark)', 'Bonk Fairy (Dark)'),
('Brewery', 'Brewery'),
('C-Shaped House', 'C-Shaped House'),
('Chest Game', 'Chest Game'),
('Dark World Hammer Peg Cave', 'Dark World Hammer Peg Cave'),
('Bumper Cave (Bottom)', 'Bumper Cave'),
('Bumper Cave (Top)', 'Bumper Cave'),
('Red Shield Shop', 'Red Shield Shop'),
('Dark Sanctuary Hint', 'Dark Sanctuary Hint'),
('Fortune Teller (Dark)', 'Fortune Teller (Dark)'),
('Dark World Shop', 'Village of Outcasts Shop'),
('Dark World Lumberjack Shop', 'Dark World Lumberjack Shop'),
('Dark World Potion Shop', 'Dark World Potion Shop'),
('Archery Game', 'Archery Game'),
('Bumper Cave Exit (Top)', 'Bumper Cave Ledge'),
('Bumper Cave Exit (Bottom)', 'West Dark World'),
('Mire Shed', 'Mire Shed'),
('Dark Desert Hint', 'Dark Desert Hint'),
('Dark Desert Fairy', 'Dark Desert Healer Fairy'),
('Spike Cave', 'Spike Cave'),
('Hookshot Cave', 'Hookshot Cave'),
('Superbunny Cave (Top)', 'Superbunny Cave'),
('Cave Shop (Dark Death Mountain)', 'Cave Shop (Dark Death Mountain)'),
('Dark Death Mountain Fairy', 'Dark Death Mountain Healer Fairy'),
('Superbunny Cave (Bottom)', 'Superbunny Cave'),
('Superbunny Cave Exit (Top)', 'Dark Death Mountain (Top)'),
('Superbunny Cave Exit (Bottom)', 'Dark Death Mountain (East Bottom)'),
('Hookshot Cave Exit (South)', 'Dark Death Mountain (Top)'),
('Hookshot Cave Exit (North)', 'Death Mountain Floating Island (Dark World)'),
('Hookshot Cave Back Entrance', 'Hookshot Cave'),
('Mimic Cave', 'Mimic Cave'),
('Pyramid Hole', 'Pyramid'),
('Pyramid Exit', 'Pyramid Ledge'),
('Pyramid Entrance', 'Bottom of Pyramid')
]
inverted_default_connections = [('Waterfall of Wishing', 'Waterfall of Wishing'),
('Blinds Hideout', 'Blinds Hideout'),
('Dam', 'Dam'),
('Lumberjack House', 'Lumberjack House'),
('Hyrule Castle Secret Entrance Drop', 'Hyrule Castle Secret Entrance'),
('Hyrule Castle Secret Entrance Stairs', 'Hyrule Castle Secret Entrance'),
('Hyrule Castle Secret Entrance Exit', 'Light World'),
('Bonk Fairy (Light)', 'Bonk Fairy (Light)'),
('Lake Hylia Fairy', 'Lake Hylia Healer Fairy'),
('Lake Hylia Fortune Teller', 'Lake Hylia Fortune Teller'),
('Light Hype Fairy', 'Swamp Healer Fairy'),
('Desert Fairy', 'Desert Healer Fairy'),
('Kings Grave', 'Kings Grave'),
('Tavern North', 'Tavern'),
('Chicken House', 'Chicken House'),
('Aginahs Cave', 'Aginahs Cave'),
('Sahasrahlas Hut', 'Sahasrahlas Hut'),
('Cave Shop (Lake Hylia)', 'Cave Shop (Lake Hylia)'),
('Capacity Upgrade', 'Capacity Upgrade'),
('Kakariko Well Drop', 'Kakariko Well (top)'),
('Kakariko Well Cave', 'Kakariko Well (bottom)'),
('Kakariko Well Exit', 'Light World'),
('Blacksmiths Hut', 'Blacksmiths Hut'),
('Bat Cave Drop', 'Bat Cave (right)'),
('Bat Cave Cave', 'Bat Cave (left)'),
('Bat Cave Exit', 'Light World'),
('Sick Kids House', 'Sick Kids House'),
('Elder House (East)', 'Elder House'),
('Elder House (West)', 'Elder House'),
('Elder House Exit (East)', 'Light World'),
('Elder House Exit (West)', 'Light World'),
('North Fairy Cave Drop', 'North Fairy Cave'),
('North Fairy Cave', 'North Fairy Cave'),
('North Fairy Cave Exit', 'Light World'),
('Lost Woods Gamble', 'Lost Woods Gamble'),
('Fortune Teller (Light)', 'Fortune Teller (Light)'),
('Snitch Lady (East)', 'Snitch Lady (East)'),
('Snitch Lady (West)', 'Snitch Lady (West)'),
('Bush Covered House', 'Bush Covered House'),
('Tavern (Front)', 'Tavern (Front)'),
('Light World Bomb Hut', 'Light World Bomb Hut'),
('Kakariko Shop', 'Kakariko Shop'),
('Lost Woods Hideout Drop', 'Lost Woods Hideout (top)'),
('Lost Woods Hideout Stump', 'Lost Woods Hideout (bottom)'),
('Lost Woods Hideout Exit', 'Light World'),
('Lumberjack Tree Tree', 'Lumberjack Tree (top)'),
('Lumberjack Tree Cave', 'Lumberjack Tree (bottom)'),
('Lumberjack Tree Exit', 'Light World'),
('Cave 45', 'Cave 45'),
('Graveyard Cave', 'Graveyard Cave'),
('Checkerboard Cave', 'Checkerboard Cave'),
('Mini Moldorm Cave', 'Mini Moldorm Cave'),
('Long Fairy Cave', 'Long Fairy Cave'),
('Good Bee Cave', 'Good Bee Cave'),
('20 Rupee Cave', '20 Rupee Cave'),
('50 Rupee Cave', '50 Rupee Cave'),
('Ice Rod Cave', 'Ice Rod Cave'),
('Bonk Rock Cave', 'Bonk Rock Cave'),
('Library', 'Library'),
('Kakariko Gamble Game', 'Kakariko Gamble Game'),
('Potion Shop', 'Potion Shop'),
('Two Brothers House (East)', 'Two Brothers House'),
('Two Brothers House (West)', 'Two Brothers House'),
('Two Brothers House Exit (East)', 'Light World'),
('Two Brothers House Exit (West)', 'Maze Race Ledge'),
('Sanctuary', 'Sanctuary'),
('Sanctuary Grave', 'Sewer Drop'),
('Sanctuary Exit', 'Light World'),
('Old Man House (Bottom)', 'Old Man House'),
('Old Man House Exit (Bottom)', 'Death Mountain'),
('Old Man House (Top)', 'Old Man House Back'),
('Old Man House Exit (Top)', 'Death Mountain'),
('Spectacle Rock Cave Peak', 'Spectacle Rock Cave (Peak)'),
('Spectacle Rock Cave (Bottom)', 'Spectacle Rock Cave (Bottom)'),
('Spectacle Rock Cave', 'Spectacle Rock Cave (Top)'),
('Spectacle Rock Cave Exit', 'Death Mountain'),
('Spectacle Rock Cave Exit (Top)', 'Death Mountain'),
('Spectacle Rock Cave Exit (Peak)', 'Death Mountain'),
('Paradox Cave (Bottom)', 'Paradox Cave Front'),
('Paradox Cave (Middle)', 'Paradox Cave'),
('Paradox Cave (Top)', 'Paradox Cave'),
('Paradox Cave Exit (Bottom)', 'East Death Mountain (Bottom)'),
('Paradox Cave Exit (Middle)', 'East Death Mountain (Bottom)'),
('Paradox Cave Exit (Top)', 'East Death Mountain (Top)'),
('Hookshot Fairy', 'Hookshot Fairy'),
('Fairy Ascension Cave (Bottom)', 'Fairy Ascension Cave (Bottom)'),
('Fairy Ascension Cave (Top)', 'Fairy Ascension Cave (Top)'),
('Fairy Ascension Cave Exit (Bottom)', 'Fairy Ascension Plateau'),
('Fairy Ascension Cave Exit (Top)', 'Fairy Ascension Ledge'),
('Spiral Cave', 'Spiral Cave (Top)'),
('Spiral Cave (Bottom)', 'Spiral Cave (Bottom)'),
('Spiral Cave Exit', 'East Death Mountain (Bottom)'),
('Spiral Cave Exit (Top)', 'Spiral Cave Ledge'),
('Pyramid Fairy', 'Pyramid Fairy'),
('East Dark World Hint', 'East Dark World Hint'),
('Palace of Darkness Hint', 'Palace of Darkness Hint'),
('Dark Lake Hylia Shop', 'Dark Lake Hylia Shop'),
('Dark Lake Hylia Fairy', 'Dark Lake Hylia Healer Fairy'),
('Dark Lake Hylia Ledge Fairy', 'Dark Lake Hylia Ledge Healer Fairy'),
('Dark Lake Hylia Ledge Spike Cave', 'Dark Lake Hylia Ledge Spike Cave'),
('Dark Lake Hylia Ledge Hint', 'Dark Lake Hylia Ledge Hint'),
('Hype Cave', 'Hype Cave'),
('Bonk Fairy (Dark)', 'Bonk Fairy (Dark)'),
('Brewery', 'Brewery'),
('C-Shaped House', 'C-Shaped House'),
('Chest Game', 'Chest Game'),
('Dark World Hammer Peg Cave', 'Dark World Hammer Peg Cave'),
('Red Shield Shop', 'Red Shield Shop'),
('Fortune Teller (Dark)', 'Fortune Teller (Dark)'),
('Dark World Shop', 'Village of Outcasts Shop'),
('Dark World Lumberjack Shop', 'Dark World Lumberjack Shop'),
('Dark World Potion Shop', 'Dark World Potion Shop'),
('Archery Game', 'Archery Game'),
('Mire Shed', 'Mire Shed'),
('Dark Desert Hint', 'Dark Desert Hint'),
('Dark Desert Fairy', 'Dark Desert Healer Fairy'),
('Spike Cave', 'Spike Cave'),
('Hookshot Cave', 'Hookshot Cave'),
('Superbunny Cave (Top)', 'Superbunny Cave'),
('Cave Shop (Dark Death Mountain)', 'Cave Shop (Dark Death Mountain)'),
('Superbunny Cave (Bottom)', 'Superbunny Cave'),
('Superbunny Cave Exit (Bottom)', 'Dark Death Mountain (East Bottom)'),
('Hookshot Cave Exit (North)', 'Death Mountain Floating Island (Dark World)'),
('Hookshot Cave Back Entrance', 'Hookshot Cave'),
('Mimic Cave', 'Mimic Cave'),
('Inverted Pyramid Hole', 'Pyramid'),
('Inverted Links House', 'Inverted Links House'),
('Inverted Links House Exit', 'South Dark World'),
('Inverted Big Bomb Shop', 'Inverted Big Bomb Shop'),
('Inverted Dark Sanctuary', 'Inverted Dark Sanctuary'),
('Inverted Dark Sanctuary Exit', 'West Dark World'),
('Old Man Cave (West)', 'Bumper Cave'),
('Old Man Cave (East)', 'Death Mountain Return Cave'),
('Old Man Cave Exit (West)', 'West Dark World'),
('Old Man Cave Exit (East)', 'Dark Death Mountain'),
('Dark Death Mountain Fairy', 'Old Man Cave'),
('Bumper Cave (Bottom)', 'Old Man Cave'),
('Bumper Cave (Top)', 'Dark Death Mountain Healer Fairy'),
('Bumper Cave Exit (Top)', 'Death Mountain Return Ledge'),
('Bumper Cave Exit (Bottom)', 'Light World'),
('Death Mountain Return Cave (West)', 'Bumper Cave'),
('Death Mountain Return Cave (East)', 'Death Mountain Return Cave'),
('Death Mountain Return Cave Exit (West)', 'Death Mountain'),
('Death Mountain Return Cave Exit (East)', 'Death Mountain'),
('Hookshot Cave Exit (South)', 'Dark Death Mountain'),
('Superbunny Cave Exit (Top)', 'Dark Death Mountain'),
('Pyramid Exit', 'Light World'),
('Inverted Pyramid Entrance', 'Bottom of Pyramid')]
# non-shuffled dungeon connections
default_dungeon_connections = [('Desert Palace Entrance (South)', 'Desert Main Lobby'),
('Desert Palace Entrance (West)', 'Desert West Lobby'),
('Desert Palace Entrance (North)', 'Desert Back Lobby'),
('Desert Palace Entrance (East)', 'Desert East Lobby'),
('Desert Palace Exit (South)', 'Desert Palace Stairs'),
('Desert Palace Exit (West)', 'Desert Ledge'),
('Desert Palace Exit (East)', 'Desert Palace Lone Stairs'),
('Desert Palace Exit (North)', 'Desert Palace Entrance (North) Spot'),
('Eastern Palace', 'Eastern Lobby'),
('Eastern Palace Exit', 'Light World'),
('Tower of Hera', 'Hera Lobby'),
('Tower of Hera Exit', 'Death Mountain (Top)'),
('Hyrule Castle Entrance (South)', 'Hyrule Castle Lobby'),
('Hyrule Castle Entrance (West)', 'Hyrule Castle West Lobby'),
('Hyrule Castle Entrance (East)', 'Hyrule Castle East Lobby'),
('Hyrule Castle Exit (South)', 'Hyrule Castle Courtyard'),
('Hyrule Castle Exit (West)', 'Hyrule Castle Ledge'),
('Hyrule Castle Exit (East)', 'Hyrule Castle Ledge'),
('Agahnims Tower', 'Tower Lobby'),
('Agahnims Tower Exit', 'Hyrule Castle Ledge'),
('Thieves Town', 'Thieves Lobby'),
('Thieves Town Exit', 'West Dark World'),
('Skull Woods First Section Hole (East)', 'Skull Pinball'),
('Skull Woods First Section Hole (West)', 'Skull Left Drop'),
('Skull Woods First Section Hole (North)', 'Skull Pot Circle'),
('Skull Woods First Section Door', 'Skull 1 Lobby'),
('Skull Woods First Section Exit', 'Skull Woods Forest'),
('Skull Woods Second Section Hole', 'Skull Back Drop'),
('Skull Woods Second Section Door (East)', 'Skull 2 East Lobby'),
('Skull Woods Second Section Door (West)', 'Skull 2 West Lobby'),
('Skull Woods Second Section Exit (East)', 'Skull Woods Forest'),
('Skull Woods Second Section Exit (West)', 'Skull Woods Forest (West)'),
('Skull Woods Final Section', 'Skull 3 Lobby'),
('Skull Woods Final Section Exit', 'Skull Woods Forest (West)'),
('Ice Palace', 'Ice Lobby'),
('Ice Palace Exit', 'Dark Lake Hylia Central Island'),
('Misery Mire', 'Mire Lobby'),
('Misery Mire Exit', 'Dark Desert'),
('Palace of Darkness', 'PoD Lobby'),
('Palace of Darkness Exit', 'East Dark World'),
('Swamp Palace', 'Swamp Lobby'), # requires additional patch for flooding moat if moved
('Swamp Palace Exit', 'South Dark World'),
('Turtle Rock', 'TR Main Lobby'),
('Turtle Rock Exit (Front)', 'Dark Death Mountain (Top)'),
('Turtle Rock Ledge Exit (West)', 'Dark Death Mountain Ledge'),
('Turtle Rock Ledge Exit (East)', 'Dark Death Mountain Ledge'),
('Dark Death Mountain Ledge (West)', 'TR Lazy Eyes'),
('Dark Death Mountain Ledge (East)', 'TR Big Chest Entrance'),
('Turtle Rock Isolated Ledge Exit', 'Dark Death Mountain Isolated Ledge'),
('Turtle Rock Isolated Ledge Entrance', 'TR Eye Bridge'),
('Ganons Tower', 'GT Lobby'),
('Ganons Tower Exit', 'Dark Death Mountain (Top)')
]
inverted_default_dungeon_connections = [('Desert Palace Entrance (South)', 'Desert Main Lobby'),
('Desert Palace Entrance (West)', 'Desert West Lobby'),
('Desert Palace Entrance (North)', 'Desert Back Lobby'),
('Desert Palace Entrance (East)', 'Desert East Lobby'),
('Desert Palace Exit (South)', 'Desert Palace Stairs'),
('Desert Palace Exit (West)', 'Desert Ledge'),
('Desert Palace Exit (East)', 'Desert Palace Lone Stairs'),
('Desert Palace Exit (North)', 'Desert Palace Entrance (North) Spot'),
('Eastern Palace', 'Eastern Lobby'),
('Eastern Palace Exit', 'Light World'),
('Tower of Hera', 'Hera Lobby'),
('Tower of Hera Exit', 'Death Mountain (Top)'),
('Hyrule Castle Entrance (South)', 'Hyrule Castle Lobby'),
('Hyrule Castle Entrance (West)', 'Hyrule Castle West Lobby'),
('Hyrule Castle Entrance (East)', 'Hyrule Castle East Lobby'),
('Hyrule Castle Exit (South)', 'Light World'),
('Hyrule Castle Exit (West)', 'Hyrule Castle Ledge'),
('Hyrule Castle Exit (East)', 'Hyrule Castle Ledge'),
('Thieves Town', 'Thieves Lobby'),
('Thieves Town Exit', 'West Dark World'),
('Skull Woods First Section Hole (East)', 'Skull Pinball'),
('Skull Woods First Section Hole (West)', 'Skull Left Drop'),
('Skull Woods First Section Hole (North)', 'Skull Pot Circle'),
('Skull Woods First Section Door', 'Skull 1 Lobby'),
('Skull Woods First Section Exit', 'Skull Woods Forest'),
('Skull Woods Second Section Hole', 'Skull Back Drop'),
('Skull Woods Second Section Door (East)', 'Skull 2 East Lobby'),
('Skull Woods Second Section Door (West)', 'Skull 2 West Lobby'),
('Skull Woods Second Section Exit (East)', 'Skull Woods Forest'),
('Skull Woods Second Section Exit (West)', 'Skull Woods Forest (West)'),
('Skull Woods Final Section', 'Skull 3 Lobby'),
('Skull Woods Final Section Exit', 'Skull Woods Forest (West)'),
('Ice Palace', 'Ice Lobby'),
('Misery Mire', 'Mire Lobby'),
('Misery Mire Exit', 'Dark Desert'),
('Palace of Darkness', 'PoD Lobby'),
('Palace of Darkness Exit', 'East Dark World'),
('Swamp Palace', 'Swamp Lobby'), # requires additional patch for flooding moat if moved
('Swamp Palace Exit', 'South Dark World'),
('Turtle Rock', 'TR Main Lobby'),
('Turtle Rock Ledge Exit (West)', 'Dark Death Mountain Ledge'),
('Turtle Rock Ledge Exit (East)', 'Dark Death Mountain Ledge'),
('Dark Death Mountain Ledge (West)', 'TR Lazy Eyes'),
('Dark Death Mountain Ledge (East)', 'TR Big Chest Entrance'),
('Turtle Rock Isolated Ledge Exit', 'Dark Death Mountain Isolated Ledge'),
('Turtle Rock Isolated Ledge Entrance', 'TR Eye Bridge'),
('Inverted Ganons Tower', 'GT Lobby'),
('Inverted Ganons Tower Exit', 'Hyrule Castle Ledge'),
('Inverted Agahnims Tower', 'Tower Lobby'),
('Inverted Agahnims Tower Exit', 'Dark Death Mountain'),
('Turtle Rock Exit (Front)', 'Dark Death Mountain'),
('Ice Palace Exit', 'Dark Lake Hylia')
]
indirect_connections = {
'Turtle Rock (Top)': 'Turtle Rock',
'East Dark World': 'Pyramid Fairy',
'Big Bomb Shop': 'Pyramid Fairy',
'Dark Desert': 'Pyramid Fairy',
'West Dark World': 'Pyramid Fairy',
'South Dark World': 'Pyramid Fairy',
'Light World': 'Pyramid Fairy',
'Old Man Cave': 'Old Man S&Q'
}
# format:
# Key=Name
# addr = (door_index, exitdata) # multiexit
# | ([addr], None) # holes
# exitdata = (room_id, ow_area, vram_loc, scroll_y, scroll_x, link_y, link_x, camera_y, camera_x, unknown_1, unknown_2, door_1, door_2)
# ToDo somehow merge this with creation of the locations
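
# Illustrative only: a hypothetical named view of the exitdata tuple documented above,
# to make the field order easier to follow. Nothing else in this module uses it; the
# ExitData and _exitdata_for names are assumptions for this sketch.
from collections import namedtuple

ExitData = namedtuple('ExitData', ['room_id', 'ow_area', 'vram_loc', 'scroll_y', 'scroll_x',
                                   'link_y', 'link_x', 'camera_y', 'camera_x',
                                   'unknown_1', 'unknown_2', 'door_1', 'door_2'])

def _exitdata_for(name):
    # multi-exit doors store (door_index, exitdata); holes store ([addr], None)
    _index, exitdata = door_addresses[name]
    return ExitData(*exitdata) if exitdata is not None else None

# e.g. _exitdata_for('Links House').room_id == 0x0104
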
door_addresses = {'Links House': (0x00, (0x0104, 0x2c, 0x0506, 0x0a9a, 0x0832, 0x0ae8, 0x08b8, 0x0b07, 0x08bf, 0x06, 0xfe, 0x0816, 0x0000)),
'Inverted Big Bomb Shop': (0x00, (0x0104, 0x2c, 0x0506, 0x0a9a, 0x0832, 0x0ae8, 0x08b8, 0x0b07, 0x08bf, 0x06, 0xfe, 0x0816, 0x0000)),
'Desert Palace Entrance (South)': (0x08, (0x0084, 0x30, 0x0314, 0x0c56, 0x00a6, 0x0ca8, 0x0128, 0x0cc3, 0x0133, 0x0a, 0xfa, 0x0000, 0x0000)),
'Desert Palace Entrance (West)': (0x0A, (0x0083, 0x30, 0x0280, 0x0c46, 0x0003, 0x0c98, 0x0088, 0x0cb3, 0x0090, 0x0a, 0xfd, 0x0000, 0x0000)),
'Desert Palace Entrance (North)': (0x0B, (0x0063, 0x30, 0x0016, 0x0c00, 0x00a2, 0x0c28, 0x0128, 0x0c6d, 0x012f, 0x00, 0x0e, 0x0000, 0x0000)),
'Desert Palace Entrance (East)': (0x09, (0x0085, 0x30, 0x02a8, 0x0c4a, 0x0142, 0x0c98, 0x01c8, 0x0cb7, 0x01cf, 0x06, 0xfe, 0x0000, 0x0000)),
'Eastern Palace': (0x07, (0x00c9, 0x1e, 0x005a, 0x0600, 0x0ed6, 0x0618, 0x0f50, 0x066d, 0x0f5b, 0x00, 0xfa, 0x0000, 0x0000)),
'Tower of Hera': (0x32, (0x0077, 0x03, 0x0050, 0x0014, 0x087c, 0x0068, 0x08f0, 0x0083, 0x08fb, 0x0a, 0xf4, 0x0000, 0x0000)),
'Hyrule Castle Entrance (South)': (0x03, (0x0061, 0x1b, 0x0530, 0x0692, 0x0784, 0x06cc, 0x07f8, 0x06ff, 0x0803, 0x0e, 0xfa, 0x0000, 0x87be)),
'Hyrule Castle Entrance (West)': (0x02, (0x0060, 0x1b, 0x0016, 0x0600, 0x06ae, 0x0604, 0x0728, 0x066d, 0x0733, 0x00, 0x02, 0x0000, 0x8124)),
'Hyrule Castle Entrance (East)': (0x04, (0x0062, 0x1b, 0x004a, 0x0600, 0x0856, 0x0604, 0x08c8, 0x066d, 0x08d3, 0x00, 0xfa, 0x0000, 0x8158)),
'Inverted Pyramid Entrance': (0x35, (0x0010, 0x1b, 0x0418, 0x0679, 0x06b4, 0x06c6, 0x0728, 0x06e6, 0x0733, 0x07, 0xf9, 0x0000, 0x0000)),
'Agahnims Tower': (0x23, (0x00e0, 0x1b, 0x0032, 0x0600, 0x0784, 0x0634, 0x07f8, 0x066d, 0x0803, 0x00, 0x0a, 0x0000, 0x82be)),
'Inverted Ganons Tower': (0x23, (0x00e0, 0x1b, 0x0032, 0x0600, 0x0784, 0x0634, 0x07f8, 0x066d, 0x0803, 0x00, 0x0a, 0x0000, 0x82be)),
'Thieves Town': (0x33, (0x00db, 0x58, 0x0b2e, 0x075a, 0x0176, 0x07a8, 0x01f8, 0x07c7, 0x0203, 0x06, 0xfa, 0x0000, 0x0000)),
'Skull Woods First Section Door': (0x29, (0x0058, 0x40, 0x0f4c, 0x01f6, 0x0262, 0x0248, 0x02e8, 0x0263, 0x02ef, 0x0a, 0xfe, 0x0000, 0x0000)),
'Skull Woods Second Section Door (East)': (0x28, (0x0057, 0x40, 0x0eb8, 0x01e6, 0x01c2, 0x0238, 0x0248, 0x0253, 0x024f, 0x0a, 0xfe, 0x0000, 0x0000)),
'Skull Woods Second Section Door (West)': (0x27, (0x0056, 0x40, 0x0c8e, 0x01a6, 0x0062, 0x01f8, 0x00e8, 0x0213, 0x00ef, 0x0a, 0x0e, 0x0000, 0x0000)),
'Skull Woods Final Section': (0x2A, (0x0059, 0x40, 0x0282, 0x0066, 0x0016, 0x00b8, 0x0098, 0x00d3, 0x00a3, 0x0a, 0xfa, 0x0000, 0x0000)),
'Ice Palace': (0x2C, (0x000e, 0x75, 0x0bc6, 0x0d6a, 0x0c3e, 0x0db8, 0x0cb8, 0x0dd7, 0x0cc3, 0x06, 0xf2, 0x0000, 0x0000)),
'Misery Mire': (0x26, (0x0098, 0x70, 0x0414, 0x0c79, 0x00a6, 0x0cc7, 0x0128, 0x0ce6, 0x0133, 0x07, 0xfa, 0x0000, 0x0000)),
'Palace of Darkness': (0x25, (0x004a, 0x5e, 0x005a, 0x0600, 0x0ed6, 0x0628, 0x0f50, 0x066d, 0x0f5b, 0x00, 0xfa, 0x0000, 0x0000)),
'Swamp Palace': (0x24, (0x0028, 0x7b, 0x049e, 0x0e8c, 0x06f2, 0x0ed8, 0x0778, 0x0ef9, 0x077f, 0x04, 0xfe, 0x0000, 0x0000)),
'Turtle Rock': (0x34, (0x00d6, 0x47, 0x0712, 0x00da, 0x0e96, 0x0128, 0x0f08, 0x0147, 0x0f13, 0x06, 0xfa, 0x0000, 0x0000)),
'Dark Death Mountain Ledge (West)': (0x14, (0x0023, 0x45, 0x07ca, 0x0103, 0x0c46, 0x0157, 0x0cb8, 0x0172, 0x0cc3, 0x0b, 0x0a, 0x0000, 0x0000)),
'Dark Death Mountain Ledge (East)': (0x18, (0x0024, 0x45, 0x07e0, 0x0103, 0x0d00, 0x0157, 0x0d78, 0x0172, 0x0d7d, 0x0b, 0x00, 0x0000, 0x0000)),
'Turtle Rock Isolated Ledge Entrance': (0x17, (0x00d5, 0x45, 0x0ad4, 0x0164, 0x0ca6, 0x01b8, 0x0d18, 0x01d3, 0x0d23, 0x0a, 0xfa, 0x0000, 0x0000)),
'Hyrule Castle Secret Entrance Stairs': (0x31, (0x0055, 0x1b, 0x044a, 0x067a, 0x0854, 0x06c8, 0x08c8, 0x06e7, 0x08d3, 0x06, 0xfa, 0x0000, 0x0000)),
'Kakariko Well Cave': (0x38, (0x002f, 0x18, 0x0386, 0x0665, 0x0032, 0x06b7, 0x00b8, 0x06d2, 0x00bf, 0x0b, 0xfe, 0x0000, 0x0000)),
'Bat Cave Cave': (0x10, (0x00e3, 0x22, 0x0412, 0x087a, 0x048e, 0x08c8, 0x0508, 0x08e7, 0x0513, 0x06, 0x02, 0x0000, 0x0000)),
'Elder House (East)': (0x0D, (0x00f3, 0x18, 0x02c4, 0x064a, 0x0222, 0x0698, 0x02a8, 0x06b7, 0x02af, 0x06, 0xfe, 0x05d4, 0x0000)),
'Elder House (West)': (0x0C, (0x00f2, 0x18, 0x02bc, 0x064c, 0x01e2, 0x0698, 0x0268, 0x06b9, 0x026f, 0x04, 0xfe, 0x05cc, 0x0000)),
'North Fairy Cave': (0x37, (0x0008, 0x15, 0x0088, 0x0400, 0x0a36, 0x0448, 0x0aa8, 0x046f, 0x0ab3, 0x00, 0x0a, 0x0000, 0x0000)),
'Lost Woods Hideout Stump': (0x2B, (0x00e1, 0x00, 0x0f4e, 0x01f6, 0x0262, 0x0248, 0x02e8, 0x0263, 0x02ef, 0x0a, 0x0e, 0x0000, 0x0000)),
'Lumberjack Tree Cave': (0x11, (0x00e2, 0x02, 0x0118, 0x0015, 0x04c6, 0x0067, 0x0548, 0x0082, 0x0553, 0x0b, 0xfa, 0x0000, 0x0000)),
'Two Brothers House (East)': (0x0F, (0x00f5, 0x29, 0x0880, 0x0b07, 0x0200, 0x0b58, 0x0238, 0x0b74, 0x028d, 0x09, 0x00, 0x0b86, 0x0000)),
'Two Brothers House (West)': (0x0E, (0x00f4, 0x28, 0x08a0, 0x0b06, 0x0100, 0x0b58, 0x01b8, 0x0b73, 0x018d, 0x0a, 0x00, 0x0bb6, 0x0000)),
'Sanctuary': (0x01, (0x0012, 0x13, 0x001c, 0x0400, 0x06de, 0x0414, 0x0758, 0x046d, 0x0763, 0x00, 0x02, 0x0000, 0x01aa)),
'Old Man Cave (West)': (0x05, (0x00f0, 0x0a, 0x03a0, 0x0264, 0x0500, 0x02b8, 0x05a8, 0x02d3, 0x058d, 0x0a, 0x00, 0x0000, 0x0000)),
'Old Man Cave (East)': (0x06, (0x00f1, 0x03, 0x1402, 0x0294, 0x0604, 0x02e8, 0x0678, 0x0303, 0x0683, 0x0a, 0xfc, 0x0000, 0x0000)),
'Old Man House (Bottom)': (0x2F, (0x00e4, 0x03, 0x181a, 0x031e, 0x06b4, 0x03a7, 0x0728, 0x038d, 0x0733, 0x00, 0x0c, 0x0000, 0x0000)),
'Old Man House (Top)': (0x30, (0x00e5, 0x03, 0x10c6, 0x0224, 0x0814, 0x0278, 0x0888, 0x0293, 0x0893, 0x0a, 0x0c, 0x0000, 0x0000)),
'Death Mountain Return Cave (East)': (0x2E, (0x00e7, 0x03, 0x0d82, 0x01c4, 0x0600, 0x0218, 0x0648, 0x0233, 0x067f, 0x0a, 0x00, 0x0000, 0x0000)),
'Death Mountain Return Cave (West)': (0x2D, (0x00e6, 0x0a, 0x00a0, 0x0205, 0x0500, 0x0257, 0x05b8, 0x0272, 0x058d, 0x0b, 0x00, 0x0000, 0x0000)),
'Spectacle Rock Cave Peak': (0x22, (0x00ea, 0x03, 0x092c, 0x0133, 0x0754, 0x0187, 0x07c8, 0x01a2, 0x07d3, 0x0b, 0xfc, 0x0000, 0x0000)),
'Spectacle Rock Cave': (0x21, (0x00fa, 0x03, 0x0eac, 0x01e3, 0x0754, 0x0237, 0x07c8, 0x0252, 0x07d3, 0x0b, 0xfc, 0x0000, 0x0000)),
'Spectacle Rock Cave (Bottom)': (0x20, (0x00f9, 0x03, 0x0d9c, 0x01c3, 0x06d4, 0x0217, 0x0748, 0x0232, 0x0753, 0x0b, 0xfc, 0x0000, 0x0000)),
'Paradox Cave (Bottom)': (0x1D, (0x00ff, 0x05, 0x0ee0, 0x01e3, 0x0d00, 0x0237, 0x0da8, 0x0252, 0x0d7d, 0x0b, 0x00, 0x0000, 0x0000)),
'Paradox Cave (Middle)': (0x1E, (0x00ef, 0x05, 0x17e0, 0x0304, 0x0d00, 0x0358, 0x0dc8, 0x0373, 0x0d7d, 0x0a, 0x00, 0x0000, 0x0000)),
'Paradox Cave (Top)': (0x1F, (0x00df, 0x05, 0x0460, 0x0093, 0x0d00, 0x00e7, 0x0db8, 0x0102, 0x0d7d, 0x0b, 0x00, 0x0000, 0x0000)),
'Fairy Ascension Cave (Bottom)': (0x19, (0x00fd, 0x05, 0x0dd4, 0x01c4, 0x0ca6, 0x0218, 0x0d18, 0x0233, 0x0d23, 0x0a, 0xfa, 0x0000, 0x0000)),
'Fairy Ascension Cave (Top)': (0x1A, (0x00ed, 0x05, 0x0ad4, 0x0163, 0x0ca6, 0x01b7, 0x0d18, 0x01d2, 0x0d23, 0x0b, 0xfa, 0x0000, 0x0000)),
'Spiral Cave': (0x1C, (0x00ee, 0x05, 0x07c8, 0x0108, 0x0c46, 0x0158, 0x0cb8, 0x0177, 0x0cc3, 0x06, 0xfa, 0x0000, 0x0000)),
'Spiral Cave (Bottom)': (0x1B, (0x00fe, 0x05, 0x0cca, 0x01a3, 0x0c56, 0x01f7, 0x0cc8, 0x0212, 0x0cd3, 0x0b, 0xfa, 0x0000, 0x0000)),
'Bumper Cave (Bottom)': (0x15, (0x00fb, 0x4a, 0x03a0, 0x0263, 0x0500, 0x02b7, 0x05a8, 0x02d2, 0x058d, 0x0b, 0x00, 0x0000, 0x0000)),
'Bumper Cave (Top)': (0x16, (0x00eb, 0x4a, 0x00a0, 0x020a, 0x0500, 0x0258, 0x05b8, 0x0277, 0x058d, 0x06, 0x00, 0x0000, 0x0000)),
'Superbunny Cave (Top)': (0x13, (0x00e8, 0x45, 0x0460, 0x0093, 0x0d00, 0x00e7, 0x0db8, 0x0102, 0x0d7d, 0x0b, 0x00, 0x0000, 0x0000)),
'Superbunny Cave (Bottom)': (0x12, (0x00f8, 0x45, 0x0ee0, 0x01e4, 0x0d00, 0x0238, 0x0d78, 0x0253, 0x0d7d, 0x0a, 0x00, 0x0000, 0x0000)),
'Hookshot Cave': (0x39, (0x003c, 0x45, 0x04da, 0x00a3, 0x0cd6, 0x0107, 0x0d48, 0x0112, 0x0d53, 0x0b, 0xfa, 0x0000, 0x0000)),
'Hookshot Cave Back Entrance': (0x3A, (0x002c, 0x45, 0x004c, 0x0000, 0x0c56, 0x0038, 0x0cc8, 0x006f, 0x0cd3, 0x00, 0x0a, 0x0000, 0x0000)),
'Ganons Tower': (0x36, (0x000c, 0x43, 0x0052, 0x0000, 0x0884, 0x0028, 0x08f8, 0x006f, 0x0903, 0x00, 0xfc, 0x0000, 0x0000)),
'Inverted Agahnims Tower': (0x36, (0x000c, 0x43, 0x0052, 0x0000, 0x0884, 0x0028, 0x08f8, 0x006f, 0x0903, 0x00, 0xfc, 0x0000, 0x0000)),
'Pyramid Entrance': (0x35, (0x0010, 0x5b, 0x0b0e, 0x075a, 0x0674, 0x07a8, 0x06e8, 0x07c7, 0x06f3, 0x06, 0xfa, 0x0000, 0x0000)),
'Skull Woods First Section Hole (West)': ([0xDB84D, 0xDB84E], None),
'Skull Woods First Section Hole (East)': ([0xDB84F, 0xDB850], None),
'Skull Woods First Section Hole (North)': ([0xDB84C], None),
'Skull Woods Second Section Hole': ([0xDB851, 0xDB852], None),
'Pyramid Hole': ([0xDB854, 0xDB855, 0xDB856], None),
'Inverted Pyramid Hole': ([0xDB854, 0xDB855, 0xDB856, 0x180340], None),
'Waterfall of Wishing': (0x5B, (0x0114, 0x0f, 0x0080, 0x0200, 0x0e00, 0x0207, 0x0e60, 0x026f, 0x0e7d, 0x00, 0x00, 0x0000, 0x0000)),
'Dam': (0x4D, (0x010b, 0x3b, 0x04a0, 0x0e8a, 0x06fa, 0x0ed8, 0x0778, 0x0ef7, 0x077f, 0x06, 0xfa, 0x0000, 0x0000)),
'Blinds Hideout': (0x60, (0x0119, 0x18, 0x02b2, 0x064a, 0x0186, 0x0697, 0x0208, 0x06b7, 0x0213, 0x06, 0xfa, 0x0000, 0x0000)),
'Hyrule Castle Secret Entrance Drop': ([0xDB858], None),
'Bonk Fairy (Light)': (0x76, (0x0126, 0x2b, 0x00a0, 0x0a0a, 0x0700, 0x0a67, 0x0788, 0x0a77, 0x0785, 0x06, 0xfa, 0x0000, 0x0000)),
'Lake Hylia Fairy': (0x5D, (0x0115, 0x2e, 0x0016, 0x0a00, 0x0cb6, 0x0a37, 0x0d28, 0x0a6d, 0x0d33, 0x00, 0x00, 0x0000, 0x0000)),
'Light Hype Fairy': (0x6B, (0x0115, 0x34, 0x00a0, 0x0c04, 0x0900, 0x0c58, 0x0988, 0x0c73, 0x0985, 0x0a, 0xf6, 0x0000, 0x0000)),
'Desert Fairy': (0x71, (0x0115, 0x3a, 0x0000, 0x0e00, 0x0400, 0x0e26, 0x0468, 0x0e6d, 0x0485, 0x00, 0x00, 0x0000, 0x0000)),
'Kings Grave': (0x5A, (0x0113, 0x14, 0x0320, 0x0456, 0x0900, 0x04a6, 0x0998, 0x04c3, 0x097d, 0x0a, 0xf6, 0x0000, 0x0000)),
'Tavern North': (0x42, (0x0103, 0x18, 0x1440, 0x08a7, 0x0206, 0x08f9, 0x0288, 0x0914, 0x0293, 0xf7, 0x09, 0xFFFF, 0x0000)), # do not use, buggy
'Chicken House': (0x4A, (0x0108, 0x18, 0x1120, 0x0837, 0x0106, 0x0888, 0x0188, 0x08a4, 0x0193, 0x07, 0xf9, 0x1530, 0x0000)),
'Aginahs Cave': (0x70, (0x010a, 0x30, 0x0656, 0x0cc6, 0x02aa, 0x0d18, 0x0328, 0x0d33, 0x032f, 0x08, 0xf8, 0x0000, 0x0000)),
'Sahasrahlas Hut': (0x44, (0x0105, 0x1e, 0x0610, 0x06d4, 0x0c76, 0x0727, 0x0cf0, 0x0743, 0x0cfb, 0x0a, 0xf6, 0x0000, 0x0000)),
'Cave Shop (Lake Hylia)': (0x57, (0x0112, 0x35, 0x0022, 0x0c00, 0x0b1a, 0x0c26, 0x0b98, 0x0c6d, 0x0b9f, 0x00, 0x00, 0x0000, 0x0000)),
'Capacity Upgrade': (0x5C, (0x0115, 0x35, 0x0a46, 0x0d36, 0x0c2a, 0x0d88, 0x0ca8, 0x0da3, 0x0caf, 0x0a, 0xf6, 0x0000, 0x0000)),
'Kakariko Well Drop': ([0xDB85C, 0xDB85D], None),
'Blacksmiths Hut': (0x63, (0x0121, 0x22, 0x010c, 0x081a, 0x0466, 0x0868, 0x04d8, 0x0887, 0x04e3, 0x06, 0xfa, 0x041A, 0x0000)),
'Bat Cave Drop': ([0xDB859, 0xDB85A], None),
'Sick Kids House': (0x3F, (0x0102, 0x18, 0x10be, 0x0826, 0x01f6, 0x0877, 0x0278, 0x0893, 0x0283, 0x08, 0xf8, 0x14CE, 0x0000)),
'North Fairy Cave Drop': ([0xDB857], None),
'Lost Woods Gamble': (0x3B, (0x0100, 0x00, 0x004e, 0x0000, 0x0272, 0x0008, 0x02f0, 0x006f, 0x02f7, 0x00, 0x00, 0x0000, 0x0000)),
'Fortune Teller (Light)': (0x64, (0x0122, 0x11, 0x060e, 0x04b4, 0x027d, 0x0508, 0x02f8, 0x0523, 0x0302, 0x0a, 0xf6, 0x0000, 0x0000)),
'Snitch Lady (East)': (0x3D, (0x0101, 0x18, 0x0ad8, 0x074a, 0x02c6, 0x0798, 0x0348, 0x07b7, 0x0353, 0x06, 0xfa, 0x0DE8, 0x0000)),
'Snitch Lady (West)': (0x3E, (0x0101, 0x18, 0x0788, 0x0706, 0x0046, 0x0758, 0x00c8, 0x0773, 0x00d3, 0x08, 0xf8, 0x0B98, 0x0000)),
'Bush Covered House': (0x43, (0x0103, 0x18, 0x1156, 0x081a, 0x02b6, 0x0868, 0x0338, 0x0887, 0x0343, 0x06, 0xfa, 0x1466, 0x0000)),
'Tavern (Front)': (0x41, (0x0103, 0x18, 0x1842, 0x0916, 0x0206, 0x0967, 0x0288, 0x0983, 0x0293, 0x08, 0xf8, 0x1C50, 0x0000)),
'Light World Bomb Hut': (0x49, (0x0107, 0x18, 0x1800, 0x0916, 0x0000, 0x0967, 0x0068, 0x0983, 0x008d, 0x08, 0xf8, 0x9C0C, 0x0000)),
'Kakariko Shop': (0x45, (0x011f, 0x18, 0x16a8, 0x08e7, 0x0136, 0x0937, 0x01b8, 0x0954, 0x01c3, 0x07, 0xf9, 0x1AB6, 0x0000)),
'Lost Woods Hideout Drop': ([0xDB853], None),
'Lumberjack Tree Tree': ([0xDB85B], None),
'Cave 45': (0x50, (0x011b, 0x32, 0x0680, 0x0cc9, 0x0400, 0x0d16, 0x0438, 0x0d36, 0x0485, 0x07, 0xf9, 0x0000, 0x0000)),
'Graveyard Cave': (0x51, (0x011b, 0x14, 0x0016, 0x0400, 0x08a2, 0x0446, 0x0918, 0x046d, 0x091f, 0x00, 0x00, 0x0000, 0x0000)),
'Checkerboard Cave': (0x7D, (0x0126, 0x30, 0x00c8, 0x0c0a, 0x024a, 0x0c67, 0x02c8, 0x0c77, 0x02cf, 0x06, 0xfa, 0x0000, 0x0000)),
'Mini Moldorm Cave': (0x7C, (0x0123, 0x35, 0x1480, 0x0e96, 0x0a00, 0x0ee8, 0x0a68, 0x0f03, 0x0a85, 0x08, 0xf8, 0x0000, 0x0000)),
'Long Fairy Cave': (0x54, (0x011e, 0x2f, 0x06a0, 0x0aca, 0x0f00, 0x0b18, 0x0fa8, 0x0b37, 0x0f85, 0x06, 0xfa, 0x0000, 0x0000)),
'Good Bee Cave': (0x6A, (0x0120, 0x37, 0x0084, 0x0c00, 0x0e26, 0x0c36, 0x0e98, 0x0c6f, 0x0ea3, 0x00, 0x00, 0x0000, 0x0000)),
'20 Rupee Cave': (0x7A, (0x0125, 0x37, 0x0200, 0x0c23, 0x0e00, 0x0c86, 0x0e68, 0x0c92, 0x0e7d, 0x0d, 0xf3, 0x0000, 0x0000)),
'50 Rupee Cave': (0x78, (0x0124, 0x3a, 0x0790, 0x0eea, 0x047a, 0x0f47, 0x04f8, 0x0f57, 0x04ff, 0x06, 0xfa, 0x0000, 0x0000)),
'Ice Rod Cave': (0x7F, (0x0120, 0x37, 0x0080, 0x0c00, 0x0e00, 0x0c37, 0x0e48, 0x0c6f, 0x0e7d, 0x00, 0x00, 0x0000, 0x0000)),
'Bonk Rock Cave': (0x79, (0x0124, 0x13, 0x0280, 0x044a, 0x0600, 0x04a7, 0x0638, 0x04b7, 0x067d, 0x06, 0xfa, 0x0000, 0x0000)),
'Library': (0x48, (0x0107, 0x29, 0x0100, 0x0a14, 0x0200, 0x0a67, 0x0278, 0x0a83, 0x0285, 0x0a, 0xf6, 0x040E, 0x0000)),
'Potion Shop': (0x4B, (0x0109, 0x16, 0x070a, 0x04e6, 0x0c56, 0x0538, 0x0cc8, 0x0553, 0x0cd3, 0x08, 0xf8, 0x0A98, 0x0000)),
'Sanctuary Grave': ([0xDB85E], None),
'Hookshot Fairy': (0x4F, (0x010c, 0x05, 0x0ee0, 0x01e3, 0x0d00, 0x0236, 0x0d78, 0x0252, 0x0d7d, 0x0b, 0xf5, 0x0000, 0x0000)),
'Pyramid Fairy': (0x62, (0x0116, 0x5b, 0x0b1e, 0x0754, 0x06fa, 0x07a7, 0x0778, 0x07c3, 0x077f, 0x0a, 0xf6, 0x0000, 0x0000)),
'East Dark World Hint': (0x68, (0x010e, 0x6f, 0x06a0, 0x0aca, 0x0f00, 0x0b18, 0x0fa8, 0x0b37, 0x0f85, 0x06, 0xfa, 0x0000, 0x0000)),
'Palace of Darkness Hint': (0x67, (0x011a, 0x5e, 0x0c24, 0x0794, 0x0d12, 0x07e8, 0x0d90, 0x0803, 0x0d97, 0x0a, 0xf6, 0x0000, 0x0000)),
'Dark Lake Hylia Fairy': (0x6C, (0x0115, 0x6e, 0x0016, 0x0a00, 0x0cb6, 0x0a36, 0x0d28, 0x0a6d, 0x0d33, 0x00, 0x00, 0x0000, 0x0000)),
'Dark Lake Hylia Ledge Fairy': (0x80, (0x0115, 0x77, 0x0080, 0x0c00, 0x0e00, 0x0c37, 0x0e48, 0x0c6f, 0x0e7d, 0x00, 0x00, 0x0000, 0x0000)),
'Dark Lake Hylia Ledge Spike Cave': (0x7B, (0x0125, 0x77, 0x0200, 0x0c27, 0x0e00, 0x0c86, 0x0e68, 0x0c96, 0x0e7d, 0x09, 0xf7, 0x0000, 0x0000)),
'Dark Lake Hylia Ledge Hint': (0x69, (0x010e, 0x77, 0x0084, 0x0c00, 0x0e26, 0x0c36, 0x0e98, 0x0c6f, 0x0ea3, 0x00, 0x00, 0x0000, 0x0000)),
'Hype Cave': (0x3C, (0x011e, 0x74, 0x00a0, 0x0c0a, 0x0900, 0x0c58, 0x0988, 0x0c77, 0x097d, 0x06, 0xfa, 0x0000, 0x0000)),
'Bonk Fairy (Dark)': (0x77, (0x0126, 0x6b, 0x00a0, 0x0a05, 0x0700, 0x0a66, 0x0788, 0x0a72, 0x0785, 0x0b, 0xf5, 0x0000, 0x0000)),
'Brewery': (0x47, (0x0106, 0x58, 0x16a8, 0x08e4, 0x013e, 0x0938, 0x01b8, 0x0953, 0x01c3, 0x0a, 0xf6, 0x1AB6, 0x0000)),
'C-Shaped House': (0x53, (0x011c, 0x58, 0x09d8, 0x0744, 0x02ce, 0x0797, 0x0348, 0x07b3, 0x0353, 0x0a, 0xf6, 0x0DE8, 0x0000)),
'Chest Game': (0x46, (0x0106, 0x58, 0x078a, 0x0705, 0x004e, 0x0758, 0x00c8, 0x0774, 0x00d3, 0x09, 0xf7, 0x0B98, 0x0000)),
'Dark World Hammer Peg Cave': (0x7E, (0x0127, 0x62, 0x0894, 0x091e, 0x0492, 0x09a6, 0x0508, 0x098b, 0x050f, 0x00, 0x00, 0x0000, 0x0000)),
'Red Shield Shop': (0x74, (0x0110, 0x5a, 0x079a, 0x06e8, 0x04d6, 0x0738, 0x0548, 0x0755, 0x0553, 0x08, 0xf8, 0x0AA8, 0x0000)),
'Dark Sanctuary Hint': (0x59, (0x0112, 0x53, 0x001e, 0x0400, 0x06e2, 0x0446, 0x0758, 0x046d, 0x075f, 0x00, 0x00, 0x0000, 0x0000)),
'Inverted Dark Sanctuary': (0x59, (0x0112, 0x53, 0x001e, 0x0400, 0x06e2, 0x0446, 0x0758, 0x046d, 0x075f, 0x00, 0x00, 0x0000, 0x0000)),
'Fortune Teller (Dark)': (0x65, (0x0122, 0x51, 0x0610, 0x04b4, 0x027e, 0x0507, 0x02f8, 0x0523, 0x0303, 0x0a, 0xf6, 0x091E, 0x0000)),
'Dark World Shop': (0x5F, (0x010f, 0x58, 0x1058, 0x0814, 0x02be, 0x0868, 0x0338, 0x0883, 0x0343, 0x0a, 0xf6, 0x0000, 0x0000)),
'Dark World Lumberjack Shop': (0x56, (0x010f, 0x42, 0x041c, 0x0074, 0x04e2, 0x00c7, 0x0558, 0x00e3, 0x055f, 0x0a, 0xf6, 0x0000, 0x0000)),
'Dark World Potion Shop': (0x6E, (0x010f, 0x56, 0x080e, 0x04f4, 0x0c66, 0x0548, 0x0cd8, 0x0563, 0x0ce3, 0x0a, 0xf6, 0x0000, 0x0000)),
'Archery Game': (0x58, (0x0111, 0x69, 0x069e, 0x0ac4, 0x02ea, 0x0b18, 0x0368, 0x0b33, 0x036f, 0x0a, 0xf6, 0x09AC, 0x0000)),
'Mire Shed': (0x5E, (0x010d, 0x70, 0x0384, 0x0c69, 0x001e, 0x0cb6, 0x0098, 0x0cd6, 0x00a3, 0x07, 0xf9, 0x0000, 0x0000)),
'Dark Desert Hint': (0x61, (0x0114, 0x70, 0x0654, 0x0cc5, 0x02aa, 0x0d16, 0x0328, 0x0d32, 0x032f, 0x09, 0xf7, 0x0000, 0x0000)),
'Dark Desert Fairy': (0x55, (0x0115, 0x70, 0x03a8, 0x0c6a, 0x013a, 0x0cb7, 0x01b8, 0x0cd7, 0x01bf, 0x06, 0xfa, 0x0000, 0x0000)),
'Spike Cave': (0x40, (0x0117, 0x43, 0x0ed4, 0x01e4, 0x08aa, 0x0236, 0x0928, 0x0253, 0x092f, 0x0a, 0xf6, 0x0000, 0x0000)),
'Cave Shop (Dark Death Mountain)': (0x6D, (0x0112, 0x45, 0x0ee0, 0x01e3, 0x0d00, 0x0236, 0x0daa, 0x0252, 0x0d7d, 0x0b, 0xf5, 0x0000, 0x0000)),
'Dark Death Mountain Fairy': (0x6F, (0x0115, 0x43, 0x1400, 0x0294, 0x0600, 0x02e8, 0x0678, 0x0303, 0x0685, 0x0a, 0xf6, 0x0000, 0x0000)),
'Mimic Cave': (0x4E, (0x010c, 0x05, 0x07e0, 0x0103, 0x0d00, 0x0156, 0x0d78, 0x0172, 0x0d7d, 0x0b, 0xf5, 0x0000, 0x0000)),
'Big Bomb Shop': (0x52, (0x011c, 0x6c, 0x0506, 0x0a9a, 0x0832, 0x0ae7, 0x08b8, 0x0b07, 0x08bf, 0x06, 0xfa, 0x0816, 0x0000)),
'Inverted Links House': (0x52, (0x011c, 0x6c, 0x0506, 0x0a9a, 0x0832, 0x0ae7, 0x08b8, 0x0b07, 0x08bf, 0x06, 0xfa, 0x0816, 0x0000)),
'Dark Lake Hylia Shop': (0x73, (0x010f, 0x75, 0x0380, 0x0c6a, 0x0a00, 0x0cb8, 0x0a58, 0x0cd7, 0x0a85, 0x06, 0xfa, 0x0000, 0x0000)),
'Lumberjack House': (0x75, (0x011f, 0x02, 0x049c, 0x0088, 0x04e6, 0x00d8, 0x0558, 0x00f7, 0x0563, 0x08, 0xf8, 0x07AA, 0x0000)),
'Lake Hylia Fortune Teller': (0x72, (0x0122, 0x35, 0x0380, 0x0c6a, 0x0a00, 0x0cb8, 0x0a58, 0x0cd7, 0x0a85, 0x06, 0xfa, 0x0000, 0x0000)),
'Kakariko Gamble Game': (0x66, (0x0118, 0x29, 0x069e, 0x0ac4, 0x02ea, 0x0b18, 0x0368, 0x0b33, 0x036f, 0x0a, 0xf6, 0x09AC, 0x0000))}
# format:
# Key=Name
# value = entrance #
# | (entrance #, exit #)
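# e.g. 'Links House Exit': (0x01, 0x00) gives both numbers, while single-value
# entries such as 'Waterfall of Wishing': 0x5C give only one.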
exit_ids = {'Links House Exit': (0x01, 0x00),
'Inverted Links House Exit': (0x01, 0x00),
'Chris Houlihan Room Exit': (None, 0x3D),
'Desert Palace Exit (South)': (0x09, 0x0A),
'Desert Palace Exit (West)': (0x0B, 0x0C),
'Desert Palace Exit (East)': (0x0A, 0x0B),
'Desert Palace Exit (North)': (0x0C, 0x0D),
'Eastern Palace Exit': (0x08, 0x09),
'Tower of Hera Exit': (0x33, 0x2D),
'Hyrule Castle Exit (South)': (0x04, 0x03),
'Hyrule Castle Exit (West)': (0x03, 0x02),
'Hyrule Castle Exit (East)': (0x05, 0x04),
'Agahnims Tower Exit': (0x24, 0x25),
'Inverted Agahnims Tower Exit': (0x24, 0x25),
'Thieves Town Exit': (0x34, 0x35),
'Skull Woods First Section Exit': (0x2A, 0x2B),
'Skull Woods Second Section Exit (East)': (0x29, 0x2A),
'Skull Woods Second Section Exit (West)': (0x28, 0x29),
'Skull Woods Final Section Exit': (0x2B, 0x2C),
'Ice Palace Exit': (0x2D, 0x2E),
'Misery Mire Exit': (0x27, 0x28),
'Palace of Darkness Exit': (0x26, 0x27),
'Swamp Palace Exit': (0x25, 0x26),
'Turtle Rock Exit (Front)': (0x35, 0x34),
'Turtle Rock Ledge Exit (West)': (0x15, 0x16),
'Turtle Rock Ledge Exit (East)': (0x19, 0x1A),
'Turtle Rock Isolated Ledge Exit': (0x18, 0x19),
'Hyrule Castle Secret Entrance Exit': (0x32, 0x33),
'Kakariko Well Exit': (0x39, 0x3A),
'Bat Cave Exit': (0x11, 0x12),
'Elder House Exit (East)': (0x0E, 0x0F),
'Elder House Exit (West)': (0x0D, 0x0E),
'North Fairy Cave Exit': (0x38, 0x39),
'Lost Woods Hideout Exit': (0x2C, 0x36),
'Lumberjack Tree Exit': (0x12, 0x13),
'Two Brothers House Exit (East)': (0x10, 0x11),
'Two Brothers House Exit (West)': (0x0F, 0x10),
'Sanctuary Exit': (0x02, 0x01),
'Old Man Cave Exit (East)': (0x07, 0x08),
'Old Man Cave Exit (West)': (0x06, 0x07),
'Old Man House Exit (Bottom)': (0x30, 0x31),
'Old Man House Exit (Top)': (0x31, 0x32),
'Death Mountain Return Cave Exit (West)': (0x2E, 0x2F),
'Death Mountain Return Cave Exit (East)': (0x2F, 0x30),
'Spectacle Rock Cave Exit': (0x21, 0x22),
'Spectacle Rock Cave Exit (Top)': (0x22, 0x23),
'Spectacle Rock Cave Exit (Peak)': (0x23, 0x24),
'Paradox Cave Exit (Bottom)': (0x1E, 0x1F),
'Paradox Cave Exit (Middle)': (0x1F, 0x20),
'Paradox Cave Exit (Top)': (0x20, 0x21),
'Fairy Ascension Cave Exit (Bottom)': (0x1A, 0x1B),
'Fairy Ascension Cave Exit (Top)': (0x1B, 0x1C),
'Spiral Cave Exit': (0x1C, 0x1D),
'Spiral Cave Exit (Top)': (0x1D, 0x1E),
'Bumper Cave Exit (Top)': (0x17, 0x18),
'Bumper Cave Exit (Bottom)': (0x16, 0x17),
'Superbunny Cave Exit (Top)': (0x14, 0x15),
'Superbunny Cave Exit (Bottom)': (0x13, 0x14),
'Hookshot Cave Exit (South)': (0x3A, 0x3B),
'Hookshot Cave Exit (North)': (0x3B, 0x3C),
'Ganons Tower Exit': (0x37, 0x38),
'Inverted Ganons Tower Exit': (0x37, 0x38),
'Pyramid Exit': (0x36, 0x37),
'Waterfall of Wishing': 0x5C,
'Dam': 0x4E,
'Blinds Hideout': 0x61,
'Lumberjack House': 0x6B,
'Bonk Fairy (Light)': 0x71,
'Bonk Fairy (Dark)': 0x71,
'Lake Hylia Healer Fairy': 0x5E,
'Swamp Healer Fairy': 0x5E,
'Desert Healer Fairy': 0x5E,
'Dark Lake Hylia Healer Fairy': 0x5E,
'Dark Lake Hylia Ledge Healer Fairy': 0x5E,
'Dark Desert Healer Fairy': 0x5E,
'Dark Death Mountain Healer Fairy': 0x5E,
'Fortune Teller (Light)': 0x65,
'Lake Hylia Fortune Teller': 0x65,
'Kings Grave': 0x5B,
'Tavern': 0x43,
'Chicken House': 0x4B,
'Aginahs Cave': 0x4D,
'Sahasrahlas Hut': 0x45,
'Cave Shop (Lake Hylia)': 0x58,
'Cave Shop (Dark Death Mountain)': 0x58,
'Capacity Upgrade': 0x5D,
'Blacksmiths Hut': 0x64,
'Sick Kids House': 0x40,
'Lost Woods Gamble': 0x3C,
'Snitch Lady (East)': 0x3E,
'Snitch Lady (West)': 0x3F,
'Bush Covered House': 0x44,
'Tavern (Front)': 0x42,
'Light World Bomb Hut': 0x4A,
'Kakariko Shop': 0x46,
'Cave 45': 0x51,
'Graveyard Cave': 0x52,
'Checkerboard Cave': 0x72,
'Mini Moldorm Cave': 0x6C,
'Long Fairy Cave': 0x55,
'Good Bee Cave': 0x56,
'20 Rupee Cave': 0x6F,
'50 Rupee Cave': 0x6D,
'Ice Rod Cave': 0x84,
'Bonk Rock Cave': 0x6E,
'Library': 0x49,
'Kakariko Gamble Game': 0x67,
'Potion Shop': 0x4C,
'Hookshot Fairy': 0x50,
'Pyramid Fairy': 0x63,
'East Dark World Hint': 0x69,
'Palace of Darkness Hint': 0x68,
'Big Bomb Shop': 0x53,
'Inverted Big Bomb Shop': 0x53,
'Village of Outcasts Shop': 0x60,
'Dark Lake Hylia Shop': 0x60,
'Dark World Lumberjack Shop': 0x60,
'Dark World Potion Shop': 0x60,
'Dark Lake Hylia Ledge Spike Cave': 0x70,
'Dark Lake Hylia Ledge Hint': 0x6A,
'Hype Cave': 0x3D,
'Brewery': 0x48,
'C-Shaped House': 0x54,
'Chest Game': 0x47,
'Dark World Hammer Peg Cave': 0x83,
'Red Shield Shop': 0x57,
'Dark Sanctuary Hint': 0x5A,
'Inverted Dark Sanctuary': 0x5A,
'Fortune Teller (Dark)': 0x66,
'Archery Game': 0x59,
'Mire Shed': 0x5F,
'Dark Desert Hint': 0x62,
'Spike Cave': 0x41,
'Mimic Cave': 0x4F,
'Kakariko Well (top)': 0x80,
'Hyrule Castle Secret Entrance': 0x7D,
'Bat Cave (right)': 0x7E,
'North Fairy Cave': 0x7C,
'Lost Woods Hideout (top)': 0x7A,
'Lumberjack Tree (top)': 0x7F,
'Sewer Drop': 0x81,
'Skull Back Drop': 0x79,
'Skull Left Drop': 0x77,
'Skull Pinball': 0x78,
'Skull Pot Circle': 0x76,
'Pyramid': 0x7B}
| 59.117923 | 457 | 0.56081 | ["MIT"] | Nathan-Carlson/ALttPDoorRandomizer | EntranceShuffle.py | 226,599 | Python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova floating ips.
"""
import logging
from django import template
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django import shortcuts
from django.utils.translation import ugettext as _
from django_openstack import api
from django_openstack import forms
from novaclient import exceptions as novaclient_exceptions
LOG = logging.getLogger('django_openstack.dash.views.floating_ip')
class ReleaseFloatingIp(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
LOG.info('Releasing Floating IP "%s"' % data['floating_ip_id'])
api.tenant_floating_ip_release(request, data['floating_ip_id'])
messages.info(request, _('Successfully released Floating IP: %s')
% data['floating_ip_id'])
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in ReleaseFloatingIp")
messages.error(request, 'Error releasing Floating IP from tenant: \
%s' % e.message)
return shortcuts.redirect(request.build_absolute_uri())
class FloatingIpAssociate(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
floating_ip = forms.CharField(widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
instance_id = forms.ChoiceField()
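    # The instance_id choices are replaced at runtime in __init__ below, using
    # the 'instances' list passed in via the form's initial data.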
def __init__(self, *args, **kwargs):
super(FloatingIpAssociate, self).__init__(*args, **kwargs)
instancelist = kwargs.get('initial', {}).get('instances', [])
self.fields['instance_id'] = forms.ChoiceField(
choices=instancelist,
label="Instance")
def handle(self, request, data):
try:
api.server_add_floating_ip(request,
data['instance_id'],
data['floating_ip_id'])
LOG.info('Associating Floating IP "%s" with Instance "%s"'
% (data['floating_ip'], data['instance_id']))
messages.info(request, _('Successfully associated Floating IP: \
%(ip)s with Instance: %(inst)s'
% {"ip": data['floating_ip'],
"inst": data['instance_id']}))
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAssociate")
messages.error(request, 'Error associating Floating IP: %s'
% e.message)
return shortcuts.redirect('dash_floating_ips', request.user.tenant_id)
class FloatingIpDisassociate(forms.SelfHandlingForm):
floating_ip_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
fip = api.tenant_floating_ip_get(request, data['floating_ip_id'])
api.server_remove_floating_ip(request, fip.instance_id, fip.id)
LOG.info('Disassociating Floating IP "%s"'
% data['floating_ip_id'])
messages.info(request,
_('Successfully disassociated Floating IP: %s')
% data['floating_ip_id'])
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAssociate")
messages.error(request, _('Error disassociating Floating IP: %s')
% e.message)
return shortcuts.redirect('dash_floating_ips', request.user.tenant_id)
class FloatingIpAllocate(forms.SelfHandlingForm):
tenant_id = forms.CharField(widget=forms.HiddenInput())
def handle(self, request, data):
try:
fip = api.tenant_floating_ip_allocate(request)
LOG.info('Allocating Floating IP "%s" to tenant "%s"'
% (fip.ip, data['tenant_id']))
messages.success(request,
_('Successfully allocated Floating IP "%(ip)s"\
to tenant "%(tenant)s"')
% {"ip": fip.ip, "tenant": data['tenant_id']})
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in FloatingIpAllocate")
            messages.error(request, _('Error allocating Floating IP to tenant '
                                      '"%(tenant)s": %(msg)s')
                           % {"tenant": data['tenant_id'], "msg": e.message})
return shortcuts.redirect('dash_floating_ips', request.user.tenant_id)
@login_required
def index(request, tenant_id):
for f in (ReleaseFloatingIp, FloatingIpDisassociate, FloatingIpAllocate):
_unused, handled = f.maybe_handle(request)
if handled:
return handled
try:
floating_ips = api.tenant_floating_ip_list(request)
except novaclient_exceptions.ClientException, e:
floating_ips = []
LOG.exception("ClientException in floating ip index")
messages.error(request,
_('Error fetching floating ips: %s') % e.message)
return shortcuts.render_to_response(
'django_openstack/dash/floating_ips/index.html', {
'allocate_form': FloatingIpAllocate(
initial={'tenant_id': request.user.tenant_id}),
'disassociate_form': FloatingIpDisassociate(),
'floating_ips': floating_ips,
'release_form': ReleaseFloatingIp(),
}, context_instance=template.RequestContext(request))
@login_required
def associate(request, tenant_id, ip_id):
instancelist = [(server.id, 'id: %s, name: %s' %
(server.id, server.name))
for server in api.server_list(request)]
form, handled = FloatingIpAssociate().maybe_handle(request, initial={
'floating_ip_id': ip_id,
'floating_ip': api.tenant_floating_ip_get(request, ip_id).ip,
'instances': instancelist})
if handled:
return handled
return shortcuts.render_to_response(
'django_openstack/dash/floating_ips/associate.html', {
'associate_form': form,
}, context_instance=template.RequestContext(request))
@login_required
def disassociate(request, tenant_id, ip_id):
form, handled = FloatingIpDisassociate().maybe_handle(request)
if handled:
return handled
return shortcuts.render_to_response(
'django_openstack/dash/floating_ips/associate.html', {
}, context_instance=template.RequestContext(request))
| 41.104396 | 79 | 0.634273 | ["Apache-2.0"] | daniel-hou0/horizon | django-openstack/django_openstack/dash/views/floating_ips.py | 7,481 | Python
import inspect
import unittest
from config.database import DATABASES
from src.masoniteorm.connections import ConnectionFactory
from src.masoniteorm.models import Model
from src.masoniteorm.query import QueryBuilder
from src.masoniteorm.query.grammars import SQLiteGrammar
from src.masoniteorm.relationships import belongs_to
from tests.utils import MockConnectionFactory
class User(Model):
__connection__ = "dev"
class BaseTestQueryRelationships(unittest.TestCase):
maxDiff = None
def get_builder(self, table="users", model=User):
connection = ConnectionFactory().make("sqlite")
return QueryBuilder(
grammar=SQLiteGrammar,
connection_class=connection,
connection="dev",
table=table,
model=model,
connection_details=DATABASES,
).on("dev")
def test_pagination(self):
builder = self.get_builder()
paginator = builder.table("users").paginate(1)
self.assertTrue(paginator.count)
self.assertTrue(paginator.serialize()["data"])
self.assertTrue(paginator.serialize()["meta"])
self.assertTrue(paginator.result)
self.assertTrue(paginator.current_page)
self.assertTrue(paginator.per_page)
self.assertTrue(paginator.count)
self.assertTrue(paginator.last_page)
self.assertTrue(paginator.next_page)
self.assertEqual(paginator.previous_page, None)
self.assertTrue(paginator.total)
for user in paginator:
self.assertIsInstance(user, User)
paginator = builder.table("users").simple_paginate(10, 1)
self.assertIsInstance(paginator.to_json(), str)
self.assertTrue(paginator.count)
self.assertTrue(paginator.serialize()["data"])
self.assertTrue(paginator.serialize()["meta"])
self.assertTrue(paginator.result)
self.assertTrue(paginator.current_page)
self.assertTrue(paginator.per_page)
self.assertTrue(paginator.count)
self.assertEqual(paginator.next_page, None)
self.assertEqual(paginator.previous_page, None)
for user in paginator:
self.assertIsInstance(user, User)
self.assertIsInstance(paginator.to_json(), str)
| 33.264706 | 65 | 0.693192 | ["MIT"] | MasoniteFramework/orm | tests/sqlite/builder/test_sqlite_builder_pagination.py | 2,262 | Python
# ============================================================================
# ============================================================================
# Copyright (c) 2021 Nghia T. Vo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Author: Nghia T. Vo
# E-mail:
# Description: Tests for the Algotom package.
# Contributors:
# ============================================================================
"""
Tests for the methods in prep/filtering.py
"""
import unittest
import numpy as np
import scipy.ndimage as ndi
import algotom.prep.filtering as filt
class FilteringMethods(unittest.TestCase):
def setUp(self):
self.eps = 10 ** (-6)
def test_fresnel_filter(self):
mat = np.random.rand(64, 64)
mat1 = filt.fresnel_filter(mat, 10, dim=1)
mat2 = filt.fresnel_filter(mat, 10, dim=2)
num1 = np.sum(np.abs(mat - mat1))
num2 = np.sum(np.abs(mat - mat2))
num3 = np.sum(np.abs(mat1 - mat2))
self.assertTrue(num1 > self.eps and num2 > self.eps and num3 > self.eps)
def test_double_wedge_filter(self):
size = 129
idx1 = size // 2
rad = size // 4
num_proj = 73
# Create a phantom and its sinogram.
mat = np.zeros((size, size), dtype=np.float32)
mat[idx1 - 10:idx1 + 5, idx1 + 10:idx1 + 20] = np.float32(1.0)
mat = ndi.gaussian_filter(mat, 1.0)
sino_360_std = np.zeros((num_proj, size), dtype=np.float32)
angles = np.linspace(0.0, 360.0, len(sino_360_std), dtype=np.float32)
for i, angle in enumerate(angles):
sino_360_std[i] = np.sum(ndi.rotate(mat, angle, reshape=False),
axis=0)
sino_360_std = sino_360_std / size
# Create a phantom with a feature larger than the crop FOV.
mat = np.zeros((size, size), dtype=np.float32)
mat[idx1 - 10:idx1 + 5, idx1 + 10:idx1 + 20] = np.float32(1.0)
mat[5:25, 10:25] = np.float32(1.5)
mat = ndi.gaussian_filter(mat, 1.0)
sino_360 = np.zeros((num_proj, size), dtype=np.float32)
angles = np.linspace(0.0, 360.0, len(sino_360), dtype=np.float32)
for i, angle in enumerate(angles):
sino_360[i] = np.sum(ndi.rotate(mat, angle, reshape=False), axis=0)
sino_360 = sino_360 / size
sino_360_crop0 = sino_360_std[:, idx1 - rad: idx1 + rad]
sino_360_crop = sino_360[:, idx1 - rad: idx1 + rad]
sino_180_crop0 = sino_360_crop0[:num_proj // 2 + 1]
sino_180_crop = sino_360_crop[:num_proj // 2 + 1]
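        # Filter both croppings and compare against the clean single-feature
        # sinograms after rescaling each result to the same mean intensity.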
sino_360_filt = filt.double_wedge_filter(sino_360_crop,
sino_type="360", iteration=10)
sino_360_filt = sino_360_filt * (
np.mean(sino_360_crop0) / np.mean(np.abs(sino_360_filt)))
num1 = np.max(np.abs(sino_360_filt - sino_360_crop0))
sino_180_filt = filt.double_wedge_filter(sino_180_crop, center=32.0,
sino_type="180", iteration=10)
sino_180_filt = sino_180_filt * (
np.mean(sino_180_crop0) / np.mean(np.abs(sino_180_filt)))
num2 = np.max(np.abs(sino_180_filt - sino_180_crop0))
        self.assertTrue(num1 <= 0.1 and num2 <= 0.1)
| 44.056818 | 80 | 0.570286 | ["Apache-2.0"] | gbzan/algotom | tests/test_prep/test_filtering.py | 3,877 | Python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
def test_convert_padding_direction(self):
pad = 1
left_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 7, 8, 9, 10],
[1, 1, 1, 11, 12],
]
)
right_pad = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[7, 8, 9, 10, 1],
[11, 12, 1, 1, 1],
]
)
self.assertAlmostEqual(
right_pad,
utils.convert_padding_direction(
left_pad,
pad,
left_to_right=True,
),
)
self.assertAlmostEqual(
left_pad,
utils.convert_padding_direction(
right_pad,
pad,
right_to_left=True,
),
)
def test_make_positions(self):
pad = 1
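        # make_positions numbers real tokens starting at pad + 1 and keeps
        # padded slots at the pad index, as the expected tensors below show.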
left_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
]
)
left_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
]
)
right_pad_input = torch.LongTensor(
[
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
]
)
right_pad_output = torch.LongTensor(
[
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
]
)
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad),
)
def test_clip_grad_norm_(self):
params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, 0.0)
params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
for p in params:
p.grad = torch.full((5,), fill_value=2.0)
grad_norm = utils.clip_grad_norm_(params, 1.0)
exp_grad_norm = torch.full((15,), fill_value=2.0).norm()
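        # 3 parameters x 5 elements with gradient 2.0 each give an unclipped norm
        # of sqrt(15 * 2.0**2) = sqrt(60); after that clipping, the second call
        # reports a total norm of 1.0.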
self.assertTrue(torch.is_tensor(grad_norm))
self.assertEqual(grad_norm, exp_grad_norm)
grad_norm = utils.clip_grad_norm_(params, 1.0)
self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
def test_resolve_max_positions_with_tuple(self):
resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000)
self.assertEqual(resolved, (2000, 100, 2000))
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == "__main__":
unittest.main()
| 28.652174 | 78 | 0.493475 | ["MIT"] | 1-punchMan/fairseq | tests/test_utils.py | 3,295 | Python
#!/usr/bin/env python3
# encoding: utf-8
import os
import cv2
cv2.setNumThreads(0)
import numpy as np
from utils.visualize import print_iou, show_img, show_prediction
from engine.evaluator import Evaluator
from engine.logger import get_logger
from seg_opr.metric import hist_info, compute_score
logger = get_logger()
class SegEvaluator(Evaluator):
def func_per_iteration(self, data, device, iter=None):
if self.config is not None: config = self.config
img = data['data']
label = data['label']
name = data['fn']
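        # A single evaluation scale means the whole image is predicted at once;
        # multiple scales fall back to sliding-window evaluation over crops.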
if len(config.eval_scale_array) == 1:
pred = self.whole_eval(img, None, device)
else:
pred = self.sliding_eval(img, config.eval_crop_size, config.eval_stride_rate, device)
hist_tmp, labeled_tmp, correct_tmp = hist_info(config.num_classes, pred, label)
results_dict = {'hist': hist_tmp, 'labeled': labeled_tmp, 'correct': correct_tmp}
if self.save_path is not None:
fn = name + '.png'
cv2.imwrite(os.path.join(self.save_path, fn), pred)
logger.info('Save the image ' + fn)
# tensorboard logger does not fit multiprocess
if self.logger is not None and iter is not None:
colors = self.dataset.get_class_colors()
image = img
clean = np.zeros(label.shape)
comp_img = show_img(colors, config.background, image, clean, label, pred)
self.logger.add_image('vis', np.swapaxes(np.swapaxes(comp_img, 0, 2), 1, 2), iter)
print("self.show_prediction = ", self.show_prediction)
if self.show_image or self.show_prediction:
colors = self.dataset.get_class_colors()
image = img
clean = np.zeros(label.shape)
if self.show_image:
comp_img = show_img(colors, config.background, image, clean, label, pred)
else:
comp_img = show_prediction(colors, config.background, image, pred)
cv2.imwrite(os.path.join(os.path.realpath('.'), self.config.save, "eval", name+".vis.png"), comp_img[:,:,::-1])
# cv2.imwrite(name + ".png", comp_img[:,:,::-1])
return results_dict
def compute_metric(self, results):
hist = np.zeros((self.config.num_classes, self.config.num_classes))
correct = 0
labeled = 0
count = 0
for d in results:
hist += d['hist']
correct += d['correct']
labeled += d['labeled']
count += 1
iu, mean_IU, mean_IU_no_back, mean_pixel_acc = compute_score(hist, correct, labeled)
result_line = print_iou(iu, mean_pixel_acc, self.dataset.get_class_names(), True)
return result_line, mean_IU
| 39.084507 | 123 | 0.619459 | ["MIT"] | SuzukiDaichi-git/ai_edge_contest | train/eval.py | 2,775 | Python
# for value in range(1,9):
# print(' * ' * (9 - value))
# print("---------------------------")
# for i in range(5):
# for j in range(i+1):
# print(" O ", end="")
# print()
# print("---------------------------")
# for i in range(5):
# for s in range(i):
# print(" ", end="")
# for j in range(i, 5):
# print(" O", end="")
# print()
# print("---------------------------")
# for i in range(5):
# for j in range(i, 5):
# print(end="O ")
# print()
# print("---------------------------")
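# Prints the numbers 1-15 filled in column by column as a triangle:
# 1
# 2 6
# 3 7 10
# 4 8 11 13
# 5 9 12 14 15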
for i in range(1, 6):
print(i, end=" ")
m = 4
k = i + m
for j in range(1, i):
print(k, end=" ")
m = m - 1
k = k + m
    print()
| 22.382353 | 39 | 0.321945 | ["MIT"] | Kunal3Kumar/Assignment | pattern.py | 761 | Python
import dataclasses
import warnings
import numpy as np
import torch
def to_device(data, device=None, dtype=None, non_blocking=False, copy=False):
"""Change the device of object recursively"""
if isinstance(data, dict):
return {
k: to_device(v, device, dtype, non_blocking, copy) for k, v in data.items()
}
elif dataclasses.is_dataclass(data) and not isinstance(data, type):
return type(data)(
*[
to_device(v, device, dtype, non_blocking, copy)
for v in dataclasses.astuple(data)
]
)
# maybe namedtuple. I don't know the correct way to judge namedtuple.
elif isinstance(data, tuple) and type(data) is not tuple:
return type(data)(
*[to_device(o, device, dtype, non_blocking, copy) for o in data]
)
elif isinstance(data, (list, tuple)):
return type(data)(to_device(v, device, dtype, non_blocking, copy) for v in data)
elif isinstance(data, np.ndarray):
return to_device(torch.from_numpy(data), device, dtype, non_blocking, copy)
elif isinstance(data, torch.Tensor):
return data.to(device, dtype, non_blocking, copy)
else:
return data
def force_gatherable(data, device):
"""Change object to gatherable in torch.nn.DataParallel recursively
The difference from to_device() is changing to torch.Tensor if float or int
value is found.
The restriction to the returned value in DataParallel:
The object must be
- torch.cuda.Tensor
- 1 or more dimension. 0-dimension-tensor sends warning.
or a list, tuple, dict.
"""
if isinstance(data, dict):
return {k: force_gatherable(v, device) for k, v in data.items()}
# DataParallel can't handle NamedTuple well
elif isinstance(data, tuple) and type(data) is not tuple:
return type(data)(*[force_gatherable(o, device) for o in data])
elif isinstance(data, (list, tuple, set)):
return type(data)(force_gatherable(v, device) for v in data)
elif isinstance(data, np.ndarray):
return force_gatherable(torch.from_numpy(data), device)
elif isinstance(data, torch.Tensor):
if data.dim() == 0:
# To 1-dim array
data = data[None]
return data.to(device)
elif isinstance(data, float):
return torch.tensor([data], dtype=torch.float, device=device)
elif isinstance(data, int):
return torch.tensor([data], dtype=torch.long, device=device)
elif data is None:
return None
else:
warnings.warn(f"{type(data)} may not be gatherable by DataParallel")
return data
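# Minimal usage sketch (hypothetical values), illustrating the conversions above:
#   stats = {"loss": 0.25, "ntokens": 10, "attn": np.zeros((1, 4))}
#   stats = force_gatherable(stats, device="cuda:0")
#   # floats/ints become 1-dim tensors on the device; ndarrays become tensors too.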
| 37.236111 | 88 | 0.645655 | ["Apache-2.0"] | 18445864529/espnet | espnet2/torch_utils/device_funcs.py | 2,681 | Python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.datalabeling_v1beta1.types import annotation_spec_set
from google.cloud.datalabeling_v1beta1.types import (
annotation_spec_set as gcd_annotation_spec_set,
)
from google.cloud.datalabeling_v1beta1.types import data_labeling_service
from google.cloud.datalabeling_v1beta1.types import dataset
from google.cloud.datalabeling_v1beta1.types import dataset as gcd_dataset
from google.cloud.datalabeling_v1beta1.types import evaluation
from google.cloud.datalabeling_v1beta1.types import evaluation_job
from google.cloud.datalabeling_v1beta1.types import evaluation_job as gcd_evaluation_job
from google.cloud.datalabeling_v1beta1.types import instruction
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-datalabeling",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DataLabelingServiceTransport(abc.ABC):
"""Abstract transport class for DataLabelingService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "datalabeling.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_dataset: gapic_v1.method.wrap_method(
self.create_dataset, default_timeout=30.0, client_info=client_info,
),
self.get_dataset: gapic_v1.method.wrap_method(
self.get_dataset,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_datasets: gapic_v1.method.wrap_method(
self.list_datasets,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.delete_dataset: gapic_v1.method.wrap_method(
self.delete_dataset,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.import_data: gapic_v1.method.wrap_method(
self.import_data, default_timeout=30.0, client_info=client_info,
),
self.export_data: gapic_v1.method.wrap_method(
self.export_data,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.get_data_item: gapic_v1.method.wrap_method(
self.get_data_item,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_data_items: gapic_v1.method.wrap_method(
self.list_data_items,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.get_annotated_dataset: gapic_v1.method.wrap_method(
self.get_annotated_dataset,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_annotated_datasets: gapic_v1.method.wrap_method(
self.list_annotated_datasets,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.delete_annotated_dataset: gapic_v1.method.wrap_method(
self.delete_annotated_dataset,
default_timeout=None,
client_info=client_info,
),
self.label_image: gapic_v1.method.wrap_method(
self.label_image, default_timeout=30.0, client_info=client_info,
),
self.label_video: gapic_v1.method.wrap_method(
self.label_video, default_timeout=30.0, client_info=client_info,
),
self.label_text: gapic_v1.method.wrap_method(
self.label_text, default_timeout=30.0, client_info=client_info,
),
self.get_example: gapic_v1.method.wrap_method(
self.get_example,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_examples: gapic_v1.method.wrap_method(
self.list_examples,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.create_annotation_spec_set: gapic_v1.method.wrap_method(
self.create_annotation_spec_set,
default_timeout=30.0,
client_info=client_info,
),
self.get_annotation_spec_set: gapic_v1.method.wrap_method(
self.get_annotation_spec_set,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_annotation_spec_sets: gapic_v1.method.wrap_method(
self.list_annotation_spec_sets,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.delete_annotation_spec_set: gapic_v1.method.wrap_method(
self.delete_annotation_spec_set,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.create_instruction: gapic_v1.method.wrap_method(
self.create_instruction, default_timeout=30.0, client_info=client_info,
),
self.get_instruction: gapic_v1.method.wrap_method(
self.get_instruction,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_instructions: gapic_v1.method.wrap_method(
self.list_instructions,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.delete_instruction: gapic_v1.method.wrap_method(
self.delete_instruction,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.get_evaluation: gapic_v1.method.wrap_method(
self.get_evaluation,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.search_evaluations: gapic_v1.method.wrap_method(
self.search_evaluations,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.search_example_comparisons: gapic_v1.method.wrap_method(
self.search_example_comparisons,
default_timeout=30.0,
client_info=client_info,
),
self.create_evaluation_job: gapic_v1.method.wrap_method(
self.create_evaluation_job,
default_timeout=30.0,
client_info=client_info,
),
self.update_evaluation_job: gapic_v1.method.wrap_method(
self.update_evaluation_job,
default_timeout=30.0,
client_info=client_info,
),
self.get_evaluation_job: gapic_v1.method.wrap_method(
self.get_evaluation_job,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.pause_evaluation_job: gapic_v1.method.wrap_method(
self.pause_evaluation_job,
default_timeout=30.0,
client_info=client_info,
),
self.resume_evaluation_job: gapic_v1.method.wrap_method(
self.resume_evaluation_job,
default_timeout=30.0,
client_info=client_info,
),
self.delete_evaluation_job: gapic_v1.method.wrap_method(
self.delete_evaluation_job,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
self.list_evaluation_jobs: gapic_v1.method.wrap_method(
self.list_evaluation_jobs,
default_retry=retries.Retry(
initial=0.1,
maximum=30.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=30.0,
),
default_timeout=30.0,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_dataset(
self,
) -> Callable[
[data_labeling_service.CreateDatasetRequest],
Union[gcd_dataset.Dataset, Awaitable[gcd_dataset.Dataset]],
]:
raise NotImplementedError()
@property
def get_dataset(
self,
) -> Callable[
[data_labeling_service.GetDatasetRequest],
Union[dataset.Dataset, Awaitable[dataset.Dataset]],
]:
raise NotImplementedError()
@property
def list_datasets(
self,
) -> Callable[
[data_labeling_service.ListDatasetsRequest],
Union[
data_labeling_service.ListDatasetsResponse,
Awaitable[data_labeling_service.ListDatasetsResponse],
],
]:
raise NotImplementedError()
@property
def delete_dataset(
self,
) -> Callable[
[data_labeling_service.DeleteDatasetRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def import_data(
self,
) -> Callable[
[data_labeling_service.ImportDataRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def export_data(
self,
) -> Callable[
[data_labeling_service.ExportDataRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_data_item(
self,
) -> Callable[
[data_labeling_service.GetDataItemRequest],
Union[dataset.DataItem, Awaitable[dataset.DataItem]],
]:
raise NotImplementedError()
@property
def list_data_items(
self,
) -> Callable[
[data_labeling_service.ListDataItemsRequest],
Union[
data_labeling_service.ListDataItemsResponse,
Awaitable[data_labeling_service.ListDataItemsResponse],
],
]:
raise NotImplementedError()
@property
def get_annotated_dataset(
self,
) -> Callable[
[data_labeling_service.GetAnnotatedDatasetRequest],
Union[dataset.AnnotatedDataset, Awaitable[dataset.AnnotatedDataset]],
]:
raise NotImplementedError()
@property
def list_annotated_datasets(
self,
) -> Callable[
[data_labeling_service.ListAnnotatedDatasetsRequest],
Union[
data_labeling_service.ListAnnotatedDatasetsResponse,
Awaitable[data_labeling_service.ListAnnotatedDatasetsResponse],
],
]:
raise NotImplementedError()
@property
def delete_annotated_dataset(
self,
) -> Callable[
[data_labeling_service.DeleteAnnotatedDatasetRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def label_image(
self,
) -> Callable[
[data_labeling_service.LabelImageRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def label_video(
self,
) -> Callable[
[data_labeling_service.LabelVideoRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def label_text(
self,
) -> Callable[
[data_labeling_service.LabelTextRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_example(
self,
) -> Callable[
[data_labeling_service.GetExampleRequest],
Union[dataset.Example, Awaitable[dataset.Example]],
]:
raise NotImplementedError()
@property
def list_examples(
self,
) -> Callable[
[data_labeling_service.ListExamplesRequest],
Union[
data_labeling_service.ListExamplesResponse,
Awaitable[data_labeling_service.ListExamplesResponse],
],
]:
raise NotImplementedError()
@property
def create_annotation_spec_set(
self,
) -> Callable[
[data_labeling_service.CreateAnnotationSpecSetRequest],
Union[
gcd_annotation_spec_set.AnnotationSpecSet,
Awaitable[gcd_annotation_spec_set.AnnotationSpecSet],
],
]:
raise NotImplementedError()
@property
def get_annotation_spec_set(
self,
) -> Callable[
[data_labeling_service.GetAnnotationSpecSetRequest],
Union[
annotation_spec_set.AnnotationSpecSet,
Awaitable[annotation_spec_set.AnnotationSpecSet],
],
]:
raise NotImplementedError()
@property
def list_annotation_spec_sets(
self,
) -> Callable[
[data_labeling_service.ListAnnotationSpecSetsRequest],
Union[
data_labeling_service.ListAnnotationSpecSetsResponse,
Awaitable[data_labeling_service.ListAnnotationSpecSetsResponse],
],
]:
raise NotImplementedError()
@property
def delete_annotation_spec_set(
self,
) -> Callable[
[data_labeling_service.DeleteAnnotationSpecSetRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_instruction(
self,
) -> Callable[
[data_labeling_service.CreateInstructionRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def get_instruction(
self,
) -> Callable[
[data_labeling_service.GetInstructionRequest],
Union[instruction.Instruction, Awaitable[instruction.Instruction]],
]:
raise NotImplementedError()
@property
def list_instructions(
self,
) -> Callable[
[data_labeling_service.ListInstructionsRequest],
Union[
data_labeling_service.ListInstructionsResponse,
Awaitable[data_labeling_service.ListInstructionsResponse],
],
]:
raise NotImplementedError()
@property
def delete_instruction(
self,
) -> Callable[
[data_labeling_service.DeleteInstructionRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_evaluation(
self,
) -> Callable[
[data_labeling_service.GetEvaluationRequest],
Union[evaluation.Evaluation, Awaitable[evaluation.Evaluation]],
]:
raise NotImplementedError()
@property
def search_evaluations(
self,
) -> Callable[
[data_labeling_service.SearchEvaluationsRequest],
Union[
data_labeling_service.SearchEvaluationsResponse,
Awaitable[data_labeling_service.SearchEvaluationsResponse],
],
]:
raise NotImplementedError()
@property
def search_example_comparisons(
self,
) -> Callable[
[data_labeling_service.SearchExampleComparisonsRequest],
Union[
data_labeling_service.SearchExampleComparisonsResponse,
Awaitable[data_labeling_service.SearchExampleComparisonsResponse],
],
]:
raise NotImplementedError()
@property
def create_evaluation_job(
self,
) -> Callable[
[data_labeling_service.CreateEvaluationJobRequest],
Union[evaluation_job.EvaluationJob, Awaitable[evaluation_job.EvaluationJob]],
]:
raise NotImplementedError()
@property
def update_evaluation_job(
self,
) -> Callable[
[data_labeling_service.UpdateEvaluationJobRequest],
Union[
gcd_evaluation_job.EvaluationJob,
Awaitable[gcd_evaluation_job.EvaluationJob],
],
]:
raise NotImplementedError()
@property
def get_evaluation_job(
self,
) -> Callable[
[data_labeling_service.GetEvaluationJobRequest],
Union[evaluation_job.EvaluationJob, Awaitable[evaluation_job.EvaluationJob]],
]:
raise NotImplementedError()
@property
def pause_evaluation_job(
self,
) -> Callable[
[data_labeling_service.PauseEvaluationJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def resume_evaluation_job(
self,
) -> Callable[
[data_labeling_service.ResumeEvaluationJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def delete_evaluation_job(
self,
) -> Callable[
[data_labeling_service.DeleteEvaluationJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_evaluation_jobs(
self,
) -> Callable[
[data_labeling_service.ListEvaluationJobsRequest],
Union[
data_labeling_service.ListEvaluationJobsResponse,
Awaitable[data_labeling_service.ListEvaluationJobsResponse],
],
]:
raise NotImplementedError()
__all__ = ("DataLabelingServiceTransport",)
| 35.424769 | 101 | 0.571275 | [
"Apache-2.0"
] | LaudateCorpus1/python-datalabeling | google/cloud/datalabeling_v1beta1/services/data_labeling_service/transports/base.py | 30,607 | Python |
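# Hedged sketch (not part of the library) showing how a concrete transport can
# satisfy one of the abstract properties above: it only has to return a
# callable with the declared request/response signature -- a real gRPC
# transport returns the bound stub method. The class name and the lambda body
# are illustrative assumptions.
class _InMemoryTransport(DataLabelingServiceTransport):
    @property
    def get_data_item(self):
        # Echo back a DataItem whose resource name matches the request.
        return lambda request: dataset.DataItem(name=request.name)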
from itertools import chain, starmap
from typing import Any, Tuple
def flatten_json(dictionary: dict) -> dict:
"""Flatten a nested json file"""
while True:
dictionary = dict(chain.from_iterable(starmap(_unpack, dictionary.items())))
if _atomic_values(dictionary):
break
return dictionary
def _atomic_values(dictionary: dict) -> bool:
return not _nested_dict(dictionary) and not _nested_list(dictionary)
def _process_dict_values(parent_key: str, key: str, value: Any) -> Tuple[str, Any]:
    new_key = parent_key + "_" + key
    return new_key, value
def _process_list(parent_key: str, i: int, value: Any) -> Tuple[str, Any]:
    new_key = parent_key + "_" + str(i)
    return new_key, value
def _nested_dict(dictionary: dict) -> bool:
return any(isinstance(value, dict) for value in dictionary.values())
def _nested_list(dictionary: dict) -> bool:
return any(isinstance(value, list) for value in dictionary.values())
def _unpack(parent_key, parent_value):
"""Unpack one level of nesting in json file"""
# Unpack one level only!!!
if isinstance(parent_value, dict):
for key, value in parent_value.items():
yield _process_dict_values(parent_key, key, value)
elif isinstance(parent_value, list):
for i, value in enumerate(parent_value):
            yield _process_list(parent_key, i, value)
else:
yield parent_key, parent_value
| 28.653061 | 84 | 0.690171 | [
"MIT"
] | BentoBox-Project/activejson | activejson/flatten_json_tools.py | 1,404 | Python |
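# Hedged usage sketch for flatten_json above; the nested payload is made up.
if __name__ == "__main__":
    nested = {"user": {"name": "Ada", "tags": ["admin", "dev"]}, "active": True}
    print(flatten_json(nested))
    # -> {'user_name': 'Ada', 'user_tags_0': 'admin', 'user_tags_1': 'dev', 'active': True}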
'''subclassing kde
Author: josef pktd
'''
import numpy as np
from numpy.testing import assert_almost_equal, assert_
import scipy
from scipy import stats
import matplotlib.pylab as plt
class gaussian_kde_set_covariance(stats.gaussian_kde):
'''
from Anne Archibald in mailinglist:
http://www.nabble.com/Width-of-the-gaussian-in-stats.kde.gaussian_kde---td19558924.html#a19558924
'''
def __init__(self, dataset, covariance):
self.covariance = covariance
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance(self):
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
class gaussian_kde_covfact(stats.gaussian_kde):
def __init__(self, dataset, covfact = 'scotts'):
self.covfact = covfact
scipy.stats.gaussian_kde.__init__(self, dataset)
def _compute_covariance_(self):
'''not used'''
self.inv_cov = np.linalg.inv(self.covariance)
self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) * self.n
def covariance_factor(self):
if self.covfact in ['sc', 'scotts']:
return self.scotts_factor()
if self.covfact in ['si', 'silverman']:
return self.silverman_factor()
elif self.covfact:
return float(self.covfact)
else:
raise ValueError('covariance factor has to be scotts, silverman or a number')
def reset_covfact(self, covfact):
self.covfact = covfact
self.covariance_factor()
self._compute_covariance()
def plotkde(covfact):
gkde.reset_covfact(covfact)
kdepdf = gkde.evaluate(ind)
plt.figure()
    # plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation - ' + str(gkde.covfact))
plt.legend()
def test_kde_1d():
np.random.seed(8765678)
n_basesample = 500
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
print(xnmean, xnstd)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
# evaluate the density function for the kde for some points
xs = np.linspace(-7,7,501)
kdepdf = gkde.evaluate(xs)
normpdf = stats.norm.pdf(xs, loc=xnmean, scale=xnstd)
print('MSE', np.sum((kdepdf - normpdf)**2))
print('axabserror', np.max(np.abs(kdepdf - normpdf)))
intervall = xs[1] - xs[0]
assert_(np.sum((kdepdf - normpdf)**2)*intervall < 0.01)
#assert_array_almost_equal(kdepdf, normpdf, decimal=2)
print(gkde.integrate_gaussian(0.0, 1.0))
print(gkde.integrate_box_1d(-np.inf, 0.0))
print(gkde.integrate_box_1d(0.0, np.inf))
print(gkde.integrate_box_1d(-np.inf, xnmean))
print(gkde.integrate_box_1d(xnmean, np.inf))
assert_almost_equal(gkde.integrate_box_1d(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box_1d(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(xnmean, np.inf), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_box(-np.inf, xnmean), 0.5, decimal=1)
assert_almost_equal(gkde.integrate_kde(gkde),
(kdepdf**2).sum()*intervall, decimal=2)
assert_almost_equal(gkde.integrate_gaussian(xnmean, xnstd**2),
(kdepdf*normpdf).sum()*intervall, decimal=2)
## assert_almost_equal(gkde.integrate_gaussian(0.0, 1.0),
## (kdepdf*normpdf).sum()*intervall, decimal=2)
if __name__ == '__main__':
# generate a sample
n_basesample = 1000
np.random.seed(8765678)
alpha = 0.6 #weight for (prob of) lower distribution
mlow, mhigh = (-3,3) #mean locations for gaussian mixture
    # np.random.randn requires integer sizes, so cast the mixture counts
    xn = np.concatenate([mlow + np.random.randn(int(alpha * n_basesample)),
                         mhigh + np.random.randn(int((1 - alpha) * n_basesample))])
# get kde for original sample
#gkde = stats.gaussian_kde(xn)
gkde = gaussian_kde_covfact(xn, 0.1)
# evaluate the density function for the kde for some points
ind = np.linspace(-7,7,101)
kdepdf = gkde.evaluate(ind)
plt.figure()
    # plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
gkde = gaussian_kde_covfact(xn, 'scotts')
kdepdf = gkde.evaluate(ind)
plt.figure()
    # plot histogram of sample
plt.hist(xn, bins=20, normed=1)
# plot estimated density
plt.plot(ind, kdepdf, label='kde', color="g")
# plot data generating density
plt.plot(ind, alpha * stats.norm.pdf(ind, loc=mlow) +
(1-alpha) * stats.norm.pdf(ind, loc=mhigh),
color="r", label='DGP: normal mix')
plt.title('Kernel Density Estimation')
plt.legend()
#plt.show()
for cv in ['scotts', 'silverman', 0.05, 0.1, 0.5]:
plotkde(cv)
test_kde_1d()
np.random.seed(8765678)
n_basesample = 1000
xn = np.random.randn(n_basesample)
xnmean = xn.mean()
xnstd = xn.std(ddof=1)
# get kde for original sample
gkde = stats.gaussian_kde(xn)
| 34.357576 | 101 | 0.646499 | [
"BSD-3-Clause"
] | ADI10HERO/statsmodels | statsmodels/sandbox/nonparametric/kdecovclass.py | 5,669 | Python |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Invalid activity status.
FAILEDOPERATION_ACTIVITYSTATUSINVALID = 'FailedOperation.ActivityStatusInvalid'
# Face landmark points fall outside the face rectangle.
FAILEDOPERATION_FACEBORDERCHECKFAILED = 'FailedOperation.FaceBorderCheckFailed'
# Face detection failed.
FAILEDOPERATION_FACEDETECTFAILED = 'FailedOperation.FaceDetectFailed'
# Face feature extraction failed.
FAILEDOPERATION_FACEFEATUREFAILED = 'FailedOperation.FaceFeatureFailed'
# Face fusion failed; please retry with a different image.
FAILEDOPERATION_FACEFUSIONERROR = 'FailedOperation.FaceFusionError'
# Face pose detection failed.
FAILEDOPERATION_FACEPOSEFAILED = 'FailedOperation.FacePoseFailed'
# Invalid face rectangle.
FAILEDOPERATION_FACERECTINVALID = 'FailedOperation.FaceRectInvalid'
# Invalid face landmark points.
FAILEDOPERATION_FACESHAPEINVALID = 'FailedOperation.FaceShapeInvalid'
# Face filtered out for being too small; face resolution should be at least 34*34.
FAILEDOPERATION_FACESIZETOOSMALL = 'FailedOperation.FaceSizeTooSmall'
# Face fusion backend service error.
FAILEDOPERATION_FUSEBACKENDSERVERFAULT = 'FailedOperation.FuseBackendServerFault'
# No face detected.
FAILEDOPERATION_FUSEDETECTNOFACE = 'FailedOperation.FuseDetectNoFace'
# Operation too frequent; rate limit triggered.
FAILEDOPERATION_FUSEFREQCTRL = 'FailedOperation.FuseFreqCtrl'
# Image processing error.
FAILEDOPERATION_FUSEIMAGEERROR = 'FailedOperation.FuseImageError'
# Internal service error; please retry.
FAILEDOPERATION_FUSEINNERERROR = 'FailedOperation.FuseInnerError'
# Material has not passed review.
FAILEDOPERATION_FUSEMATERIALNOTAUTH = 'FailedOperation.FuseMaterialNotAuth'
# Material does not exist.
FAILEDOPERATION_FUSEMATERIALNOTEXIST = 'FailedOperation.FuseMaterialNotExist'
# Failed to save the result image.
FAILEDOPERATION_FUSESAVEPHOTOFAIL = 'FailedOperation.FuseSavePhotoFail'
# Face detection - image decoding failed.
FAILEDOPERATION_IMAGEDECODEFAILED = 'FailedOperation.ImageDecodeFailed'
# Image download failed.
FAILEDOPERATION_IMAGEDOWNLOADERROR = 'FailedOperation.ImageDownloadError'
# Material size exceeds 1080*1080 pixels.
FAILEDOPERATION_IMAGEPIXELEXCEED = 'FailedOperation.ImagePixelExceed'
# Image resolution too large; resize/compress it to within 3k*3k.
FAILEDOPERATION_IMAGERESOLUTIONEXCEED = 'FailedOperation.ImageResolutionExceed'
# The short side of the image is smaller than 64 pixels.
FAILEDOPERATION_IMAGERESOLUTIONTOOSMALL = 'FailedOperation.ImageResolutionTooSmall'
# Base64 data of the input image exceeds 5 MB.
FAILEDOPERATION_IMAGESIZEEXCEED = 'FailedOperation.ImageSizeExceed'
# Base64-encoded image data must not exceed 500 KB.
FAILEDOPERATION_IMAGESIZEEXCEEDFIVEHUNDREDKB = 'FailedOperation.ImageSizeExceedFiveHundredKB'
# Image size too large or too small; it does not meet the algorithm requirements.
FAILEDOPERATION_IMAGESIZEINVALID = 'FailedOperation.ImageSizeInvalid'
# Image upload failed.
FAILEDOPERATION_IMAGEUPLOADFAILED = 'FailedOperation.ImageUploadFailed'
# Number of materials exceeds the upper limit.
FAILEDOPERATION_MATERIALVALUEEXCEED = 'FailedOperation.MaterialValueExceed'
# Unable to detect a face; face rectangle alignment score is below the threshold.
FAILEDOPERATION_NOFACEDETECTED = 'FailedOperation.NoFaceDetected'
# Invalid parameter field or value.
FAILEDOPERATION_PARAMETERVALUEERROR = 'FailedOperation.ParameterValueError'
# The activity has not paid the license fee or has been disabled.
FAILEDOPERATION_PROJECTNOTAUTH = 'FailedOperation.ProjectNotAuth'
# Request entity too large.
FAILEDOPERATION_REQUESTENTITYTOOLARGE = 'FailedOperation.RequestEntityTooLarge'
# Backend service timed out.
FAILEDOPERATION_REQUESTTIMEOUT = 'FailedOperation.RequestTimeout'
# Internal system error.
FAILEDOPERATION_SERVERERROR = 'FailedOperation.ServerError'
# Template face ID does not exist.
FAILEDOPERATION_TEMPLATEFACEIDNOTEXIST = 'FailedOperation.TemplateFaceIDNotExist'
# Activity id not found.
INVALIDPARAMETERVALUE_ACTIVITYIDNOTFOUND = 'InvalidParameterValue.ActivityIdNotFound'
# Invalid activity algorithm (engine) version value.
INVALIDPARAMETERVALUE_ENGINEVALUEERROR = 'InvalidParameterValue.EngineValueError'
# Invalid face rectangle parameter, or the face rectangle is too small.
INVALIDPARAMETERVALUE_FACERECTPARAMETERVALUEERROR = 'InvalidParameterValue.FaceRectParameterValueError'
# Face detection - image is empty.
INVALIDPARAMETERVALUE_IMAGEEMPTY = 'InvalidParameterValue.ImageEmpty'
# Material Id not found.
INVALIDPARAMETERVALUE_MATERIALIDNOTFOUND = 'InvalidParameterValue.MaterialIdNotFound'
# Face detection - no face in the image.
INVALIDPARAMETERVALUE_NOFACEINPHOTO = 'InvalidParameterValue.NoFaceInPhoto'
# The resource is being delivered.
RESOURCEUNAVAILABLE_DELIVERING = 'ResourceUnavailable.Delivering'
# The account has been frozen.
RESOURCEUNAVAILABLE_FREEZE = 'ResourceUnavailable.Freeze'
# Failed to obtain authentication information.
RESOURCEUNAVAILABLE_GETAUTHINFOERROR = 'ResourceUnavailable.GetAuthInfoError'
# The account is in arrears.
RESOURCEUNAVAILABLE_INARREARS = 'ResourceUnavailable.InArrears'
# Insufficient balance.
RESOURCEUNAVAILABLE_LOWBALANCE = 'ResourceUnavailable.LowBalance'
# Billing status unknown; confirm the service has been activated in the console.
RESOURCEUNAVAILABLE_NOTEXIST = 'ResourceUnavailable.NotExist'
# The service has not been activated.
RESOURCEUNAVAILABLE_NOTREADY = 'ResourceUnavailable.NotReady'
# The resource has been reclaimed.
RESOURCEUNAVAILABLE_RECOVER = 'ResourceUnavailable.Recover'
# Service on the account has been stopped.
RESOURCEUNAVAILABLE_STOPUSING = 'ResourceUnavailable.StopUsing'
# Billing status unknown.
RESOURCEUNAVAILABLE_UNKNOWNSTATUS = 'ResourceUnavailable.UnknownStatus'
# The account is in arrears.
RESOURCESSOLDOUT_CHARGESTATUSEXCEPTION = 'ResourcesSoldOut.ChargeStatusException'
| 30.011628 | 103 | 0.844828 | [
"Apache-2.0"
] | PlasticMem/tencentcloud-sdk-python | tencentcloud/facefusion/v20181201/errorcodes.py | 6,162 | Python |
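# Hedged usage sketch (not part of this module): tencentcloud SDK calls raise
# TencentCloudSDKException, whose string code can be compared against the
# constants above. `client` and `req` are assumed to be an already configured
# facefusion client and FuseFace request.
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException

def fuse_face_or_raise(client, req):
    try:
        return client.FuseFace(req)
    except TencentCloudSDKException as err:
        if err.get_code() == FAILEDOPERATION_FUSEFREQCTRL:
            # Rate limited: the caller should back off before retrying.
            pass
        raise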
"""included ordering for sections
Revision ID: 91052a50e2b0
Revises: 5716caecc491
Create Date: 2020-06-26 14:49:21.905034
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '91052a50e2b0'
down_revision = '5716caecc491'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('section', sa.Column('section_ordering', sa.Integer(), nullable=False, server_default='0'))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('section', 'section_ordering')
# ### end Alembic commands ###
| 24.551724 | 109 | 0.706461 | [
"MIT"
] | Juan7655/wfh-movies | app/alembic/versions/2020_06_26_14_49_21.py | 712 | Python |
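# Hedged sketch of applying this revision programmatically (equivalent to
# running `alembic upgrade 91052a50e2b0` from the CLI); the path to
# alembic.ini is an assumption about the project layout.
from alembic import command
from alembic.config import Config

def apply_section_ordering_migration(ini_path="alembic.ini"):
    command.upgrade(Config(ini_path), "91052a50e2b0")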
class RSVPRouter(object):
"""
A router to control all database operations on models in the
rsvp application.
"""
apps = ["rsvp"]
using = "rsvp_db"
def db_for_read(self, model, **hints):
if model._meta.app_label in self.apps:
return self.using
return None
def db_for_write(self, model, **hints):
if model._meta.app_label in self.apps:
return self.using
return None
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the app is involved.
"""
if obj1._meta.app_label in self.apps or obj2._meta.app_label in self.apps:
return True
return None
def allow_syncdb(self, db, model):
"""Make sure the apps we care about appear in the db"""
if model._meta.app_label in ['south']:
return True
if db == self.using:
return model._meta.app_label in self.apps
elif model._meta.app_label in self.apps:
return False
return None
def allow_migrate(self, db, model):
if db == self.using:
return model._meta.app_label in self.apps
elif model._meta.app_label in self.apps:
return False
return None
| 29.613636 | 82 | 0.585572 | [
"MIT"
] | ehayne/KyleAndEmily | kyleandemily/rsvp/db_router.py | 1,303 | Python |
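# Hedged settings sketch: the router above only takes effect once it is listed
# in DATABASE_ROUTERS and the "rsvp_db" alias exists in DATABASES. The module
# path and the sqlite backends below are assumptions about the project layout.
DATABASE_ROUTERS = ["kyleandemily.rsvp.db_router.RSVPRouter"]
DATABASES = {
    "default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "default.sqlite3"},
    "rsvp_db": {"ENGINE": "django.db.backends.sqlite3", "NAME": "rsvp.sqlite3"},
}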
from flask import Flask, render_template, request, session, logging, url_for, redirect, flash
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from passlib.hash import sha256_crypt
from flask_login import login_user
engine = create_engine("postgresql+psycopg2://moringa:1234@localhost/signup")
db = scoped_session(sessionmaker(bind=engine))
app = Flask(__name__)
app.secret_key = "1234code"
@app.route('/')
def landing():
return render_template('landing.html')
#login page form
@app.route('/signin', methods=["GET", "POST"])
def signin():
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
        usernamedata = db.execute(
            "SELECT username FROM users WHERE username = :username", {
                "username": username
            }).fetchone()
        # Look up the stored hash by username; the plaintext password can never
        # equal the hash that was stored at signup.
        passworddata = db.execute(
            "SELECT password FROM users WHERE username = :username", {
                "username": username
            }).fetchone()
if usernamedata is None:
flash("NO username", "danger")
return render_template("signin.html")
else:
for password_data in passworddata:
if sha256_crypt.verify(password, password_data):
flash("You are logged", "success")
return redirect(url_for('profile'))
else:
flash("Incorrect password", "danger")
return render_template('signin.html')
return render_template('signin.html')
#route for photo
@app.route('/photo')
def photo():
return render_template("photo.html")
#route for profile
@app.route('/profile')
def profile():
return render_template('profile.html')
#register form functions,route for signup page
@app.route("/signup", methods=["POST", "GET"])
def signup():
if request.method == "POST":
name = request.form.get("name")
username = request.form.get("username")
password = request.form.get("password")
confirm = request.form.get("confirm")
        secure_password = sha256_crypt.hash(str(password))  # hash() replaces the deprecated encrypt()
if password == confirm:
db.execute(
"INSERT INTO users(name,username,password) VALUES(:name,:username,:password)",
{
"name": name,
"username": username,
"password": secure_password
})
db.commit()
flash("You are registered and can login", "success")
return redirect(url_for('signin'))
else:
flash("password did not match", "danger")
return render_template('signup.html')
return render_template('signup.html')
#route for contact
@app.route('/contact')
def contact():
return render_template('contact.html')
#about us route
@app.route('/about')
def about():
return render_template('about.html')
#route for logout
@app.route('/logout')
def logout():
    return redirect(url_for('landing'))
#route for social
@app.route('/social', methods=["POST", "GET"])
def social():
return render_template("social.html")
if __name__ == "__main__":
    app.run(debug=True)
| 27.380165 | 94 | 0.620284 | [
"MIT"
] | Kingsly62/Pitches-Application | main.py | 3,313 | Python |
"""Several tools used accross by other modules"""
import logging
from logging.handlers import BufferingHandler
from asyncio import sleep, get_event_loop
from datetime import datetime, timedelta
from distutils.util import strtobool
from os.path import abspath, dirname
from typing import Union, Optional, List
from uuid import uuid4
import jwt as jwtlib
logger = logging.getLogger(__name__)
def find(func, iteratee):
"""Returns the first element that match the query"""
for value in iteratee:
if func(value):
return value
return None
def cast(val, typ, *types):
"""Cast a value to the given type. /!\\ Hack /!\\ """
# get Optional
if typ.__class__ in [Union.__class__, Optional.__class__] \
and len(typ.__args__) == 2 \
and typ.__args__[1] is None:
typ = typ.__args__[0]
# split Unions
elif typ.__class__ == Union.__class__:
return cast(val, *typ.__args__)
# consume List
if typ.__class__ == List.__class__:
values = []
for element in val:
values.append(cast(element, typ.__args__[0]))
return values
# cast
types = [typ] + list(types)
for typ in types:
try:
return typ(val)
except:
continue
raise TypeError("{} not castable in any of {{{}}}.".format(val, types))
def real_type(typ):
"""Escape the type from Union and Optional. /!\\ Hack /!\\ """
if typ.__class__ in [Union.__class__, Optional.__class__]:
return typ.__args__[0]
return typ
def root():
"""Return the path of the package root"""
return dirname(abspath(__file__))
class DelayLogFor(BufferingHandler):
"""Delai logging for a specific logger."""
def __init__(self, delayed_logger: logging.Logger):
self.delayed_logger = delayed_logger
self.delayed_handlers = []
super().__init__(float('infinity'))
def flush(self):
"""Flush this BufferingHandler to all the delayed handlers."""
self.acquire()
try:
for handler in self.delayed_handlers:
for record in self.buffer:
if record.levelno >= handler.level:
handler.handle(record)
self.buffer = []
finally:
self.release()
def __enter__(self):
"""Replace the handlers by this BufferingHandler"""
self.delayed_handlers.extend(self.delayed_logger.handlers)
self.delayed_logger.handlers.clear()
self.delayed_logger.addHandler(self)
return self
def __exit__(self, typ, val, traceback):
"""Restore the handlers and flush this BufferingHandler"""
self.delayed_logger.removeHandler(self)
self.delayed_logger.handlers.extend(self.delayed_handlers)
self.close()
def generate_token(key, iat=None, exp_delta=timedelta(minutes=5), typ="player",
tid=None, uid="00000000-0000-0000-0000-000000000000"):
"""Generate a JSON Web Token"""
if iat is None:
iat = datetime.utcnow()
if tid is None:
tid = str(uuid4())
return jwtlib.encode({
"iss": "webapi",
"sub": "webgames",
"iat": iat,
"exp": iat + exp_delta,
"jti": tid,
"typ": typ,
"uid": uid
}, key, algorithm='HS256').decode()
def ask_bool(prompt):
"""Ask a question to the user, retry until the reply is valid"""
while True:
try:
return strtobool(input("%s (yes/no) " % prompt).strip().casefold())
except ValueError:
continue
def fake_async(func):
"""Fake coroutine by awaiting asyncio.sleep(0)"""
async def wrapped(*args, **kwargs):
"""The faked coroutine"""
await sleep(0)
return func(*args, **kwargs)
return wrapped
def lruc(coro, loop=get_event_loop()):
"""Short version of loop.run_until_complete(coro)"""
return loop.run_until_complete(coro)
def async_partial(func, *args, **keywords):
"""async functools.partial"""
async def newfunc(*fargs, **fkeywords):
"""the mocked function"""
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return await func(*args, *fargs, **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
class Ref:
_obj = None
def __call__(self, obj):
self._obj = obj
def __getattr__(self, attr):
return getattr(self._obj, attr)
def __setattr__(self, attr, value):
if attr == "_obj":
super().__setattr__(attr, value)
return setattr(self._obj, attr, value)
| 27.988024 | 79 | 0.617672 | [
"MIT"
] | JWebgames/WebAPI | webapi/tools.py | 4,674 | Python |
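# Hedged usage sketch for two of the helpers above (find and cast); the sample
# values are made up.
if __name__ == "__main__":
    assert find(lambda n: n % 2 == 0, [1, 3, 4, 5]) == 4
    assert cast("3", int) == 3              # plain coercion
    assert cast("abc", int, str) == "abc"   # falls through to the next type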
"""The base class and interface for all formatting plugins."""
import argparse
import os
import sys
from typing import IO
from typing import List
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from flake8.statistics import Statistics
from flake8.style_guide import Violation
class BaseFormatter:
"""Class defining the formatter interface.
.. attribute:: options
The options parsed from both configuration files and the command-line.
.. attribute:: filename
If specified by the user, the path to store the results of the run.
.. attribute:: output_fd
Initialized when the :meth:`start` is called. This will be a file
object opened for writing.
.. attribute:: newline
The string to add to the end of a line. This is only used when the
output filename has been specified.
"""
def __init__(self, options: argparse.Namespace) -> None:
"""Initialize with the options parsed from config and cli.
This also calls a hook, :meth:`after_init`, so subclasses do not need
to call super to call this method.
:param options:
User specified configuration parsed from both configuration files
and the command-line interface.
:type options:
:class:`argparse.Namespace`
"""
self.options = options
self.filename = options.output_file
self.output_fd: Optional[IO[str]] = None
self.newline = "\n"
self.after_init()
def after_init(self) -> None:
"""Initialize the formatter further."""
def beginning(self, filename: str) -> None:
"""Notify the formatter that we're starting to process a file.
:param str filename:
The name of the file that Flake8 is beginning to report results
from.
"""
def finished(self, filename: str) -> None:
"""Notify the formatter that we've finished processing a file.
:param str filename:
The name of the file that Flake8 has finished reporting results
from.
"""
def start(self) -> None:
"""Prepare the formatter to receive input.
        This defaults to initializing :attr:`output_fd` if :attr:`filename`
        is set.
"""
if self.filename:
dirname = os.path.dirname(os.path.abspath(self.filename))
os.makedirs(dirname, exist_ok=True)
self.output_fd = open(self.filename, "a")
def handle(self, error: "Violation") -> None:
"""Handle an error reported by Flake8.
This defaults to calling :meth:`format`, :meth:`show_source`, and
then :meth:`write`. To extend how errors are handled, override this
method.
:param error:
This will be an instance of
:class:`~flake8.style_guide.Violation`.
:type error:
flake8.style_guide.Violation
"""
line = self.format(error)
source = self.show_source(error)
self.write(line, source)
def format(self, error: "Violation") -> Optional[str]:
"""Format an error reported by Flake8.
This method **must** be implemented by subclasses.
:param error:
This will be an instance of
:class:`~flake8.style_guide.Violation`.
:type error:
flake8.style_guide.Violation
:returns:
The formatted error string.
:rtype:
str
"""
raise NotImplementedError(
"Subclass of BaseFormatter did not implement" " format."
)
def show_statistics(self, statistics: "Statistics") -> None:
"""Format and print the statistics."""
for error_code in statistics.error_codes():
stats_for_error_code = statistics.statistics_for(error_code)
statistic = next(stats_for_error_code)
count = statistic.count
count += sum(stat.count for stat in stats_for_error_code)
self._write(f"{count:<5} {error_code} {statistic.message}")
def show_benchmarks(self, benchmarks: List[Tuple[str, float]]) -> None:
"""Format and print the benchmarks."""
# NOTE(sigmavirus24): The format strings are a little confusing, even
# to me, so here's a quick explanation:
# We specify the named value first followed by a ':' to indicate we're
# formatting the value.
# Next we use '<' to indicate we want the value left aligned.
# Then '10' is the width of the area.
# For floats, finally, we only want only want at most 3 digits after
# the decimal point to be displayed. This is the precision and it
# can not be specified for integers which is why we need two separate
# format strings.
float_format = "{value:<10.3} {statistic}".format
int_format = "{value:<10} {statistic}".format
for statistic, value in benchmarks:
if isinstance(value, int):
benchmark = int_format(statistic=statistic, value=value)
else:
benchmark = float_format(statistic=statistic, value=value)
self._write(benchmark)
def show_source(self, error: "Violation") -> Optional[str]:
"""Show the physical line generating the error.
This also adds an indicator for the particular part of the line that
is reported as generating the problem.
:param error:
This will be an instance of
:class:`~flake8.style_guide.Violation`.
:type error:
flake8.style_guide.Violation
:returns:
The formatted error string if the user wants to show the source.
If the user does not want to show the source, this will return
``None``.
:rtype:
str
"""
if not self.options.show_source or error.physical_line is None:
return ""
# Because column numbers are 1-indexed, we need to remove one to get
# the proper number of space characters.
indent = "".join(
c if c.isspace() else " "
for c in error.physical_line[: error.column_number - 1]
)
# Physical lines have a newline at the end, no need to add an extra
# one
return f"{error.physical_line}{indent}^"
def _write(self, output: str) -> None:
"""Handle logic of whether to use an output file or print()."""
if self.output_fd is not None:
self.output_fd.write(output + self.newline)
if self.output_fd is None or self.options.tee:
sys.stdout.buffer.write(output.encode() + self.newline.encode())
def write(self, line: Optional[str], source: Optional[str]) -> None:
"""Write the line either to the output file or stdout.
This handles deciding whether to write to a file or print to standard
out for subclasses. Override this if you want behaviour that differs
from the default.
:param str line:
The formatted string to print or write.
:param str source:
The source code that has been formatted and associated with the
line of output.
"""
if line:
self._write(line)
if source:
self._write(source)
def stop(self) -> None:
"""Clean up after reporting is finished."""
if self.output_fd is not None:
self.output_fd.close()
self.output_fd = None
| 35.764151 | 78 | 0.616196 | [
"MIT"
] | AWSCookbook/Databases | .venv/lib/python3.9/site-packages/flake8/formatting/base.py | 7,582 | Python |
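# Hedged sketch (not shipped with flake8) of a minimal concrete formatter:
# only format() has to be implemented; handle()/write() in the base class take
# care of the output. The class name and message layout are illustrative.
class SimpleFormatter(BaseFormatter):
    """Emit "path:row:col: code message" lines."""

    def format(self, error: "Violation") -> Optional[str]:
        return (
            f"{error.filename}:{error.line_number}:"
            f"{error.column_number}: {error.code} {error.text}"
        )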
import os, logging
from flask import Flask, request
from werkzeug.serving import run_simple
from galaxylearning.utils.utils import return_data_decorator, LoggerFactory
app = Flask(__name__)
BASE_MODEL_PATH = os.path.join(os.path.abspath("."), "res", "models")
logger = LoggerFactory.getLogger(__name__, logging.INFO)
# Keep @app.route outermost so Flask registers the decorated (wrapped) view.
@app.route("/", methods=['GET'])
@return_data_decorator
def test_client():
return "Hello galaxylearning client", 200
@app.route("/aggregatepars", methods=['POST'])
@return_data_decorator
def submit_aggregate_pars():
logger.info("receive aggregate files")
recv_aggregate_files = request.files
# print(recv_aggregate_files)
for filename in recv_aggregate_files:
job_id = filename.split("_")[-2]
# print("recv_filename: ", recv_aggregate_files[filename])
tmp_aggregate_file = recv_aggregate_files[filename]
job_base_model_dir = os.path.join(BASE_MODEL_PATH, "models_{}".format(job_id), "tmp_aggregate_pars")
latest_num = len(os.listdir(job_base_model_dir)) - 1
        latest_tmp_aggregate_file_path = os.path.join(job_base_model_dir, "avg_pars_{}".format(latest_num))
        with open(latest_tmp_aggregate_file_path, "wb") as f:
for line in tmp_aggregate_file.readlines():
f.write(line)
logger.info("recv success")
return "ok", 200
def start_communicate_client(client_ip, client_port):
app.url_map.strict_slashes = False
run_simple(hostname=client_ip, port=int(client_port), application=app, threaded=True)
logger.info("galaxy learning client started") | 37.833333 | 108 | 0.731278 | [
"Apache-2.0"
] | ZJU-DistributedAI/GalaxyLearning | galaxylearning/core/communicate_client.py | 1,589 | Python |
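# Hedged sketch of the sending side for /aggregatepars above: the multipart
# field name must keep the job id as its second-to-last "_"-separated token,
# because the handler parses it with filename.split("_")[-2]. Host, port and
# file paths are assumptions.
import requests

def push_aggregate(job_id, local_path, url="http://127.0.0.1:8081/aggregatepars"):
    field_name = "aggregate_pars_{}_0".format(job_id)
    with open(local_path, "rb") as f:
        return requests.post(url, files={field_name: f})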
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Testing Tracker on Fashion MNIST',
author='Danish Technological Institute (DTI Research)',
license='BSD-3',
)
| 23.090909 | 59 | 0.692913 | [
"BSD-3-Clause"
] | nily-dti/tracker_fashion_mnist_example | setup.py | 254 | Python |
import logging
def webapp_add_wsgi_middleware(app):
try:
from google.appengine.ext.appstats import recording
except ImportError, err:
logging.info('Failed to import recording: %s', err)
else:
app = recording.appstats_wsgi_middleware(app)
return app
appstats_KEY_DISTANCE = 10
appstats_MAX_REPR = 1000
appstats_MAX_STACK = 20
appstats_FILTER_LIST = [
{'PATH_INFO': '!^/favicon\.ico$'},
]
| 21.789474 | 55 | 0.73913 | [
"Apache-2.0"
] | alltyme/appengine-ndb-experiment | appengine_config.py | 414 | Python |
# Generated by Django 2.2.13 on 2020-11-05 11:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("workflow_handler", "0030_auto_20201103_1718"),
]
operations = [
migrations.AddField(
model_name="task",
name="correct",
field=models.BooleanField(null=True),
),
]
| 20.631579 | 56 | 0.604592 | [
"Apache-2.0"
] | Human-Lambdas/human-lambdas | src/human_lambdas/workflow_handler/migrations/0031_task_correct.py | 392 | Python |
"""Tools to assist importing optional external modules."""
import sys
import re
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
_component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def version_tuple(vstring):
# Parse a version string to a tuple e.g. '1.2' -> (1, 2)
# Simplified from distutils.version.LooseVersion which was deprecated in
# Python 3.10.
components = []
for x in _component_re.split(vstring):
if x and x != '.':
try:
x = int(x)
except ValueError:
pass
components.append(x)
return components
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
import_kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the import_kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... import_kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... import_kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning, stacklevel=2)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **import_kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = import_kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning,
stacklevel=2)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)),
stacklevel=2)
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if version_tuple(modversion) < version_tuple(min_module_version):
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, str):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
                    # Either way, we don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning, stacklevel=2)
return
return mod
| 40.854167 | 89 | 0.668027 | [
"MIT"
] | LuisMi1245/QPath-and-Snakes | .environment/lib/python3.8/site-packages/sympy/external/importtools.py | 7,844 | Python |