{
"source": "jleinonen/cloudsat-gan",
"score": 2
}
#### File: csmodiscgan/src/data_utils.py
```python
import gc
import numpy as np
import h5py
import keras.backend as K
import netCDF4
def load_cloudsat_scenes(fn, n=None, right_handed=False, frac_validate=0.1,
shuffle=True, shuffle_seed=None):
with netCDF4.Dataset(fn, 'r') as ds:
if n is None:
n = ds["scenes"].shape[0]
cs_scenes = np.array(ds["scenes"][:n,:,:])
cs_scenes = cs_scenes.reshape(cs_scenes.shape+(1,))
if right_handed:
cs_scenes = np.rot90(cs_scenes, axes=(2,1))
# rescale from (0,1) to (-1,1)
cs_scenes *= 2
cs_scenes -= 1
modis_vars = np.zeros((n,)+ds["tau_c"].shape[1:]+(4,),
dtype=np.float32)
modis_vars[:,:,0] = ds["tau_c"][:n,:]
modis_vars[:,:,1] = ds["p_top"][:n,:]
modis_vars[:,:,2] = ds["r_e"][:n,:]
modis_vars[:,:,3] = ds["twp"][:n,:]
modis_mask = np.zeros((n,)+ds["tau_c"].shape[1:]+(1,),
dtype=np.float32)
modis_mask[:,:,0] = ds["modis_mask"][:n,:]
num_scenes = cs_scenes.shape[0]
if shuffle:
prng = np.random.RandomState(shuffle_seed)
ind = np.arange(num_scenes)
prng.shuffle(ind)
cs_scenes = cs_scenes[ind,...]
modis_vars = modis_vars[ind,...]
modis_mask = modis_mask[ind,...]
gc.collect()
num_train = int(round(num_scenes*(1.0-frac_validate)))
scenes = {
"train": (
cs_scenes[:num_train,...],
modis_vars[:num_train,...],
modis_mask[:num_train,...]
),
"validate": (
cs_scenes[num_train:,...],
modis_vars[num_train:,...],
modis_mask[num_train:,...]
)
}
return scenes
def decode_modis_vars(modis_vars, modis_mask):
tau_c_scaled = modis_vars[:,:,0]
p_top_scaled = modis_vars[:,:,1]
r_e_scaled = modis_vars[:,:,2]
twp_scaled = modis_vars[:,:,3]
decoded_vars = {}
decoded_vars["tau_c"] = np.exp((1.13*tau_c_scaled+2.20))
decoded_vars["p_top"] = 265.0*p_top_scaled+532.0
decoded_vars["r_e"] = np.exp((0.542*r_e_scaled+3.06))
decoded_vars["twp"] = np.exp((1.11*twp_scaled+0.184))
for var in decoded_vars:
decoded_vars[var][~modis_mask[:,:,0].astype(bool)] = np.nan
return decoded_vars
def rescale_scene(scene, Z_range=(-35,20), missing_max=-30):
sc = Z_range[0] + (scene+1)/2.0 * (Z_range[1]-Z_range[0])
sc[sc <= missing_max] = np.nan
return sc
def gen_batch(cs_scenes, modis_vars, modis_mask, batch_size):
ind = np.arange(cs_scenes.shape[0], dtype=int)
np.random.shuffle(ind)
while len(ind) >= batch_size:
idx = ind[:batch_size]
ind = ind[batch_size:]
yield (cs_scenes[idx,...], modis_vars[idx,...], modis_mask[idx,...])
def gen_modis_batch_2d(modis_vars_2d, modis_mask_2d, batch_size):
ind = np.arange(modis_vars_2d.shape[0], dtype=int)
np.random.shuffle(ind)
while len(ind) >= batch_size:
idx = ind[:batch_size]
ind = ind[batch_size:]
(modis_vars_2d_b, modis_mask_2d_b) = (
modis_vars_2d[idx,...], modis_mask_2d[idx,...])
yield (modis_vars_2d_b, modis_mask_2d_b)
def expand_modis_batch(modis_vars_2d_b, modis_mask_2d_b, scene_size):
vars_shape = modis_vars_2d_b.shape[:3]+(scene_size,modis_vars_2d_b.shape[-1])
mask_shape = modis_mask_2d_b.shape[:3]+(scene_size,modis_mask_2d_b.shape[-1])
modis_vars_3d_b = np.empty(vars_shape, dtype=np.float32)
modis_mask_3d_b = np.empty(mask_shape, dtype=np.float32)
for i in range(scene_size):
modis_vars_3d_b[:,:,:,i,:] = modis_vars_2d_b
modis_mask_3d_b[:,:,:,i,:] = modis_mask_2d_b
return (modis_vars_3d_b, modis_mask_3d_b)
def sample_noise(noise_scale, batch_size, noise_dim):
return np.random.normal(scale=noise_scale, size=(batch_size, noise_dim))
def get_disc_batch(cs_scenes_b, modis_vars_b, modis_mask_b, gen, fake,
batch_size, noise_dim,
noise_scale=1.0, max_smoothing=0.1):
    # Create X_disc: either only generated (fake) or only real samples
    if fake:
# Pass noise to the generator
noise = sample_noise(noise_scale, batch_size, noise_dim)
#cont = sample_noise(noise_scale, batch_size, cont_dim)
X_disc = [
gen.predict([noise, modis_vars_b, modis_mask_b]),
modis_vars_b,
modis_mask_b
]
# label smoothing
y_disc = 1-max_smoothing*np.random.rand(batch_size, 1)
else:
X_disc = [cs_scenes_b, modis_vars_b, modis_mask_b]
y_disc = max_smoothing*np.random.rand(batch_size, 1)
return (X_disc, y_disc)
def get_gan_batch(batch_size, noise_dim,
noise_scale=1.0, max_smoothing=0.1, num_disc=1):
noise = sample_noise(noise_scale, batch_size, noise_dim)
X_gan = noise
y_gan_disc = max_smoothing*np.random.rand(batch_size, num_disc)
return (X_gan, y_gan_disc)
def save_opt_weights(model, filepath):
with h5py.File(filepath, 'w') as f:
# Save optimizer weights.
symbolic_weights = getattr(model.optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = f.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
                # Default name of symbolic weights is /variable for Theano
if K.backend() == 'theano':
if hasattr(w, 'name') and w.name != "/variable":
name = str(w.name)
else:
name = 'param_' + str(i)
else:
if hasattr(w, 'name') and w.name:
name = str(w.name)
else:
name = 'param_' + str(i)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name,
val.shape,
dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
def load_opt_weights(model, filepath):
with h5py.File(filepath, mode='r') as f:
optimizer_weights_group = f['optimizer_weights']
optimizer_weight_names = [n.decode('utf8') for n in
optimizer_weights_group.attrs['weight_names']]
optimizer_weight_values = [optimizer_weights_group[n] for n in
optimizer_weight_names]
model.optimizer.set_weights(optimizer_weight_values)
```
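A minimal usage sketch for the loaders above (the scene file name, batch size and `noise_dim` are illustrative assumptions, not values fixed by the module):
```python
# Hypothetical usage of data_utils.py; the scene file name and noise_dim
# are placeholders chosen for illustration.
from data_utils import load_cloudsat_scenes, gen_batch, get_disc_batch

scenes = load_cloudsat_scenes("cloudsat_scenes.nc", shuffle_seed=1234)
(cs_train, modis_vars_train, modis_mask_train) = scenes["train"]

batch_size = 32
for (cs_b, vars_b, mask_b) in gen_batch(cs_train, modis_vars_train,
                                        modis_mask_train, batch_size):
    # Real-sample discriminator batch; with fake=True a generator model
    # would be needed to produce the generated samples.
    (X_disc, y_disc) = get_disc_batch(cs_b, vars_b, mask_b,
                                      gen=None, fake=False,
                                      batch_size=batch_size, noise_dim=64)
    break  # one batch is enough for the sketch
```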
#### File: csmodiscgan/src/models.py
```python
from keras.models import Model
from keras.layers import Activation, Concatenate, Dense, Flatten, Input
from keras.layers import LeakyReLU, Reshape
from keras.layers import Conv2D, UpSampling2D
from keras.layers import BatchNormalization
def cs_generator(scene_size, modis_var_dim, noise_dim):
f = 256
start_dim = 8
reshape_shape = (start_dim, start_dim, f)
modis_var_input = Input(shape=(scene_size,modis_var_dim), name="modis_var_in")
modis_mask_input = Input(shape=(scene_size,1), name="modis_mask_in")
noise_input = Input(shape=(noise_dim,), name="noise_in")
inputs = [noise_input, modis_var_input, modis_mask_input]
inputs_flat = [inputs[0], Flatten()(inputs[1]), Flatten()(inputs[2])]
gen_input = Concatenate()(inputs_flat)
x = Dense(f * start_dim * start_dim)(gen_input)
x = Activation("relu")(x)
x = BatchNormalization(momentum=0.8)(x)
x = Reshape(reshape_shape)(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(256, (3, 3), padding="same")(x)
x = Activation("relu")(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(128, (3, 3), padding="same")(x)
x = Activation("relu")(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D(size=(2, 2))(x)
x = Conv2D(64, (3, 3), padding="same")(x)
x = Activation("relu")(x)
x = BatchNormalization(momentum=0.8)(x)
x = Conv2D(1, (3, 3), padding="same",
activation='tanh')(x)
gen = Model(inputs=inputs, outputs=x, name="gen")
return gen
def modis_upsampler(modis_var_input, modis_mask_input,
modis_var_dim, scene_size, upsampled_channels=None):
if upsampled_channels is None:
upsampled_channels = modis_var_dim+1
modis_input = Concatenate()([modis_var_input, modis_mask_input])
x = Reshape((1,scene_size,modis_var_dim+1))(modis_input)
x = UpSampling2D(size=(4,1))(x)
x = Conv2D(256, (5, 3), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D(size=(4,1))(x)
x = Conv2D(128, (5, 3), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D(size=(2,1))(x)
x = Conv2D(64, (3, 3), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D(size=(2,1))(x)
x = Conv2D(upsampled_channels, (3, 3), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
return x
def discriminator(scene_size, modis_var_dim):
disc_input = Input(shape=(scene_size,scene_size,1), name="disc_in")
modis_var_input = Input(shape=(scene_size,modis_var_dim), name="modis_var_in")
modis_mask_input = Input(shape=(scene_size,1), name="modis_mask_in")
modis_upsampled = modis_upsampler(modis_var_input, modis_mask_input,
modis_var_dim, scene_size)
full_input = Concatenate()([disc_input, modis_upsampled])
x = Conv2D(64, (3, 3), strides=(2, 2), padding="same")(full_input)
x = LeakyReLU(0.2)(x)
x = Conv2D(128, (3, 3), strides=(2, 2), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = Conv2D(256, (3, 3), strides=(2, 2), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = Conv2D(256, (3, 3), strides=(2, 2), padding="same")(x)
x = LeakyReLU(0.2)(x)
x = Flatten()(x)
x_disc = Dense(1, activation='sigmoid', name="disc_out")(x)
model = Model(inputs=[disc_input, modis_var_input, modis_mask_input],
outputs=x_disc,
name="disc")
return model
def cs_modis_cgan(gen, disc, scene_size, modis_var_dim, noise_dim):
modis_var_input = Input(shape=(scene_size,modis_var_dim),
name="modis_var_in")
modis_mask_input = Input(shape=(scene_size,1), name="modis_mask_in")
noise_input = Input(shape=(noise_dim,), name="noise_in")
inputs = [noise_input, modis_var_input, modis_mask_input]
generated_image = gen(inputs)
disc_inputs = [generated_image, modis_var_input, modis_mask_input]
x_disc = disc(disc_inputs)
gan = Model(inputs=inputs, outputs=x_disc, name="cs_modis_cgan")
return gan
```
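A short sketch of how these builders might be wired together for training. `scene_size=64` follows from the three 2x upsampling steps in `cs_generator` and the upsampling factors in `modis_upsampler`; `modis_var_dim=4` matches the four MODIS variables in `data_utils.py`; `noise_dim` and the optimizer settings are assumptions:
```python
# Illustrative wiring of the cGAN components; noise_dim and the Adam settings
# are assumptions, not values fixed by models.py.
from keras.optimizers import Adam
from models import cs_generator, discriminator, cs_modis_cgan

scene_size, modis_var_dim, noise_dim = 64, 4, 64

gen = cs_generator(scene_size, modis_var_dim, noise_dim)
disc = discriminator(scene_size, modis_var_dim)
disc.compile(loss="binary_crossentropy", optimizer=Adam(1e-4, beta_1=0.5))

# Freeze the discriminator inside the combined model so that training
# the combined cGAN only updates the generator weights.
disc.trainable = False
gan = cs_modis_cgan(gen, disc, scene_size, modis_var_dim, noise_dim)
gan.compile(loss="binary_crossentropy", optimizer=Adam(1e-4, beta_1=0.5))
```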
{
"source": "jleinonen/downscaling-rnn-gan",
"score": 2
}
#### File: downscaling-rnn-gan/dsrnngan/eval.py
```python
from bisect import bisect_left
from datetime import datetime, timedelta
import os
import netCDF4
import numpy as np
from scipy.interpolate import interp1d
import crps
import train
import data
import models
import msssim
import noise
import plots
import rainfarm
path = os.path.dirname(os.path.abspath(__file__))
def randomize_nans(x, rnd_mean, rnd_range):
nan_mask = np.isnan(x)
nan_shape = x[nan_mask].shape
x[nan_mask] = rnd_mean + \
(np.random.rand(*nan_shape)-0.5)*rnd_range
def ensemble_ranks(gen, batch_gen, noise_gen,
noise_offset=0.0, noise_mul=1.0,
num_batches=1024, rank_samples=100, normalize_ranks=True):
rnd_range = 0.1 * (batch_gen.decoder.value_range[0] -
batch_gen.decoder.below_val)
ranks = []
crps_scores = []
for k in range(num_batches):
(sample,cond) = next(batch_gen)
sample_crps = sample
sample = sample.ravel()
sample = batch_gen.decoder.denormalize(sample)
randomize_nans(sample, batch_gen.decoder.below_val, rnd_range)
samples_gen = []
for i in range(rank_samples):
n = noise_gen()
for nn in n:
nn *= noise_mul
nn -= noise_offset
sample_gen = gen.predict([cond]+n)
samples_gen.append(sample_gen)
samples_gen = np.stack(samples_gen, axis=-1)
crps_score = crps.crps_ensemble(sample_crps, samples_gen)
crps_scores.append(crps_score.ravel())
samples_gen = samples_gen.reshape(
(np.prod(samples_gen.shape[:-1]), samples_gen.shape[-1]))
samples_gen = batch_gen.decoder.denormalize(samples_gen)
randomize_nans(samples_gen, batch_gen.decoder.below_val, rnd_range)
rank = np.count_nonzero(sample[:,None] >= samples_gen, axis=-1)
ranks.append(rank)
ranks = np.concatenate(ranks)
crps_scores = np.concatenate(crps_scores)
if normalize_ranks:
ranks = ranks / rank_samples
return (ranks, crps_scores)
def rank_KS(norm_ranks, num_ranks=100):
(h,b) = np.histogram(norm_ranks, num_ranks+1)
h = h / h.sum()
ch = np.cumsum(h)
cb = b[1:]
return abs(ch-cb).max()
def rank_CvM(norm_ranks, num_ranks=100):
(h,b) = np.histogram(norm_ranks, num_ranks+1)
h = h / h.sum()
ch = np.cumsum(h)
cb = b[1:]
db = np.diff(b)
return np.sqrt(((ch-cb)**2*db).sum())
def rank_DKL(norm_ranks, num_ranks=100):
(h,b) = np.histogram(norm_ranks, num_ranks+1)
q = h / h.sum()
p = 1/len(h)
return p*np.log(p/q).sum()
def rank_OP(norm_ranks, num_ranks=100):
op = np.count_nonzero(
(norm_ranks==0) | (norm_ranks==1)
)
op = float(op)/len(norm_ranks)
return op
def rank_metrics_by_time(application, data_file, out_fn,
weights_dir, check_every=1, N_range=None):
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test,
noise_shapes, steps_per_epoch) = train.setup_gan(data_file,
application=application, batch_size=64)
gen = wgan.gen
noise_gen = noise.NoiseGenerator(noise_shapes(),
batch_size=batch_gen_valid.batch_size)
files = os.listdir(weights_dir)
def get_id(fn):
return fn.split("-")[1]
files = sorted(fn for fn in files if get_id(fn)==application)
def log_line(line):
with open(out_fn, 'a') as f:
print(line, file=f)
log_line("N KS CvM DKL OP CRPS mean std")
for fn in files[::check_every]:
N_samples = int(fn.split("-")[-1].split(".")[0])
if (N_range is not None) and not (N_range[0] <= N_samples < N_range[1]):
continue
gen.load_weights(weights_dir+"/"+fn)
(ranks, crps_scores) = ensemble_ranks(gen, batch_gen_valid,
noise_gen, num_batches=8)
KS = rank_KS(ranks)
CvM = rank_CvM(ranks)
DKL = rank_DKL(ranks)
OP = rank_OP(ranks)
CRPS = crps_scores.mean()
mean = ranks.mean()
std = ranks.std()
log_line("{} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}".format(
N_samples, KS, CvM, DKL, OP, CRPS, mean, std))
def rank_metrics_by_noise(application, run_id, data_file,
weights_fn):
(wgan, batch_gen_train, batch_gen_valid, _,
noise_shapes, steps_per_epoch) = train.setup_gan(data_file,
application=application)
gen = wgan.gen
noise_gen = noise.NoiseGenerator(noise_shapes(),
batch_size=batch_gen_valid.batch_size)
    # np.arange is needed here because range() does not accept float steps
    for m in list(np.arange(0.5, 2.51, 0.1)) + [3.0, 3.5]:
        N_samples = int(weights_fn.split("-")[-1].split(".")[0])
        gen.load_weights(weights_fn)
(ranks, crps_scores) = ensemble_ranks(gen, batch_gen_valid,
noise_gen, num_batches=32, noise_mul=m)
KS = rank_KS(ranks)
CvM = rank_CvM(ranks)
DKL = rank_DKL(ranks)
CRPS = crps_scores.mean()
mean = ranks.mean()
std = ranks.std()
print(N_samples, KS, CvM, DKL, CRPS, mean, std)
def rank_metrics_table(application, data_file, weights_fn, method="gan"):
if method=="gan":
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test,
noise_shapes, steps_per_epoch) = train.setup_gan(data_file,
test_data_file=data_file, application=application, batch_size=64)
gen = wgan.gen
gen.load_weights(weights_fn)
elif method=="rainfarm":
(gen_det, batch_gen_train, batch_gen_valid,
batch_gen_test, steps_per_epoch) = train.setup_deterministic(data_file,
test_data_file=data_file, sample_random=True, n_samples=1, batch_size=64,
application=application, loss='mse')
gen = GeneratorRainFARM(16, batch_gen_test.decoder)
noise_shapes = lambda: []
noise_gen = noise.NoiseGenerator(noise_shapes(),
batch_size=batch_gen_valid.batch_size)
(ranks, crps_scores) = ensemble_ranks(gen, batch_gen_test,
noise_gen, num_batches=16)
KS = rank_KS(ranks)
CvM = rank_CvM(ranks)
DKL = rank_DKL(ranks)
OP = rank_OP(ranks)
CRPS = crps_scores.mean()
mean = ranks.mean()
std = ranks.std()
print("KS: {:.3f}".format(KS))
print("CvM: {:.3f}".format(CvM))
print("DKL: {:.3f}".format(DKL))
print("OP: {:.3f}".format(OP))
print("CRPS: {:.3f}".format(CRPS))
print("mean: {:.3f}".format(mean))
print("std: {:.3f}".format(std))
def reconstruct_time_series_partial(images_fn, gen, noise_shapes,
init_model, out_fn,
time_range, h=None, last_t=None, application="mchrzc", ds_factor=16, n_ensemble=4,
scaling_fn=path+"/../data/scale_rzc.npy", relax_lam=0.0):
if application == "mchrzc":
dec = data.RainRateDecoder(scaling_fn, below_val=np.log10(0.025))
else:
raise ValueError("Unknown application.")
downsampler = data.LogDownsampler(min_val=dec.below_val,
threshold_val=dec.value_range[0])
with netCDF4.Dataset(images_fn) as ds_img:
time = np.array(ds_img["time"][:], copy=False)
time_dt = [datetime(1970,1,1)+timedelta(seconds=t) for t in time]
t0 = bisect_left(time_dt, time_range[0])
t1 = bisect_left(time_dt, time_range[1])
images = np.array(ds_img["images"][t0:t1,...], copy=False)
time = time[t0:t1]
img_shape = images.shape[1:3]
img_shape = (
img_shape[0] - img_shape[0]%ds_factor,
img_shape[1] - img_shape[1]%ds_factor,
)
noise_gen = noise.NoiseGenerator(noise_shapes(img_shape),
batch_size=n_ensemble)
images_ds = np.zeros(
(images.shape[0],img_shape[0]//ds_factor,img_shape[1]//ds_factor,1),
dtype=np.uint8
)
images_gen = np.zeros(
(images.shape[0],)+img_shape+(1,n_ensemble),
dtype=np.uint8
)
# this finds the nearest index in the R encoding
def encoder():
lR = dec.logR
ind = np.arange(len(lR))
ip = interp1d(lR,ind)
def f(x):
y = np.zeros(x.shape, dtype=np.uint8)
valid = (x >= dec.value_range[0])
y[valid] = ip(x[valid]).round().astype(np.uint8)
return y
return f
encode = encoder()
for k in range(images.shape[0]):
print("{}/{}".format(k+1,images.shape[0]))
img_real = images[k:k+1,:img_shape[0],:img_shape[1],:]
img_real = dec(img_real)
img_real = img_real.reshape(
(1,1)+img_real.shape[1:])
img_real[np.isnan(img_real)] = dec.below_val
img_ds = downsampler(img_real)
img_ds = dec.normalize(img_ds)
img_ds_denorm = dec.denormalize(img_ds)
img_ds = np.tile(img_ds, (n_ensemble,1,1,1,1))
(n_init, n_update) = noise_gen()
if (h is None) or (time[k]-last_t != 600):
h = init_model.predict([img_ds[:,0,...], n_init])
(img_gen,h) = gen.predict([img_ds, h, n_update])
if relax_lam > 0.0:
# nudge h towards null
h_null = init_model.predict([
np.zeros_like(img_ds[:,0,...]), n_init
])
h = h_null + (1.0-relax_lam)*(h-h_null)
img_gen = dec.denormalize(img_gen)
img_gen = img_gen.transpose((1,2,3,4,0))
images_ds[k,...] = encode(img_ds_denorm[0,...])
images_gen[k,...] = encode(img_gen[0,...])
last_t = time[k]
with netCDF4.Dataset(out_fn, 'w') as ds:
dim_height = ds.createDimension("dim_height", img_shape[0])
dim_width = ds.createDimension("dim_width", img_shape[1])
        dim_height_ds = ds.createDimension("dim_height_ds",
            img_shape[0]//ds_factor)
        dim_width_ds = ds.createDimension("dim_width_ds",
            img_shape[1]//ds_factor)
dim_samples = ds.createDimension("dim_samples", images.shape[0])
dim_ensemble = ds.createDimension("dim_ensemble", n_ensemble)
dim_channels = ds.createDimension("dim_channels", 1)
var_params = {"zlib": True, "complevel": 9}
def create_var(name, dims, **params):
dtype = params.pop("dtype", np.float32)
var = ds.createVariable(name, dtype, dims, **params)
return var
var_img = create_var("images",
("dim_samples","dim_height","dim_width","dim_channels",
"dim_ensemble"),
chunksizes=(1,64,64,1,1), dtype=np.uint8, **var_params)
var_img.units = "Encoded R"
var_img_ds = create_var("images_ds",
("dim_samples","dim_height_ds","dim_width_ds","dim_channels"),
dtype=np.uint8, **var_params)
var_img_ds.units = "Encoded R"
var_time = create_var("time", ("dim_samples",),
chunksizes=(1,), dtype=np.float64, **var_params)
var_time.units = "Seconds since 1970-01-01 00:00"
var_img_ds[:] = images_ds
var_img[:] = images_gen
var_time[:] = time
return (h, last_t)
def reconstruct_time_series_monthly(images_fn, weights_fn, out_dir,
time_range, application="mchrzc", ds_factor=16, n_ensemble=4,
relax_lam=0.0):
(gen,_) = models.generator(num_timesteps=1)
init_model = models.initial_state_model()
(gen_init, noise_shapes) = models.generator_initialized(gen, init_model,
num_timesteps=1)
gen_init.load_weights(weights_fn)
t0 = time_range[0]
months = []
while t0 < time_range[1]:
(y,m) = (t0.year, t0.month)
m += 1
if m > 12:
m = 1
y += 1
t1 = datetime(y,m,1)
months.append((t0,t1))
t0 = t1
(h, last_t) = (None, None)
for month in months:
out_fn = out_dir + "/timeseries-{}-{}{:02d}.nc".format(
application,month[0].year,month[0].month)
(h, last_t) = reconstruct_time_series_partial(images_fn, gen,
noise_shapes, init_model, out_fn, month, h=h, last_t=last_t,
application=application, ds_factor=ds_factor, n_ensemble=n_ensemble,
relax_lam=relax_lam
)
def log_spectral_distance(img1, img2):
def power_spectrum_dB(img):
fx = np.fft.fft2(img)
fx = fx[:img.shape[0]//2,:img.shape[1]//2]
px = abs(fx)**2
return 10 * np.log10(px)
d = (power_spectrum_dB(img1)-power_spectrum_dB(img2))**2
d[~np.isfinite(d)] = np.nan
return np.sqrt(np.nanmean(d))
def log_spectral_distance_batch(batch1, batch2):
lsd_batch = []
for i in range(batch1.shape[0]):
for j in range(batch1.shape[1]):
lsd = log_spectral_distance(
batch1[i,j,:,:,0], batch2[i,j,:,:,0]
)
lsd_batch.append(lsd)
return np.array(lsd_batch)
def image_quality(gen, batch_gen, noise_shapes, num_instances=1,
N_batches=100):
N = batch_gen.N
#N_batches = N//batch_gen.batch_size
img_shape = batch_gen.img_shape
noise_gen = noise.NoiseGenerator(noise_shapes(img_shape),
batch_size=batch_gen.batch_size, random_seed=1234)
batch_gen.reset(random_seed=1234)
rmse_all = []
ssim_all = []
lsd_all = []
for k in range(N_batches):
(img_real, img_ds) = next(batch_gen)
for i in range(num_instances):
n = noise_gen()
img_gen = gen.predict([img_ds]+n)
rmse = np.sqrt(((img_real-img_gen)**2).mean(axis=(2,3,4)))
ssim = msssim.MultiScaleSSIM(img_real, img_gen, 1.0)
lsd = log_spectral_distance_batch(img_real, img_gen)
rmse_all.append(rmse.flatten())
ssim_all.append(ssim.flatten())
lsd_all.append(lsd.flatten())
rmse_all = np.concatenate(rmse_all)
ssim_all = np.concatenate(ssim_all)
lsd_all = np.concatenate(lsd_all)
return (rmse_all, ssim_all, lsd_all)
def quality_metrics_by_time(application, data_fn, out_fn,
weights_dir, check_every=1):
(wgan, batch_gen_train, batch_gen_valid, _,
noise_shapes, steps_per_epoch) = train.setup_gan(data_fn,
application=application, batch_size=32)
gen = wgan.gen
files = os.listdir(weights_dir)
def get_app(fn):
return fn.split("-")[1]
files = sorted(fn for fn in files if get_app(fn)==application)
def log_line(line):
with open(out_fn, 'a') as f:
print(line, file=f)
log_line("N RMSE MSSSIM LSD")
for fn in files[::check_every]:
N_samples = int(fn.split("-")[-1].split(".")[0])
print(N_samples)
gen.load_weights(weights_dir+"/"+fn)
(rmse, ssim, lsd) = image_quality(gen, batch_gen_valid, noise_shapes)
log_line("{} {:.6f} {:.6f} {:.6f}".format(
N_samples, rmse.mean(), ssim.mean(), np.nanmean(lsd)))
def quality_metrics_table(application, data_fn, weights_fn, method="gan"):
if method == "gan":
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test,
noise_shapes, steps_per_epoch) = train.setup_gan(data_fn,
test_data_file=data_fn, application=application, batch_size=32)
gen = wgan.gen
gen.load_weights(weights_fn)
elif method == "gen_det":
(gen_det, batch_gen_train, batch_gen_valid,
batch_gen_test, steps_per_epoch) = train.setup_deterministic(data_fn,
test_data_file=data_fn, sample_random=True, n_samples=1, batch_size=32,
application=application, loss='mse')
gen_det.load_weights(weights_fn)
gen = GeneratorDeterministicPlaceholder(gen_det)
noise_shapes = lambda s: []
elif method == "lanczos":
(gen_det, batch_gen_train, batch_gen_valid,
batch_gen_test, steps_per_epoch) = train.setup_deterministic(data_fn,
test_data_file=data_fn, sample_random=True, n_samples=1, batch_size=32,
application=application, loss='mse')
gen = GeneratorLanczos((128,128))
noise_shapes = lambda s: []
elif method == "rainfarm":
(gen_det, batch_gen_train, batch_gen_valid,
batch_gen_test, steps_per_epoch) = train.setup_deterministic(data_fn,
test_data_file=data_fn, sample_random=True, n_samples=1, batch_size=32,
application=application, loss='mse')
gen = GeneratorRainFARM(16, batch_gen_test.decoder)
noise_shapes = lambda s: []
(rmse, ssim, lsd) = image_quality(gen, batch_gen_test, noise_shapes)
print("RMSE: {:.3f}".format(rmse.mean()))
print("MSSSIM: {:.3f}".format(ssim.mean()))
print("LSD: {:.3f}".format(np.nanmean(lsd)))
class GeneratorLanczos:
# class that can be used in place of a generator for evaluation purposes,
# using Lanczos filtering
def __init__(self, out_size):
self.out_size = out_size
def predict(self, *args):
y = args[0][0]
out_shape = y.shape[:2] + self.out_size + y.shape[4:]
x = np.zeros(out_shape, dtype=y.dtype)
for i in range(x.shape[0]):
for k in range(x.shape[1]):
x[i,k,:,:,0] = plots.resize_lanczos(y[i,k,:,:,0],
self.out_size)
return x
class GeneratorDeterministicPlaceholder:
def __init__(self, gen_det):
self.gen_det = gen_det
def predict(self, *args):
y = args[0]
return self.gen_det.predict(y)
class GeneratorRainFARM:
def __init__(self, ds_factor, decoder):
self.ds_factor = ds_factor
self.decoder = decoder
self.batches = 0
def predict(self, *args):
print(self.batches)
self.batches += 1
y = args[0][0]
y = self.decoder.denormalize(y)
P = 10**y
P[~np.isfinite(P)] = 0
out_size = (y.shape[2]*self.ds_factor, y.shape[3]*self.ds_factor)
out_shape = y.shape[:2] + out_size + y.shape[4:]
x = np.zeros(out_shape, dtype=y.dtype)
for i in range(y.shape[0]):
alpha = rainfarm.get_alpha_seq(P[i,...,0])
            # downscale the precipitation field of sample i (not sample 0)
            r = [rainfarm.rainfarm_downscale(p, alpha=alpha, threshold=0.1,
                ds_factor=self.ds_factor) for p in P[i,...,0]]
log_r = np.log10(r)
log_r[~np.isfinite(log_r)] = np.nan
log_r = self.decoder.normalize(log_r)
log_r[~np.isfinite(log_r)] = 0.0
x[i,...,0] = log_r
x = x.clip(0,1)
return x
```
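A quick sanity check of the rank metrics defined above on synthetic normalized ranks (assuming `eval.py` and its module-level dependencies are importable; for a calibrated ensemble the ranks are uniform on [0, 1] and all metrics stay near their minima):
```python
# Synthetic-rank check of the rank metrics; the rank distributions below are
# made up for illustration.
import numpy as np
from eval import rank_KS, rank_CvM, rank_DKL, rank_OP

rng = np.random.RandomState(0)
calibrated = rng.uniform(0.0, 1.0, size=100000)
# Under-dispersed ensemble: the observation often falls outside the ensemble,
# so many normalized ranks are exactly 0 or 1.
underdispersed = np.concatenate([
    np.zeros(5000), np.ones(5000), rng.uniform(0.0, 1.0, size=90000)])

for (name, ranks) in [("calibrated", calibrated),
                      ("underdispersed", underdispersed)]:
    print("{}: KS={:.3f} CvM={:.3f} DKL={:.4f} OP={:.3f}".format(
        name, rank_KS(ranks), rank_CvM(ranks),
        rank_DKL(ranks), rank_OP(ranks)))
```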
#### File: downscaling-rnn-gan/dsrnngan/gan.py
```python
import gc
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.utils import generic_utils
from layers import GradientPenalty, RandomWeightedAverage
from meta import Nontrainable, input_shapes, ensure_list
from meta import save_opt_weights, load_opt_weights
class WGANGP(object):
def __init__(self, gen, disc, num_channels=1, num_timesteps=8,
gradient_penalty_weight=10, lr_disc=0.0001, lr_gen=0.0001,
avg_seed=None):
self.gen = gen
self.disc = disc
self.num_channels = num_channels
self.num_timesteps = num_timesteps
self.gradient_penalty_weight = gradient_penalty_weight
self.lr_disc = lr_disc
self.lr_gen = lr_gen
self.build_wgan_gp()
def filenames_from_root(self, root):
fn = {
"gen_weights": root+"-gen_weights.h5",
"disc_weights": root+"-disc_weights.h5",
"gen_opt_weights": root+"-gen_opt_weights.h5",
"disc_opt_weights": root+"-disc_opt_weights.h5"
}
return fn
def load(self, load_files):
self.gen.load_weights(load_files["gen_weights"])
self.disc.load_weights(load_files["disc_weights"])
with Nontrainable(self.disc):
self.gen_trainer._make_train_function()
load_opt_weights(self.gen_trainer,
load_files["gen_opt_weights"])
with Nontrainable(self.gen):
self.disc_trainer._make_train_function()
load_opt_weights(self.disc_trainer,
load_files["disc_opt_weights"])
def save(self, save_fn_root):
paths = self.filenames_from_root(save_fn_root)
self.gen.save_weights(paths["gen_weights"], overwrite=True)
self.disc.save_weights(paths["disc_weights"], overwrite=True)
save_opt_weights(self.disc_trainer, paths["disc_opt_weights"])
save_opt_weights(self.gen_trainer, paths["gen_opt_weights"])
def build_wgan_gp(self):
# find shapes for inputs
cond_shapes = input_shapes(self.gen, "cond")
noise_shapes = input_shapes(self.gen, "noise")
sample_shapes = input_shapes(self.disc, "sample")
# Create generator training network
with Nontrainable(self.disc):
cond_in = [Input(shape=s) for s in cond_shapes]
noise_in = [Input(shape=s) for s in noise_shapes]
gen_in = cond_in+noise_in
gen_out = self.gen(gen_in)
gen_out = ensure_list(gen_out)
disc_in_gen = cond_in+[gen_out]
disc_out_gen = self.disc(disc_in_gen)
self.gen_trainer = Model(inputs=gen_in, outputs=disc_out_gen)
# Create discriminator training network
with Nontrainable(self.gen):
cond_in = [Input(shape=s) for s in cond_shapes]
noise_in = [Input(shape=s) for s in noise_shapes]
sample_in = [Input(shape=s) for s in sample_shapes]
gen_in = cond_in+noise_in
disc_in_real = sample_in[0]
disc_in_fake = self.gen(gen_in)
disc_in_avg = RandomWeightedAverage()([disc_in_real,disc_in_fake])
disc_out_real = self.disc(cond_in+[disc_in_real])
disc_out_fake = self.disc(cond_in+[disc_in_fake])
disc_out_avg = self.disc(cond_in+[disc_in_avg])
disc_gp = GradientPenalty()([disc_out_avg, disc_in_avg])
self.disc_trainer = Model(inputs=cond_in+sample_in+noise_in,
outputs=[disc_out_real,disc_out_fake,disc_gp])
self.compile()
def compile(self, opt_disc=None, opt_gen=None):
#create optimizers
if opt_disc is None:
opt_disc = Adam(self.lr_disc, beta_1=0.5, beta_2=0.9)
self.opt_disc = opt_disc
if opt_gen is None:
opt_gen = Adam(self.lr_gen, beta_1=0.5, beta_2=0.9)
self.opt_gen = opt_gen
with Nontrainable(self.disc):
self.gen_trainer.compile(loss=wasserstein_loss,
optimizer=self.opt_gen)
with Nontrainable(self.gen):
self.disc_trainer.compile(
loss=[wasserstein_loss, wasserstein_loss, 'mse'],
loss_weights=[1.0, 1.0, self.gradient_penalty_weight],
optimizer=self.opt_disc
)
def train(self, batch_gen, noise_gen, num_gen_batches=1,
training_ratio=1, show_progress=True):
disc_target_real = None
if show_progress:
# Initialize progbar and batch counter
progbar = generic_utils.Progbar(
num_gen_batches*batch_gen.batch_size)
disc_target_real = np.ones(
(batch_gen.batch_size, batch_gen.num_frames, 1), dtype=np.float32)
disc_target_fake = -disc_target_real
gen_target = disc_target_real
target_gp = np.zeros((batch_gen.batch_size, 1), dtype=np.float32)
disc_target = [disc_target_real, disc_target_fake, target_gp]
loss_log = []
for k in range(num_gen_batches):
# train discriminator
disc_loss = None
disc_loss_n = 0
for rep in range(training_ratio):
# generate some real samples
(sample, cond) = next(batch_gen)
noise = noise_gen()
with Nontrainable(self.gen):
dl = self.disc_trainer.train_on_batch(
[cond,sample]+noise, disc_target)
if disc_loss is None:
disc_loss = np.array(dl)
else:
disc_loss += np.array(dl)
disc_loss_n += 1
del sample, cond
disc_loss /= disc_loss_n
with Nontrainable(self.disc):
(sample, cond) = next(batch_gen)
gen_loss = self.gen_trainer.train_on_batch(
[cond]+noise_gen(), gen_target)
del sample, cond
if show_progress:
losses = []
for (i,dl) in enumerate(disc_loss):
losses.append(("D{}".format(i), dl))
for (i,gl) in enumerate([gen_loss]):
losses.append(("G{}".format(i), gl))
progbar.add(batch_gen.batch_size,
values=losses)
loss_log.append(np.hstack((disc_loss,gen_loss)))
gc.collect()
return np.array(loss_log)
def wasserstein_loss(y_true, y_pred):
return K.mean(y_true * y_pred, axis=-1)
```
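A hedged sketch of how `WGANGP` is typically driven; `gen`, `disc`, `batch_gen` and `noise_gen` are assumed to come from the rest of the package (models.py, data.py, noise.py), and the checkpoint path and iteration counts are placeholders:
```python
# Usage sketch for WGANGP; all argument values are illustrative assumptions.
from gan import WGANGP

def train_wgan(gen, disc, batch_gen, noise_gen, num_checkpoints=10):
    wgan = WGANGP(gen, disc, gradient_penalty_weight=10,
                  lr_disc=1e-4, lr_gen=1e-4)
    for i in range(num_checkpoints):
        # the usual WGAN-GP recipe: several discriminator updates per
        # generator update (training_ratio)
        wgan.train(batch_gen, noise_gen, num_gen_batches=100,
                   training_ratio=5)
        wgan.save("../models/wgan-{:02d}".format(i))
    return wgan
```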
#### File: downscaling-rnn-gan/dsrnngan/layers.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.engine import InputSpec
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import math_ops
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Dense, Conv2D
from tensorflow.python.keras.layers.merge import _Merge
from tensorflow.keras import initializers
from tensorflow.keras import backend as K
class GradientPenalty(Layer):
def __init__(self, **kwargs):
super(GradientPenalty, self).__init__(**kwargs)
def call(self, inputs):
target, wrt = inputs
grad = K.gradients(target, wrt)[0]
return K.sqrt(K.sum(K.batch_flatten(K.square(grad)), axis=1, keepdims=True))-1
def compute_output_shape(self, input_shapes):
return (input_shapes[1][0], 1)
class RandomWeightedAverage(_Merge):
def build(self, input_shape):
super(RandomWeightedAverage, self).build(input_shape)
if len(input_shape) != 2:
raise ValueError('A `RandomWeightedAverage` layer should be '
'called on exactly 2 inputs')
def _merge_function(self, inputs):
if len(inputs) != 2:
raise ValueError('A `RandomWeightedAverage` layer should be '
'called on exactly 2 inputs')
(x,y) = inputs
shape = K.shape(x)
weights = K.random_uniform(shape[:1],0,1)
for i in range(len(K.int_shape(x))-1):
weights = K.expand_dims(weights,-1)
return x*weights + y*(1-weights)
class SNConv2D(Conv2D):
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[channel_axis]
kernel_shape = self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),
initializer=initializers.RandomNormal(0, 1),
name='sn',
trainable=False)
# Set input spec.
self.input_spec = InputSpec(ndim=self.rank + 2,
axes={channel_axis: input_dim})
self.built = True
def _get_trainable_var(self):
if self._trainable_var is None:
self._trainable_var = K.freezable_variable(
self._trainable, name=self.name + '_trainable')
return self._trainable_var
def _get_training_value(self, training=None):
if training is None:
training = K.learning_phase()
if isinstance(training, int):
training = bool(training)
if base_layer_utils.is_in_keras_graph():
training = math_ops.logical_and(training, self._get_trainable_var())
else:
training = math_ops.logical_and(training, self.trainable)
return training
def call(self, inputs, training=None):
training = self._get_training_value(training)
def _l2normalize(v, eps=1e-12):
return v / (K.sum(v ** 2) ** 0.5 + eps)
def power_iteration(W, u):
            # According to the paper, one power iteration is enough.
_u = u
_v = _l2normalize(K.dot(_u, K.transpose(W)))
_u = _l2normalize(K.dot(_v, W))
return _u, _v
#Spectral Normalization
W_shape = self.kernel.shape.as_list()
#Flatten the Tensor
W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
_u, _v = power_iteration(W_reshaped, self.u)
#Calculate Sigma
sigma = K.dot(_v, W_reshaped)
sigma = K.dot(sigma, K.transpose(_u))
#normalize it
W_bar = W_reshaped / sigma
if training == False:
W_bar = K.reshape(W_bar, W_shape)
else:
u = self.u.assign(tf.cond(
training,
lambda: _u,
lambda: self.u
))
with tf.control_dependencies([u]):
W_bar = K.reshape(W_bar, W_shape)
outputs = K.conv2d(
inputs,
W_bar,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.use_bias:
outputs = K.bias_add(
outputs,
self.bias,
data_format=self.data_format)
if self.activation is not None:
return self.activation(outputs)
return outputs
class SNDense(Dense):
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[-1]]),
initializer=initializers.RandomNormal(0, 1),
name='sn',
trainable=False)
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def _get_trainable_var(self):
if self._trainable_var is None:
self._trainable_var = K.freezable_variable(
self._trainable, name=self.name + '_trainable')
return self._trainable_var
def _get_training_value(self, training=None):
if training is None:
training = K.learning_phase()
if isinstance(training, int):
training = bool(training)
if base_layer_utils.is_in_keras_graph():
training = math_ops.logical_and(training, self._get_trainable_var())
else:
training = math_ops.logical_and(training, self.trainable)
return training
def call(self, inputs, training=None):
training = self._get_training_value(training)
def _l2normalize(v, eps=1e-12):
return v / (K.sum(v ** 2) ** 0.5 + eps)
def power_iteration(W, u):
_u = u
_v = _l2normalize(K.dot(_u, K.transpose(W)))
_u = _l2normalize(K.dot(_v, W))
return _u, _v
W_shape = self.kernel.shape.as_list()
#Flatten the Tensor
W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
_u, _v = power_iteration(W_reshaped, self.u)
#Calculate Sigma
sigma = K.dot(_v, W_reshaped)
sigma = K.dot(sigma, K.transpose(_u))
#normalize it
W_bar = W_reshaped / sigma
if training == False:
W_bar = K.reshape(W_bar, W_shape)
else:
u = self.u.assign(tf.cond(
training,
lambda: _u,
lambda: self.u
))
with tf.control_dependencies([u]):
W_bar = K.reshape(W_bar, W_shape)
output = K.dot(inputs, W_bar)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format='channels_last')
if self.activation is not None:
output = self.activation(output)
return output
class ReflectionPadding2D(Layer):
def __init__(self, padding=(1,1), **kwargs):
self.padding = tuple(padding)
super(ReflectionPadding2D, self).__init__(**kwargs)
def compute_output_shape(self, s):
return (
s[0],
None if s[1] is None else s[1]+2*self.padding[0],
None if s[2] is None else s[2]+2*self.padding[1],
s[3]
)
def call(self, x):
(i_pad,j_pad) = self.padding
return tf.pad(x, [[0,0], [i_pad,i_pad], [j_pad,j_pad], [0,0]], 'REFLECT')
```
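A minimal check of `ReflectionPadding2D` (shapes and values are arbitrary; `SNConv2D`/`SNDense` are drop-in replacements for `Conv2D`/`Dense` with spectral normalization and are not exercised here because they depend on Keras internals of the targeted TensorFlow version):
```python
# ReflectionPadding2D sketch; input values and shapes are arbitrary.
import numpy as np
import tensorflow as tf
from layers import ReflectionPadding2D

x = np.arange(16, dtype=np.float32).reshape((1, 4, 4, 1))
y = ReflectionPadding2D(padding=(1, 1))(tf.constant(x))
print(y.shape)  # (1, 6, 6, 1): one reflected row/column added on each side
```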
#### File: downscaling-rnn-gan/dsrnngan/plots.py
```python
from bisect import bisect_left
from datetime import datetime, timedelta
import gc
import os
from string import ascii_lowercase
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
from matplotlib import colorbar, colors, gridspec
import netCDF4
import numpy as np
import pandas as pd
try:
from PIL import Image
except ImportError:
    pass  # allow importing this module on setups without PIL
import data
import models
import noise
import train
path = os.path.dirname(os.path.abspath(__file__))
def plot_img(img, value_range=(np.log10(0.1), np.log10(100)), extent=None):
plt.imshow(img, interpolation='nearest',
norm=colors.Normalize(*value_range), extent=extent)
plt.gca().tick_params(left=False, bottom=False,
labelleft=False, labelbottom=False)
def plot_sequences(gen, batch_gen, noise_gen,
num_samples=8, num_instances=4, out_fn=None,
plot_stride=1):
old_batch_size = batch_gen.batch_size
try:
batch_gen.batch_size = num_samples
noise_gen.batch_size = num_samples
(seq_real, cond) = next(batch_gen)
seq_gen = []
for i in range(num_instances):
seq_gen.append(gen.predict([cond]+noise_gen()))
finally:
batch_gen.batch_size = old_batch_size
noise_gen.batch_size = old_batch_size
seq_real = batch_gen.decoder.denormalize(seq_real)
cond = batch_gen.decoder.denormalize(cond)
seq_gen = [batch_gen.decoder.denormalize(seq) for seq in seq_gen]
num_frames = batch_gen.num_frames
if plot_stride > 1:
seq_real = seq_real[:,::plot_stride,...]
cond = cond[:,::plot_stride,...]
for i in range(len(seq_gen)):
seq_gen[i] = seq_gen[i][:,::plot_stride,...]
num_frames = seq_real.shape[1]
num_rows = num_samples*num_frames
num_cols = 2+num_instances
figsize = (num_cols*1.5, num_rows*1.5)
plt.figure(figsize=figsize)
gs = gridspec.GridSpec(num_rows, num_cols,
wspace=0.05, hspace=0.05)
value_range = batch_gen.decoder.value_range
for s in range(num_samples):
for t in range(num_frames):
i = s*num_frames+t
plt.subplot(gs[i,0])
plot_img(seq_real[s,t,:,:,0], value_range=value_range)
plt.subplot(gs[i,1])
plot_img(cond[s,t,:,:,0], value_range=value_range)
for k in range(num_instances):
j = 2+k
plt.subplot(gs[i,j])
plot_img(seq_gen[k][s,t,:,:,0], value_range=value_range)
if out_fn is not None:
plt.savefig(out_fn, bbox_inches='tight')
plt.close()
def plot_rank_metrics_by_samples(metrics_fn,ax=None,
plot_metrics=["KS", "DKL", "OP", "mean"], value_range=(-0.1,0.2),
linestyles=['solid', 'dashed', 'dashdot', ':',],
opt_switch_point=350000, plot_switch_text=True):
if ax is None:
ax = plt.gca()
df = pd.read_csv(metrics_fn, delimiter=" ")
x = df["N"]
for (metric,linestyle) in zip(plot_metrics,linestyles):
y = df[metric]
label = metric
if metric=="DKL":
label = "$D_\\mathrm{KL}$"
if metric=="OP":
label = "OF"
if metric=="mean":
y = y-0.5
label = "mean - $\\frac{1}{2}$"
ax.plot(x, y, label=label, linestyle=linestyle)
ax.set_xlim((0,x.max()))
ax.set_ylim(value_range)
ax.axhline(0, linestyle='--', color=(0.75,0.75,0.75), zorder=-10)
ax.axvline(opt_switch_point, linestyle='--', color=(0.75,0.75,0.75), zorder=-10)
if plot_switch_text:
text_x = opt_switch_point*0.98
text_y = value_range[1]-(value_range[1]-value_range[0])*0.02
ax.text(text_x, text_y, "Adam\u2192SGD", horizontalalignment='right',
verticalalignment='top', color=(0.5,0.5,0.5))
plt.grid(axis='y')
def plot_rank_metrics_by_samples_multiple(metrics_files,
value_ranges=[(-0.025,0.075),(-0.1,0.2)]):
(fig,axes) = plt.subplots(len(metrics_files),1, sharex=True,
squeeze=True)
plt.subplots_adjust(hspace=0.1)
for (i,(ax,fn,vr)) in enumerate(zip(axes,metrics_files,value_ranges)):
plot_rank_metrics_by_samples(fn,ax,plot_switch_text=(i==0),value_range=vr)
if i==len(metrics_files)-1:
ax.legend(ncol=5)
ax.set_xlabel("Training sequences")
ax.text(0.04, 0.97, "({})".format(ascii_lowercase[i]),
horizontalalignment='left', verticalalignment='top',
transform=ax.transAxes)
ax.set_ylabel("Rank metric")
ax.grid(axis='y')
def plot_quality_metrics_by_samples(quality_metrics_fn,
rank_metrics_fn, ax=None,
plot_metrics=["RMSE", "MSSSIM", "LSD", "CRPS"], value_range=(0,0.7),
linestyles=['-', '--', ':', '-.'], opt_switch_point=350000,
plot_switch_text=True):
if ax is None:
ax = plt.gca()
df = pd.read_csv(quality_metrics_fn, delimiter=" ")
df_r = pd.read_csv(rank_metrics_fn, delimiter=" ")
df["CRPS"] = df_r["CRPS"]
x = df["N"]
for (metric,linestyle) in zip(plot_metrics,linestyles):
y = df[metric]
label = metric
if metric=="MSSSIM":
y = 1-y
label = "$1 - $MS-SSIM"
if metric=="LSD":
label = "LSD [dB] / 50"
y = y/50
if metric=="CRPS":
y = y*10
label = "CRPS $\\times$ 10"
ax.plot(x, y, label=label, linestyle=linestyle)
ax.set_xlim((0,x.max()))
ax.set_ylim(value_range)
ax.axhline(0, linestyle='--', color=(0.75,0.75,0.75), zorder=-10)
ax.axvline(opt_switch_point, linestyle='--', color=(0.75,0.75,0.75), zorder=-10)
if plot_switch_text:
text_x = opt_switch_point*0.98
text_y = value_range[1]-(value_range[1]-value_range[0])*0.02
ax.text(text_x, text_y, "Adam\u2192SGD", horizontalalignment='right',
verticalalignment='top', color=(0.5,0.5,0.5))
def plot_quality_metrics_by_samples_multiple(
quality_metrics_files, rank_metrics_files):
(fig,axes) = plt.subplots(len(quality_metrics_files),1, sharex=True,
squeeze=True)
plt.subplots_adjust(hspace=0.1)
value_ranges = [(0,0.4),(0,0.8)]
for (i,(ax,fn_q,fn_r,vr)) in enumerate(zip(
axes,quality_metrics_files,rank_metrics_files,value_ranges)):
plot_quality_metrics_by_samples(fn_q,fn_r,ax,
plot_switch_text=(i==0), value_range=vr)
if i==0:
ax.legend(mode='expand', ncol=4, loc='lower left')
if i==1:
ax.set_xlabel("Training sequences")
ax.text(0.04, 0.97, "({})".format(ascii_lowercase[i]),
horizontalalignment='left', verticalalignment='top',
transform=ax.transAxes)
ax.set_ylabel("Quality metric")
ax.grid(axis='y')
def plot_sequences_horiz(gen, noise_shapes, batch_gen,
samples=[0,1,2], num_instances=3, out_fn=None,
plot_stride=2, random_seed=1234, application="mchrzc"):
num_samples = len(samples)
old_batch_size = batch_gen.batch_size
old_augment = batch_gen.augment
old_zeros_frac = batch_gen.zeros_frac
img_shape = batch_gen.sequences.shape[2:4]
noise_gen = noise.NoiseGenerator(noise_shapes(img_shape),
batch_size=num_samples, random_seed=random_seed)
# force the batch generator to return the selected samples
batch_gen.next_ind = np.array(samples)
try:
batch_gen.batch_size = num_samples
batch_gen.augment = False
batch_gen.zeros_frac = 0.0
(seq_real, cond) = next(batch_gen)
seq_gen = []
for i in range(num_instances):
seq_gen.append(gen.predict([cond]+noise_gen()))
finally:
batch_gen.batch_size = old_batch_size
batch_gen.augment = old_augment
batch_gen.zeros_frac = old_zeros_frac
seq_real = batch_gen.decoder.denormalize(seq_real)
cond = batch_gen.decoder.denormalize(cond)
seq_gen = [batch_gen.decoder.denormalize(seq) for seq in seq_gen]
num_frames = batch_gen.num_frames
if plot_stride > 1:
seq_real = seq_real[:,::plot_stride,...]
cond = cond[:,::plot_stride,...]
for i in range(len(seq_gen)):
seq_gen[i] = seq_gen[i][:,::plot_stride,...]
num_frames = seq_real.shape[1]
num_rows = num_samples
num_cols = num_frames
num_rows_s = 2+num_instances
figsize = (num_cols*1.5, num_rows*num_rows_s*1.60)
fig = plt.figure(figsize=figsize)
gs = gridspec.GridSpec(num_rows+1, 1, hspace=0.05,
height_ratios=[1]*num_rows+[0.035])
value_range = batch_gen.decoder.value_range
for s in range(num_samples):
gs_s = gridspec.GridSpecFromSubplotSpec(num_rows_s, num_cols,
subplot_spec=gs[s,0], wspace=0.05, hspace=0.05)
for t in range(num_frames):
plt.subplot(gs_s[0,t])
plot_img(seq_real[s,t,:,:,0], value_range=value_range)
if t==0:
plt.ylabel("Real", fontsize=16)
plt.text(0.01, 0.97, "({})".format(ascii_lowercase[s]),
horizontalalignment='left', verticalalignment='top',
transform=plt.gca().transAxes, fontsize=16)
if s==0:
plt.title("Time \u2192", fontsize=16)
plt.subplot(gs_s[1,t])
plot_img(cond[s,t,:,:,0], value_range=value_range)
if t==0:
plt.ylabel("Downs.", fontsize=16)
for k in range(num_instances):
j = 2+k
plt.subplot(gs_s[j,t])
plot_img(seq_gen[k][s,t,:,:,0], value_range=value_range)
if t==0:
plt.ylabel("Gen. #{}".format(k+1), fontsize=16)
if application == 'mchrzc':
units = "Rain rate [mm h$^{-1}$]"
cb_tick_loc = np.array([-1, 0, 1, 2])
cb_tick_labels = [0.1, 1, 10, 100]
elif application == 'goescod':
units = "Cloud optical thickness"
cb_tick_loc = np.log([2, 10, 50, 150])
cb_tick_labels = np.exp(cb_tick_loc).round().astype(int)
cax = plt.subplot(gs[-1,0]).axes
cb = colorbar.ColorbarBase(cax, norm=colors.Normalize(*value_range),
orientation='horizontal')
cb.set_ticks(cb_tick_loc)
cb.set_ticklabels(cb_tick_labels)
cax.tick_params(labelsize=16)
cb.set_label(units, size=16)
if out_fn is not None:
plt.savefig(out_fn, bbox_inches='tight')
plt.close()
def plot_examples_mchrzc(data_fn, weights_fn, plot_fn):
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test, noise_shapes,
steps_per_epoch) = train.setup_gan(data_fn, test_data_file=data_fn,
sample_random=True, n_samples=1, application='mchrzc',
random_seed=1234)
gen = wgan.gen
gen.load_weights(weights_fn)
plot_sequences_horiz(gen, noise_shapes, batch_gen_test, samples=[0,21,15],
application='mchrzc', plot_stride=1)
plt.savefig(plot_fn, bbox_inches='tight')
plt.close()
def plot_examples_goescod(data_fn, weights_fn, plot_fn):
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test, noise_shapes,
steps_per_epoch) = train.setup_gan(data_fn, test_data_file=data_fn,
sample_random=True, n_samples=1, application='goescod',
random_seed=1234)
gen = wgan.gen
gen.load_weights(weights_fn)
plot_sequences_horiz(gen, noise_shapes, batch_gen_test, samples=[0,1,2],
application='goescod', plot_stride=1)
plt.savefig(plot_fn, bbox_inches='tight')
plt.close()
def plot_examples_mchrzc_random(data_fn, weights_fn, plot_dir, num_examples=16):
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test, noise_shapes,
steps_per_epoch) = train.setup_gan(data_fn, test_data_file=data_fn,
sample_random=True, n_samples=1, application='mchrzc',
random_seed=2345)
gen = wgan.gen
gen.load_weights(weights_fn)
for k in range(num_examples):
plot_fn = plot_dir + "/examples-mchrzc-random-{:02d}.pdf".format(k)
plot_sequences_horiz(gen, noise_shapes, batch_gen_test, samples=[k],
application='mchrzc', plot_stride=1, num_instances=12)
plt.savefig(plot_fn, bbox_inches='tight')
plt.close()
def plot_examples_goescod_random(data_fn, weights_fn, plot_dir, num_examples=16):
(wgan, batch_gen_train, batch_gen_valid, batch_gen_test, noise_shapes,
steps_per_epoch) = train.setup_gan(data_fn, test_data_file=data_fn,
sample_random=True, n_samples=1, application='goescod',
random_seed=2345)
gen = wgan.gen
gen.load_weights(weights_fn)
for k in range(num_examples):
plot_fn = plot_dir + "/examples-goescod-random-{:02d}.pdf".format(k)
plot_sequences_horiz(gen, noise_shapes, batch_gen_test, samples=[k],
application='goescod', plot_stride=1, num_instances=12)
plt.savefig(plot_fn, bbox_inches='tight')
plt.close()
def plot_video_frame(img_real, img_ds, img_gen, oob_mask, time, num_ensemble=4):
assert(num_ensemble in {1,4})
img_shape = img_real.shape
if num_ensemble == 1:
figsize = (img_shape[1]/img_shape[0]*3*4, 4)
gs = gridspec.GridSpec(1,3,hspace=0.05, wspace=0.05)
elif num_ensemble == 4:
figsize = (img_shape[1]/img_shape[0]*3*4, 2*4)
gs = gridspec.GridSpec(2,3,hspace=0.05, wspace=0.05)
fig = plt.figure(figsize=figsize, dpi=210)
ds_factor = int(round((img_gen.shape[0]/img_ds.shape[0])))
oob_mask_ds = np.zeros(img_ds.shape, dtype=bool)
oob_mask_gen = np.zeros(img_gen[:,:,0].shape, dtype=bool)
for i_ds in range(oob_mask_ds.shape[0]):
for j_ds in range(oob_mask_ds.shape[1]):
i0 = i_ds*ds_factor
j0 = j_ds*ds_factor
i1 = i0+ds_factor
j1 = j0+ds_factor
oob_mask_ds[i_ds,j_ds] = oob_mask[i0:i1,j0:j1].any()
oob_mask_gen[i0:i1,j0:j1] = oob_mask_ds[i_ds,j_ds]
cmap_mask = colors.ListedColormap([
[0.0,0.0,0.0,0.0],
[0.75,0.75,0.75,1.0]
])
import shapefile
border = shapefile.Reader("../data/Border_CH.shp")
shapes = list(border.shapeRecords())
def draw_border():
for shape in shapes:
x = [i[0]/1000. for i in shape.shape.points[:]]
y = [i[1]/1000. for i in shape.shape.points[:]]
plt.plot(x,y,'k',linewidth=1.0)
extent_real = [254.5,965.5,-159.5,480.5]
extent_gen = [254.5,959.5,-159.5,480.5]
plt.subplot(gs[0,0])
plot_img(img_real, extent=extent_real)
plt.imshow(oob_mask.astype(int), cmap=cmap_mask, extent=extent_real)
draw_border()
plt.gca().set_xlim((extent_real[0],extent_real[1]))
plt.gca().set_ylim((extent_real[2],extent_real[3]))
plt.title("Real", fontsize=14)
if num_ensemble == 1:
gs_ds = gs[0,1]
elif num_ensemble == 4:
gs_ds = gs[1,0]
plt.subplot(gs_ds)
plot_img(img_ds, extent=extent_gen)
plt.imshow(oob_mask_ds.astype(int), cmap=cmap_mask, extent=extent_gen)
draw_border()
plt.gca().set_xlim((extent_real[0],extent_real[1]))
plt.gca().set_ylim((extent_real[2],extent_real[3]))
if num_ensemble == 1:
plt.title(time.strftime("%Y-%m-%d %H:%M UTC")+"\n\nDownsampled", fontsize=14)
elif num_ensemble == 4:
plt.xlabel("Downsampled", fontsize=14)
if num_ensemble == 1:
gs_list = [gs[0,2]]
elif num_ensemble == 4:
gs_list = [gs[0,1], gs[0,2], gs[1,1], gs[1,2]]
for (k,g) in enumerate(gs_list):
plt.subplot(g)
plot_img(img_gen[:,:,k], extent=extent_gen)
plt.imshow(oob_mask_gen.astype(int), cmap=cmap_mask, extent=extent_gen)
draw_border()
plt.gca().set_xlim((extent_real[0],extent_real[1]))
plt.gca().set_ylim((extent_real[2],extent_real[3]))
if num_ensemble == 1:
plt.title("Reconstructed", fontsize=14)
elif num_ensemble == 4:
if k == 1:
plt.title("Generated #{}".format(k+1), fontsize=14)
elif k == 0:
plt.title(time.strftime("%Y-%m-%d %H:%M UTC") +
"\n\nGenerated#{}".format(k+1), fontsize=14)
else:
plt.xlabel("Generated #{}".format(k+1), fontsize=14)
def plot_video_frames_all(images_fn, gen_fn, out_dir,
format="png", application="mchrzc", time_range=None,
scaling_fn=path+"/../data/scale_rzc.npy", num_ensemble=4):
if application == "mchrzc":
dec = data.RainRateDecoder(scaling_fn, below_val=np.log10(0.025))
else:
raise ValueError("Unknown application.")
if not os.path.exists(out_dir):
os.mkdir(out_dir)
smoothener = data.Smoothener()
# To get a proper comparison of real and generated fields we must
# apply the same kind of preprocessing as we do when training the GAN
def decode_real(x):
oob_mask = (x==0)
nan_mask = (x==1)
x = dec(x)
x[nan_mask | oob_mask] = dec.below_val
x = x.reshape((1,1)+img_real.shape+(1,))
x = smoothener.smoothen(x)
x = x[0,0,:,:,0]
x[x < dec.value_range[0]] = np.nan
return (x, oob_mask)
def decode(x):
oob_mask = (x==0)
nan_mask = (x==1)
x = dec(x)
x[nan_mask | oob_mask] = np.nan
return x
with netCDF4.Dataset(images_fn, 'r') as ds_images:
time_real = np.array(ds_images["time"][:], copy=False)
with netCDF4.Dataset(gen_fn, 'r') as ds_gen:
t0 = ds_gen["time"][0]
k0 = bisect_left(time_real,t0)
N = ds_gen["images"].shape[0]
for k in range(k0,k0+ds_gen["images"].shape[0]):
time = float(ds_images["time"][k])
time = datetime(1970,1,1)+timedelta(seconds=time)
if time_range is not None:
if not (time_range[0]<=time<time_range[1]):
continue
print(k)
img_real = np.array(ds_images["images"][k,:,:,0], copy=False)
(img_real,oob_mask) = decode_real(img_real)
img_ds = decode(np.array(ds_gen["images_ds"][k-k0,:,:,0], copy=False))
img_gen = decode(np.array(ds_gen["images"][k-k0,:,:,0,:], copy=False))
plot_video_frame(img_real, img_ds, img_gen,
oob_mask, time, num_ensemble=num_ensemble)
out_fn = "{}/frame-{:05d}.{}".format(out_dir,k,format)
plt.savefig(out_fn, bbox_inches='tight')
plt.close()
def plot_rank_histogram(ax, ranks, N_ranks=101, **plot_params):
bc = np.linspace(0,1,N_ranks)
db = (bc[1]-bc[0])
bins = bc-db/2
bins = np.hstack((bins, bins[-1]+db))
(h,_) = np.histogram(ranks,bins=bins)
h = h / h.sum()
ax.plot(bc,h,**plot_params)
def plot_rank_cdf(ax, ranks, N_ranks=101, **plot_params):
bc = np.linspace(0,1,N_ranks)
db = (bc[1]-bc[0])
bins = bc-db/2
bins = np.hstack((bins, bins[-1]+db))
(h,_) = np.histogram(ranks,bins=bins)
h = h.cumsum()
h = h / h[-1]
ax.plot(bc,h,**plot_params)
def plot_rank_histogram_all(rank_files, labels, N_ranks=101):
(fig,axes) = plt.subplots(2,1,sharex=True,figsize=(6,3))
plt.subplots_adjust(hspace=0.15)
linestyles = ["-","--"]
colors = ["C0", "C1"]
for ((fn_valid,fn_test),label,ls,c) in zip(rank_files,labels,linestyles,colors):
with np.load(fn_test, allow_pickle=True) as f:
ranks = f['arr_0'].item()['ranks']
plot_rank_histogram(axes[0], ranks, N_ranks=N_ranks,
label=label, linestyle=ls, linewidth=2, c=c, alpha=0.7, zorder=1)
with np.load(fn_valid) as f:
ranks = f['arr_0']
plot_rank_histogram(axes[0], ranks, N_ranks=N_ranks,
label=None, linestyle=ls, linewidth=0.75, c=c, zorder=2)
bc = np.linspace(0,1,N_ranks)
axes[0].plot(bc, [1./N_ranks]*len(bc), linestyle=':', label="Uniform", c='C2', zorder=0)
axes[0].set_ylabel("Norm. occurrence")
ylim = axes[0].get_ylim()
axes[0].set_ylim((0,ylim[1]))
axes[0].set_xlim((0,1))
axes[0].text(0.01, 0.97, "(a)",
horizontalalignment='left', verticalalignment='top',
transform=axes[0].transAxes)
for ((fn_valid,fn_test),label,ls,c) in zip(rank_files,labels,linestyles,colors):
with np.load(fn_test, allow_pickle=True) as f:
ranks = f['arr_0'].item()['ranks']
plot_rank_cdf(axes[1], ranks, N_ranks=N_ranks,
label=label, linestyle=ls, linewidth=2, c=c, alpha=0.7, zorder=1)
with np.load(fn_valid) as f:
ranks = f['arr_0']
plot_rank_cdf(axes[1], ranks, N_ranks=N_ranks,
label=None, linestyle=ls, linewidth=0.75, c=c, zorder=2)
axes[1].plot(bc,bc,linestyle=':', label="Uniform", c='C2', zorder=0)
axes[1].set_ylabel("CDF")
axes[1].set_xlabel("Normalized rank")
axes[1].set_ylim(0,1)
axes[1].set_xlim((0,1))
axes[1].text(0.01, 0.97, "(b)",
horizontalalignment='left', verticalalignment='top',
transform=axes[1].transAxes)
axes[1].legend(loc='lower right')
def plot_all(
mchrzc_data_fn,
goescod_data_fn,
figs_dir="../figures/",
mchrzc_gen_weights_fn="../models/gen_weights-mchrzc-0361600.h5",
goescod_gen_weights_fn="../models/gen_weights-goescod-0371200.h5",
mchrzc_quality_metrics_fn="../data/quality_metrics_by_time-mchrzc.txt",
goescod_quality_metrics_fn="../data/quality_metrics_by_time-goescod.txt",
mchrzc_rank_metrics_fn="../data/rank_metrics_by_time-mchrzc.txt",
goescod_rank_metrics_fn="../data/rank_metrics_by_time-goescod.txt",
mchrzc_rank_samples_valid_fn="../data/ranks-mchrzc-361600-valid.npz",
mchrzc_rank_samples_test_fn="../data/ranks-mchrzc-361600-test.npz",
goescod_rank_samples_valid_fn="../data/ranks-goescod-371200-valid.npz",
goescod_rank_samples_test_fn="../data/ranks-goescod-371200-test.npz"
):
plot_examples_mchrzc(
mchrzc_data_fn,
mchrzc_gen_weights_fn,
"{}/examples-mchrzc.pdf".format(figs_dir)
)
gc.collect()
plot_examples_mchrzc_random(
mchrzc_data_fn, mchrzc_gen_weights_fn, figs_dir
)
gc.collect()
plot_examples_goescod(
goescod_data_fn,
goescod_gen_weights_fn,
"{}/examples-goescod.pdf".format(figs_dir)
)
gc.collect()
plot_examples_goescod_random(
goescod_data_fn, goescod_gen_weights_fn, figs_dir
)
gc.collect()
plot_quality_metrics_by_samples_multiple(
[mchrzc_quality_metrics_fn, goescod_quality_metrics_fn],
[mchrzc_rank_metrics_fn, goescod_rank_metrics_fn]
)
plt.savefig("{}/quality-metrics-time.pdf".format(figs_dir),
bbox_inches='tight')
plt.close()
plot_rank_metrics_by_samples_multiple(
[mchrzc_rank_metrics_fn, goescod_rank_metrics_fn]
)
plt.savefig("{}/rank-metrics-time.pdf".format(figs_dir),
bbox_inches='tight')
plt.close()
plot_rank_histogram_all(
[
(mchrzc_rank_samples_valid_fn,mchrzc_rank_samples_test_fn),
(goescod_rank_samples_valid_fn,goescod_rank_samples_test_fn),
],
["MCH-RZC", "GOES-COT"]
)
plt.savefig("{}/rank-distribution.pdf".format(figs_dir),
bbox_inches='tight')
plt.close()
    plot_comparison("/data/nowgan/test-samples-2017-128x128.nc",
"../models/gen_weights-mchrzc-0361600.h5",
"../models/gen_det_weights-mse.h5", random_seed=16)
plt.savefig("../figures/comparison.pdf", bbox_inches='tight')
plt.close()
def resize_lanczos(img, size):
return np.array(Image.fromarray(img).resize(size, resample=Image.LANCZOS))
def plot_comparison(test_data_file, gen_gan_weights, gen_det_mse_weights,
application="mchrzc", random_seed=None):
(_, _, batch_gen) = train.setup_batch_gen(
test_data_file, test_data_file=test_data_file,
application=application, random_seed=random_seed,
batch_size=1
)
old_batch_size = batch_gen.batch_size
try:
batch_gen.batch_size = 1
(seq_real, cond) = next(batch_gen)
finally:
batch_gen.batch_size = old_batch_size
size = tuple(seq_real.shape[2:4])
seq_lanczos = np.array([resize_lanczos(x, size) for x in cond[0,...,0]])
(gen, _) = models.generator()
init_model = models.initial_state_model()
(gen_gan, noise_shapes) = models.generator_initialized(
gen, init_model)
gen_det = models.generator_deterministic(gen_gan)
noise = [np.random.randn(*((1,)+s)) for s in noise_shapes(size)]
gen_gan.load_weights(gen_gan_weights)
seq_gan = gen_gan.predict([cond]+noise)
gen_det.load_weights(gen_det_mse_weights)
seq_mse = gen_det.predict(cond)
seq_real = batch_gen.decoder.denormalize(seq_real)
cond = batch_gen.decoder.denormalize(cond)
seq_lanczos = batch_gen.decoder.denormalize(seq_lanczos)
seq_mse = batch_gen.decoder.denormalize(seq_mse)
seq_gan = batch_gen.decoder.denormalize(seq_gan)
import rainfarm
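# cond appears to hold log10(rain rate) after denormalization, so 10**cond
# recovers linear rain rates for RainFARM; non-finite entries are set to zero
# (no rain) before downscaling.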
P = 10**cond
P[~np.isfinite(P)] = 0
alpha = rainfarm.get_alpha_seq(P[0,...,0])
print(alpha)
r = [rainfarm.rainfarm_downscale(p, alpha=alpha, threshold=0.1)
for p in P[0,...,0]]
log_r = np.log10(r)
log_r[~np.isfinite(log_r)] = np.nan
sequences = [
seq_real[0,...,0],
cond[0,...,0],
seq_lanczos,
seq_mse[0,...,0],
log_r,
seq_gan[0,...,0]
]
labels = [
"Real", "Downsampled", "Lanczos", "Det. RCNN", "RainFARM", "GAN"
]
num_cols = seq_real.shape[1]
num_rows = len(sequences)
plt.figure(figsize=(1.5*num_cols,1.5*num_rows))
gs = gridspec.GridSpec(num_rows,num_cols,wspace=0.05,hspace=0.05)
for k in range(seq_real.shape[1]):
for i in range(num_rows):
plt.subplot(gs[i,k])
plot_img(sequences[i][k,:,:])
if k==0:
plt.ylabel(labels[i])
gc.collect()
``` |
{
"source": "jleinonen/gan-elements",
"score": 3
} |
#### File: gan-elements/cgan/train.py
```python
from data import MNISTBatchGenerator, NoiseGenerator
from gan import GAN
from models import cgan_disc, cgan_gen
import plots
def build_gan():
noise_dim = 64
gen = cgan_gen(noise_dim=noise_dim)
disc = cgan_disc()
gan = GAN(gen, disc)
batch_gen = MNISTBatchGenerator()
noise_gen = NoiseGenerator([(noise_dim,)])
return (gan, batch_gen, noise_gen)
def train_gan(gan, batch_gen, noise_gen, num_epochs=1, steps_per_epoch=1, plot_fn=None):
gan.fit_generator(batch_gen, noise_gen, num_epochs=num_epochs,
steps_per_epoch=steps_per_epoch)
plots.plot_samples(gan.gen, batch_gen, noise_gen, out_fn=plot_fn)
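# The loop below runs 200 rounds of 20 training steps each and writes a sample grid
# (cgan_samples_000.png ... cgan_samples_199.png) to ../figures/ after every round.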
if __name__ == "__main__":
(gan, batch_gen, noise_gen) = build_gan()
for i in range(200):
train_gan(gan, batch_gen, noise_gen, steps_per_epoch=20,
plot_fn="../figures/cgan_samples_{:03d}.png".format(i))
``` |
{
"source": "jleinonen/geogan",
"score": 2
} |
#### File: geogan/geogan/goesdata.py
```python
import json
import netCDF4
import numpy as np
class BatchGenerator(object):
def __init__(self, data_file, errors_file=None, batch_size=32,
tile_shape=(128,128), random_seed=None):
self.data_file = data_file
self.errors_file = errors_file
self.batch_size = batch_size
self.tile_shape = tile_shape
self.img_shape = tile_shape # for compatibility
self.ds = netCDF4.Dataset(data_file, 'r')
self.N = self.ds["image"].shape[0]
self.n_channels = self.ds["image"].shape[-1]
self.image_shape = self.ds["image"].shape[1:3]
self.timestamps = [str(ts) for ts in
netCDF4.chartostring(self.ds["timestamp"][:])]
with open(errors_file) as f:
self.errors = json.load(f)
self.prng = np.random.RandomState(seed=random_seed)
def __del__(self):
if "ds" in self.__dict__:
self.ds.close()
def __iter__(self):
return self
def __next__(self):
batch_shape = (self.batch_size,) + self.tile_shape + \
(self.n_channels,)
batch = np.zeros(batch_shape, dtype=np.float32)
for k in range(self.batch_size):
tile = self.sample_tile()
tile = tile.astype(np.float32)
batch[k,...] = tile
batch /= 127.5
batch -= 1
return batch
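# sample_tile draws tiles by rejection sampling: it keeps picking a random image
# index and tile origin until the candidate tile does not overlap any error
# rectangle recorded for that frame's timestamp in the errors file.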
def sample_tile(self):
tile_errors = True
while tile_errors:
k = self.prng.randint(self.N)
i0 = self.prng.randint(self.image_shape[0]-self.tile_shape[0])
i1 = i0+self.tile_shape[0]
j0 = self.prng.randint(self.image_shape[1]-self.tile_shape[1])
j1 = j0+self.tile_shape[1]
timestamp = self.timestamps[k]
if timestamp in self.errors:
rect = (i0,i1,j0,j1)
tile_errors = any(rects_overlap(rect, r) for r
in self.errors[timestamp])
else:
tile_errors = False
tile = np.array(self.ds["image"][k,i0:i1,j0:j1,:], copy=False)
return tile
def rects_overlap(rect1, rect2):
(l1, r1, b1, t1) = rect1
(l2, r2, b2, t2) = rect2
return (l1 < r2) and (r1 >= l2) and (t1 >= b2) and (b1 < t2)
```
#### File: geogan/geogan/train.py
```python
import gc
import netCDF4
import numpy as np
import gan
import goesdata
import mchdata
import models
import noise
import plots
def setup_gan(data_file=None, application="mch",
num_epochs=1, steps_per_epoch=None, training_ratio=1,
batch_size=32, sample_random=False, scaling_fn="../data/scale_rzc.npy",
error_fn="../data/goes_errors.json", n_samples=None, random_seed=None,
lr_disc=0.0001, lr_gen=0.0001):
if data_file is not None:
if application == "mch":
with netCDF4.Dataset(data_file, 'r') as ds:
if n_samples is None:
seq = np.array(ds["sequences"][:], copy=False)
else:
if sample_random:
prng = np.random.RandomState(seed=random_seed)
ind = prng.choice(ds["sequences"].shape[0], n_samples,
replace=False)
seq = np.array(ds["sequences"][ind,...], copy=False)
else:
seq = np.array(ds["sequences"][n_samples[0]:n_samples[1]],
copy=False)
dec = mchdata.RainRateDecoder(scaling_fn, below_val=np.log10(0.025))
batch_gen = mchdata.BatchGenerator(seq, dec, batch_size=batch_size,
random_seed=random_seed)
num_channels = 1
elif application == "goes":
batch_gen = goesdata.BatchGenerator(data_file, errors_file=error_fn,
batch_size=batch_size, random_seed=random_seed)
num_channels = 3
else:
raise ValueError("Unknown application.")
if steps_per_epoch is None:
steps_per_epoch = batch_gen.N//batch_gen.batch_size
(gen_styled, gen, styling, noise_shapes) = models.generator_styled(
num_channels=num_channels)
disc = models.discriminator(num_channels=num_channels)
wgan = gan.WGANGP(gen_styled, disc, lr_disc=lr_disc, lr_gen=lr_gen,
num_channels=num_channels)
gc.collect()
return (wgan, batch_gen, noise_shapes, steps_per_epoch)
def train_gan(wgan, batch_gen, noise_shapes, steps_per_epoch, num_epochs,
application="mch"):
img_shape = batch_gen.img_shape
noise_gen = noise.NoiseGenerator(noise_shapes(img_shape),
batch_size=batch_gen.batch_size)
for epoch in range(num_epochs):
print("Epoch {}/{}".format(epoch+1,num_epochs))
wgan.train(batch_gen, noise_gen, steps_per_epoch, training_ratio=5)
plots.plot_samples(wgan.gen, batch_gen, noise_gen,
application=application,
out_fn="../figures/progress_{}.pdf".format(application))
return wgan
```
#### File: geogan/geogan/utils.py
```python
import h5py
from keras import backend as K
def save_opt_weights(model, filepath):
with h5py.File(filepath, 'w') as f:
# Save optimizer weights.
symbolic_weights = getattr(model.optimizer, 'weights')
if symbolic_weights:
optimizer_weights_group = f.create_group('optimizer_weights')
weight_values = K.batch_get_value(symbolic_weights)
weight_names = []
for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
# Default values of symbolic_weights is /variable for theano
if K.backend() == 'theano':
if hasattr(w, 'name') and w.name != "/variable":
name = str(w.name)
else:
name = 'param_' + str(i)
else:
if hasattr(w, 'name') and w.name:
name = str(w.name)
else:
name = 'param_' + str(i)
weight_names.append(name.encode('utf8'))
optimizer_weights_group.attrs['weight_names'] = weight_names
for name, val in zip(weight_names, weight_values):
param_dset = optimizer_weights_group.create_dataset(
name,
val.shape,
dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
def load_opt_weights(model, filepath):
with h5py.File(filepath, mode='r') as f:
optimizer_weights_group = f['optimizer_weights']
optimizer_weight_names = [n.decode('utf8') for n in
optimizer_weights_group.attrs['weight_names']]
optimizer_weight_values = [optimizer_weights_group[n] for n in
optimizer_weight_names]
model.optimizer.set_weights(optimizer_weight_values)
def ensure_list(x):
if type(x) != list:
x = [x]
return x
def input_shapes(model, prefix):
shapes = [il.shape[1:] for il in
model.inputs if il.name.startswith(prefix)]
shapes = [tuple([d.value for d in dims]) for dims in shapes]
return shapes
``` |
{
"source": "jleinonen/weather4cast-bigdata",
"score": 2
} |
#### File: weather4cast-bigdata/weather4cast/main.py
```python
import argparse
from functools import partial
import datasets
import ensemble
import models
from models import rnn3_model, rnn4_model, rnn5_model, crr_combo_model
def get_ensemble_weights(weights="ridge"):
assert (weights in ["equal", "ridge", "ridge_lagrange"])
if weights == "equal":
w = {
"temperature": [1/5]*5,
"crr_intensity": [1/2]*2,
"asii_turb_trop_prob": [1/3]*3,
"cma": [1/2]*2,
}
elif weights == "ridge":
w = {
"temperature": [0.1455, 0.2666, 0.0904, 0.2487, 0.2457],
"crr_intensity": [0.5206, 0.5320],
"asii_turb_trop_prob": [0.2722, 0.2941, 0.4344],
"cma": [0.5165, 0.4864],
}
elif weights == "ridge_lagrange":
w = {
"temperature": [0.1455, 0.2666, 0.0904, 0.2487, 0.2457],
"crr_intensity": [0.5122, 0.4878],
"asii_turb_trop_prob": [0.2722, 0.2941, 0.4344],
"cma": [0.5165, 0.4864],
}
return w
def get_model(model_type):
model_func = {
"resgru_deep": models.rnn3_model,
"resgru_shallow": models.rnn4_model,
"convgru_deep": models.rnn5_model,
"convgru_old": models.rnn2_model,
"crr_combo_model_old": models.crr_combo_model,
"crr_combo_model_new": partial(crr_combo_model, model_func=rnn5_model)
}[model_type]
return model_func
def regions_for_dir(comp_dir):
if "core" in comp_dir:
regions = ["R1", "R2", "R3", "R7", "R8"]
else:
regions = ["R4", "R5", "R6", "R9", "R10", "R11"]
def build_model_list(w):
modif_crr_model = partial(crr_combo_model, model_func=rnn5_model)
var_models = [
("CTTH", "temperature", [
("../models/srnn_adabelief_1-temperature.h5", rnn4_model, w["temperature"][0]),
("../models/srnn_adabelief_2-temperature.h5", rnn4_model, w["temperature"][1]),
("../models/srnn_adabelief_3-temperature.h5", rnn4_model, w["temperature"][2]),
("../models/srnn_adabelief_4-temperature.h5", rnn4_model, w["temperature"][3]),
("../models/srnn_adabelief_5-temperature.h5", rnn4_model, w["temperature"][4]),
]),
("CRR", "crr_intensity", [
("../models/srnn_adabelief_3-crr_intensity.h5", crr_combo_model, w["crr_intensity"][0]),
("../models/srnn_adabelief_4-crr_intensity.h5", modif_crr_model, w["crr_intensity"][1]),
]),
("ASII", "asii_turb_trop_prob", [
("../models/srnn_adabelief_1-asii_turb_trop_prob.h5", rnn4_model, w["asii_turb_trop_prob"][0]),
("../models/srnn_adabelief_2-asii_turb_trop_prob.h5", rnn3_model, w["asii_turb_trop_prob"][1]),
("../models/srnn_adabelief_3-asii_turb_trop_prob.h5", rnn3_model, w["asii_turb_trop_prob"][2]),
]),
("CMA", "cma", [
("../models/srnn_adabelief_1-cma.h5", rnn4_model, w["cma"][0]),
("../models/srnn_adabelief_2-cma.h5", rnn3_model, w["cma"][1]),
]),
]
return var_models
def generate_predictions(
submission_dir,
comp_dir="w4c-core-stage-1",
regions=None,
weights="ridge"
):
if regions is None:
regions = regions_for_dir(comp_dir)
batch_gen_valid = datasets.BatchGenerator(
comp_dir=comp_dir,
regions=regions,
data_subset="test",
augment=False,
shuffle=False
)
w = get_ensemble_weights(weights=weights)
var_models = build_model_list(w)
comb_model = models.ensemble_model_with_weights(
batch_gen_valid, var_models=var_models, logit=(weights!="equal"))
datasets.generate_submission(
comb_model,
submission_dir,
regions=regions,
comp_dir=comp_dir
)
def evaluate(
comp_dir="w4c-core-stage-1",
regions=None,
dataset="CTTH",
variable="temperature",
batch_size=32,
model_type="resgru",
weight_fn=None
):
if regions is None:
regions = regions_for_dir(comp_dir)
batch_gen_valid = datasets.BatchGenerator(
comp_dir=comp_dir,
regions=regions,
data_subset="validation",
augment=False,
shuffle=False
)
datasets.setup_univariate_batch_gen(batch_gen_valid,
dataset, variable, batch_size=batch_size)
model_func = get_model(model_type)
model = models.init_model(batch_gen_valid, model_func=model_func)
if weight_fn is not None:
model.load_weights(weight_fn)
eval_results = model.evaluate(batch_gen_valid)
print(eval_results)
def evaluate_ensemble(
comp_dir="w4c-core-stage-1",
regions=None,
dataset="CTTH",
variable="temperature",
batch_size=32,
model_type="resgru",
weight_fn=None,
ensemble_weights="ridge"
):
if regions is None:
regions = regions_for_dir(comp_dir)
batch_gen_valid = datasets.BatchGenerator(
comp_dir=comp_dir,
regions=regions,
data_subset="validation",
augment=False,
shuffle=False
)
datasets.setup_univariate_batch_gen(batch_gen_valid,
dataset, variable, batch_size=batch_size)
w = get_ensemble_weights(weights=ensemble_weights)
var_models = build_model_list(w)
var_list = [v[1] for v in var_models]
ind = var_list.index(variable)
model_list = var_models[ind][2]
var_model_list = []
var_ensemble_weights = []
for (model_weights, model_func, ensemble_weight) in model_list:
model = models.init_model(batch_gen_valid, model_func=model_func,
compile=False, init_strategy=False)
model.load_weights(model_weights)
var_model_list.append(model)
var_ensemble_weights.append(ensemble_weight)
logit = (ensemble_weights != "equal")
weighted_model = ensemble.weighted_model(
var_model_list, var_ensemble_weights, variable,
logit=(logit and (variable=="asii_turb_trop_prob"))
)
eval_results = weighted_model.evaluate(batch_gen_valid)
print(eval_results)
def train(
comp_dir="w4c-core-stage-1",
regions=None,
dataset="CTTH",
variable="temperature",
batch_size=32,
model_type="resgru_shallow",
weight_fn=None
):
if regions is None:
regions = regions_for_dir(comp_dir)
batch_gen_train = datasets.BatchGenerator(
comp_dir=comp_dir,
regions=regions,
data_subset="training"
)
batch_gen_valid = datasets.BatchGenerator(
comp_dir=comp_dir,
regions=regions,
data_subset="validation",
augment=False,
shuffle=False
)
datasets.setup_univariate_batch_gen(batch_gen_train,
dataset, variable, batch_size=batch_size)
datasets.setup_univariate_batch_gen(batch_gen_valid,
dataset, variable, batch_size=batch_size)
model_func = get_model(model_type)
model = models.init_model(batch_gen_valid, model_func=model_func)
models.train_model(model, batch_gen_train, batch_gen_valid,
weight_fn=weight_fn)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('mode', type=str,
help="submit / evaluate / train")
parser.add_argument('--comp_dir', type=str,
help="Directory where the data are located")
parser.add_argument('--regions', type=str,
help="Comma-separated list or regions, default all regions for comp_dir")
parser.add_argument('--submission_dir', type=str, default="",
help="Directory to save the results in, will be created if needed")
parser.add_argument('--batch_size', type=int, default=32,
help="Batch size for training / evaluation")
parser.add_argument('--dataset', type=str, default="",
help="Dataset for training / evaluation")
parser.add_argument('--variable', type=str, default="",
help="Variable for training / evaluation")
parser.add_argument('--weights', type=str, default="",
help="Model weight file for training / evaluation")
parser.add_argument('--model', type=str, default="resgru",
help="Model type for training / evaluation, either 'convgru' or 'resgru'")
parser.add_argument('--ensemble_weights', type=str, default="ridge",
help="Ensemble weights, either 'ridge', 'equal' or 'ridge_lagrange'")
args = parser.parse_args()
mode = args.mode
regions = args.regions
if not regions:
regions = None
else:
regions = regions.split(",")
comp_dir = args.comp_dir
if mode == "submit":
submission_dir = args.submission_dir
assert(submission_dir != "")
generate_predictions(submission_dir,
comp_dir=comp_dir, regions=regions)
elif mode in ["evaluate", "evaluate_ensemble", "train"]:
batch_size = args.batch_size
dataset = args.dataset
variable = args.variable
weight_fn = args.weights
model_type = args.model
ensemble_weights = args.ensemble_weights
assert(dataset in ["CTTH", "CRR", "ASII", "CMA"])
assert(variable in ["temperature", "crr_intensity",
"asii_turb_trop_prob", "cma"])
if mode == "evaluate":
evaluate(comp_dir=comp_dir, regions=regions, dataset=dataset,
variable=variable, batch_size=batch_size, weight_fn=weight_fn,
model_type=model_type)
elif mode == "evaluate_ensemble":
evaluate_ensemble(comp_dir=comp_dir, regions=regions,
dataset=dataset, variable=variable, batch_size=batch_size,
weight_fn=weight_fn, model_type=model_type,
ensemble_weights=ensemble_weights)
else:
train(comp_dir=comp_dir, regions=regions, dataset=dataset,
variable=variable, batch_size=batch_size, weight_fn=weight_fn,
model_type=model_type)
``` |
{
"source": "jlejeune/putio-cli",
"score": 3
} |
#### File: commands/config/show.py
```python
import sys
from putio_cli.commands.config import Config
class Show(Config):
"""
show command to print configuration file
Usage:
putio-cli config show
"""
def run(self):
try:
cfgfile = open(self.cfgfilename, 'r')
except IOError:
sys.exit(
'Config file does not exist, please use template subcommand first')
print cfgfile.read()
cfgfile.close()
```
#### File: jlejeune/putio-cli/setup.py
```python
from os.path import abspath, dirname, join
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
from putio_cli import __version__
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.md')) as file:
long_description = file.read()
class Tox(TestCommand):
"""Run all tests."""
description = 'run tests'
user_options = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
import sys
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
setup(
name='putio-cli',
version=__version__,
description='A command line program in Python to talk to Put.io Rest API',
long_description=long_description,
url='https://github.com/jlejeune/putio-cli',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
keywords='putio-cli',
packages=find_packages(exclude=['docs', 'tests*']),
install_requires=['docopt', 'putio.py'],
extras_require={
'test': ['tox', 'nose', 'coverage'],
},
tests_require=['tox'],
scripts=['putio-cli'],
cmdclass={'test': Tox},
)
``` |
{
"source": "jlema/Udacity-Self-Driving-Car-Engineer-Nanodegree",
"score": 4
} |
#### File: Term 1- Computer Vision and Deep Learning/Project 3 - Behavioral Cloning/image_preproc.py
```python
import cv2
import numpy as np
# Some of these image preprocessing functions are based on <NAME>'s augmentation code
# https://chatbotslife.com/using-augmentation-to-mimic-human-driving-496b569760a9#.nen5tsjgw
# Loads an image and changes the color space to RGB
def load_image(image_name):
image_name = image_name.strip()
#changing to RGB was crucial step in the image processing
#as the simulator feeds RGB images, not BGR images
image = cv2.cvtColor(cv2.imread(image_name), cv2.COLOR_BGR2RGB)
return image
# Halves the image size
def reduce_image(image):
r_image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_AREA)
return r_image
# Randomly flips image horizontally and returns the mirrored steering angle
# this is done to reduce bias for a particular turning direction
def flip_image(image, angle):
if np.random.randint(2) == 0:
return cv2.flip(image, 1), -angle
else:
return image, angle
# Randomly translates image vertically and horizontally
# This is done to improve recovery and to simulate driving uphill/downhill
def trans_image(image, steer, t_range=100):
rows,cols,ch = image.shape
# Horizontal translation
tr_x = t_range * np.random.uniform() - t_range / 2
# New steering angle
n_steer = steer + tr_x / t_range * 2 * 0.2
# Vertical translation
tr_y = 40 * np.random.uniform() - 40 / 2
# Translation matrix to be used for affine transformation
Trans_M = np.float32([[1, 0, tr_x],[0, 1, tr_y]])
t_image = cv2.warpAffine(image, Trans_M, (cols,rows))
return t_image, n_steer
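# Worked example: with the default t_range=100, tr_x is drawn from [-50, 50), so the
# steering correction tr_x / t_range * 2 * 0.2 spans roughly [-0.2, 0.2); a maximal
# shift of tr_x = 50 adds 50 / 100 * 2 * 0.2 = 0.2 to the steering angle.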
# Crop top 68 pixels and bottom 20 pixels
# This is the equivalent of removing the sky and the car hood
def crop_image(image):
shape = image.shape
crop_image = image[68:shape[0]-20, 0:shape[1]]
return crop_image
# Change image color space to HSV
# Randomly scale V channel to increase/reduce brightness
# Return image color space to RGB
# This helps with shadows and driving with other different light conditions
def scale_brightness_image(image):
temp = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
b_scale = 0.25 + np.random.uniform() # range [0.25, 1.25)
# We use Numpy indexing instead of cv2.split and cv2.merge
# as those operations are more costly
# [:, :, 2] is the V channel
temp[:, :, 2] = temp[:, :, 2] * b_scale
scaled_image = cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)
return scaled_image
```
#### File: Term 1- Computer Vision and Deep Learning/Project 3 - Behavioral Cloning/model.py
```python
import argparse
import json
import pandas as pd
import numpy as np
from image_preproc import load_image, reduce_image, flip_image, trans_image, crop_image, scale_brightness_image
from keras.models import Sequential
from keras.layers import Dense, Flatten, Lambda, ELU
from keras.layers import Convolution2D
from keras.callbacks import TensorBoard
from sklearn.model_selection import train_test_split
ch, row, col = 3, 36, 160 # resized camera format
def get_model():
model = Sequential()
# Normalize data to the -1 to 1 range
model.add(Lambda(lambda x: x/127.5 - 1.,
input_shape=(row, col, ch),
output_shape=(row, col, ch)))
# The next layer is 3 1X1 filters, this has the effect of transforming the color space of the images.
# As we do not know the best color space beforehand, using 3 1X1 filters allows the model to choose its best color space
model.add(Convolution2D(3, 1, 1, border_mode="same"))
model.add(ELU())
# We follow the NVIDIA architecture and create 3 convolutional layers with 2x2 stride and 5x5 kernel
model.add(Convolution2D(3, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(ELU())
model.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(ELU())
model.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(ELU())
# We follow the NVIDIA architecture and create 2 convolutional layers with no stride and 3x3 kernel
model.add(Convolution2D(48, 3, 3, border_mode="same"))
model.add(ELU())
model.add(Convolution2D(64, 3, 3, border_mode="same"))
model.add(Flatten())
model.add(ELU())
# We follow the NVIDIA architecture and create 3 fully connected layers
model.add(Dense(100))
model.add(ELU())
model.add(Dense(50))
model.add(ELU())
model.add(Dense(10))
model.add(ELU())
model.add(Dense(1))
# We optimize using Adam optimizer for Mean Square Error
model.compile(optimizer="adam", loss="mse")
return model
# training generator
def gen(image_names, steering, batch_size, augmentate=True):
while True:
# get a random sample of images of size batch size without replacement
batch_mask = np.random.choice(image_names.index, size=batch_size, replace=False)
x = []
y = []
image_path = ''
for i in range(batch_size):
index = batch_mask[i]
# load original steering angle
steer = steering[index]
# randomly remove lower steering angles (< 0.1)
if abs(steer) < 0.1:
if np.random.randint(2) == 0:
continue
# if we are augmenting (i.e. generating training data)
if (augmentate):
# randomly choose left, center or right images
# and apply a small shift to the steering angle to compensate
rand = np.random.randint(3)
if (rand == 0):
image_path = data['left'][index]
comp = .25
if (rand == 1):
image_path = data['center'][index]
comp = 0.
if (rand == 2):
image_path = data['right'][index]
comp = -.25
steer = steer + comp
image = load_image(image_path)
# cut off unnecessary top and bottom parts of image
image = crop_image(image)
# translate images horizontally and vertically
image, steer = trans_image(image, steer)
# increase/decrease brightness
image = scale_brightness_image(image)
# reduce size of image
image = reduce_image(image)
# flip images and steering angles
image, steer = flip_image(image, steer)
# if we are NOT augmenting (i.e. generating validation data)
else:
# load original image
image_path = data['center'][index]
image = load_image(image_path)
# cut off unnecessary top and bottom parts of image
image = crop_image(image)
# reduce size of image
image = reduce_image(image)
x.append(image)
y.append(steer)
x = np.array(x)
y = np.array(y)
yield x, y
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser(description='Steering angle model trainer')
parser.add_argument('--batch', type=int, default=287, help='Batch size.')
parser.add_argument('--epoch', type=int, default=5, help='Number of epochs.')
parser.add_argument('--epochsize', type=int, default=19803, help='How many frames per epoch.')
args = parser.parse_args()
# import driving log
data = pd.read_csv('driving_log.csv')
# split data into training and validation
X_train, X_val, y_train, y_val = train_test_split(data['center'], data['steering'])
model = get_model()
model.summary()
model.fit_generator(
gen(X_train, y_train, args.batch),
samples_per_epoch=args.epochsize,
nb_epoch=args.epoch,
validation_data=gen(X_val, y_val, args.batch, False), # do not augment validation samples
nb_val_samples=len(X_val),
callbacks = [TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)]
)
print("Saving model weights and configuration file.")
model.save_weights("model.h5", True)
with open('model.json', 'w') as outfile:
json.dump(model.to_json(), outfile)
```
#### File: Term 1- Computer Vision and Deep Learning/Project 5 - Vehicle Detection and Tracking/window.py
```python
import matplotlib.pyplot as plt
import numpy as np
import cv2
from features import single_img_features, get_hog_features, bin_spatial, color_hist
# Functions reproduced from Search and Classify lesson
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] == None:
x_start_stop[0] = 0
if x_start_stop[1] == None:
x_start_stop[1] = img.shape[1]
if y_start_stop[0] == None:
y_start_stop[0] = 0
if y_start_stop[1] == None:
y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))
ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))
# Compute the number of windows in x/y
nx_windows = np.int(xspan/nx_pix_per_step) - 1
ny_windows = np.int(yspan/ny_pix_per_step) - 1
# Initialize a list to append window positions to
window_list = []
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs*nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys*ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
window_list.append(((startx, starty), (endx, endy)))
# Return the list of windows
return window_list
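# Example: for a 1280-pixel-wide search span with xy_window=(64, 64) and
# xy_overlap=(0.5, 0.5), nx_pix_per_step = 32 and nx_windows = 1280//32 - 1 = 39,
# i.e. 39 window positions along x, each overlapping its neighbor by 32 pixels.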
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
spatial_size=(32, 32), hist_bins=32,
hist_range=(0, 256), orient=9,
pix_per_cell=8, cell_per_block=2,
hog_channel=0, spatial_feat=True,
hist_feat=True, hog_feat=True):
#1) Create an empty list to receive positive detection windows
on_windows = []
#2) Iterate over all windows in the list
for window in windows:
#3) Extract the test window from original image
test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
#4) Extract features for that window using single_img_features()
features = single_img_features(test_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
#5) Scale extracted features to be fed to classifier
test_features = scaler.transform(np.array(features).reshape(1, -1))
#6) Predict using your classifier
prediction = clf.predict(test_features)
#7) If positive (prediction == 1) then save the window
if prediction == 1:
on_windows.append(window)
#8) Return windows for positive detections
return on_windows
# Define a function for plotting multiple images
# Not from lesson, taken from Q&A session
def visualize(fig, rows, cols, imgs, titles):
for i, img in enumerate(imgs):
plt.subplot(rows, cols, i+1)
plt.title(i+1)
img_dims = len(img.shape)
if img_dims < 3:
plt.imshow(img, cmap='hot')
plt.title(titles[i])
else:
plt.imshow(img)
plt.title(titles[i])
# Color space changing function
def convert_color(img, conv='RGB2YCrCb'):
if conv == 'RGB2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
if conv == 'BGR2YCrCb':
return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
if conv == 'RGB2LUV':
return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
if conv == 'RGB2HSV':
return cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# We scale image and apply HOG to the entire image - which effectively is the same as sampling with different window sizes
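# For example, scale = 1.5 shrinks the search region by a factor of 1.5, so the fixed
# 64x64 window in the resized image covers a 96x96 region of the original image; the
# detections are mapped back to original coordinates via xleft*scale and window*scale.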
def find_cars(img, scale, hog_channel, ystart, ystop, orient, pix_per_cell, cell_per_block,
spatial_size, hist_bins, clf, scl, prob_threshold, cps):
draw_img = np.copy(img)
# Make a heatmap of zeros
heatmap = np.zeros_like(img[:,:,0])
img = img.astype(np.float32)/255
img_tosearch = img[ystart:ystop,:,:] # cropped version of the image to be searched
ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb') # color transformed image
#ctrans_tosearch = convert_color(img_tosearch, conv='RGB2HSV') # color transformed image
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))
# Split 3 color channels
if hog_channel == 'ALL':
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
elif hog_channel == 0:
ch1 = ctrans_tosearch[:,:,0]
elif hog_channel == 1:
ch2 = ctrans_tosearch[:,:,1]
elif hog_channel == 2:
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ctrans_tosearch.shape[1] // pix_per_cell) - 1 # Number of HOG cells on X axis
nyblocks = (ctrans_tosearch.shape[0] // pix_per_cell) - 1 # Number of HOG cells on Y axis
# nfeat_per_block = orient*cell_per_block**2 # Features per block = orientation * cells per block ^ 2
window = 64 # Size of the original windows that we're extracting feature vectors from (64 x 64)
nblocks_per_window = (window // pix_per_cell) - 1 # Number of HOG cells per window
cells_per_step = cps
# Instead of defining overlap, define how many cells to step
# How many cells do I want to step in the HOG array
# e.g. window size 64, pix_per_cell 8
# then nblocks_per_window = 7
# if we step 2 cells per step, we'd have 6 cells overlapping
# 6 / 8 = 0.75 or 75% overlap
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step # How many steps in the x direction we're going to do
nysteps = (nyblocks - nblocks_per_window) // cells_per_step # How many steps in the y direction we're going to do
# Compute individual channel HOG features for the entire image
if hog_channel == 'ALL':
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
elif hog_channel == 0:
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
elif hog_channel == 1:
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
elif hog_channel == 2:
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
# First scan vertically then horizontally
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch and unroll each of the feature vectors
if hog_channel == 'ALL':
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
elif hog_channel == 0:
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
elif hog_channel == 1:
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
elif hog_channel == 2:
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
# Stack up HOG feature vectors into one vector
if hog_channel == 'ALL':
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
elif hog_channel == 0:
hog_features = hog_feat1
elif hog_channel == 1:
hog_features = hog_feat2
elif hog_channel == 2:
hog_features = hog_feat3
# Define where we are in pixel space for the particular patch
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
# Extract the image patch
# Rescaling to original image size (64, 64)
subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64, 64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = scl.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
test_prediction = clf.predict(test_features)
# If we have predicted a car (1) then draw a bounding box on the original image and add heat to the heatmap
xbox_left = np.int(xleft*scale)
ytop_draw = np.int(ytop*scale)
win_draw = np.int(window*scale)
if test_prediction == 1:
conf_score = clf.predict_proba(test_features)
# Only draw box and add to heatmap if they have a specific prob threshold or higher
if conf_score[0][1] >= prob_threshold: # cars class, first entry in array
cv2.rectangle(draw_img, (xbox_left, ytop_draw+ystart), (xbox_left+win_draw, ytop_draw+ystart+win_draw), (0, 0, 255), 6)
#img_boxes.append(((xbox_left, ytop_draw+ystart),(xbox_left+win_draw, ytop_draw+ystart+win_draw)))
heatmap[ytop_draw+ystart:ytop_draw+ystart+win_draw, xbox_left:xbox_left+win_draw] += 1
# If we have not predicted a car (0) then remove heat from the heatmap
# With a floor of zero to avoid negative values
# else:
# heatmap[ytop_draw+ystart:ytop_draw+ystart+win_draw, xbox_left:xbox_left+win_draw][np.nonzero(heatmap[ytop_draw+ystart:ytop_draw+ystart+win_draw, xbox_left:xbox_left+win_draw])] -= 1
return draw_img, heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1]+1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image
return img
``` |
{
"source": "jlemon/zlogger",
"score": 3
} |
#### File: jlemon/zlogger/get_riders.py
```python
import sys, argparse, getpass
import requests
import json
import sqlite3
import os, time, stat
import mkresults
from collections import namedtuple
global args
global dbh
def post_credentials(session, username, password):
# Credentials POSTing and tokens retrieval
# POST https://secure.zwift.com/auth/realms/zwift/tokens/access/codes
try:
response = session.post(
url="https://secure.zwift.com/auth/realms/zwift/tokens/access/codes",
headers={
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "secure.zwift.com",
"User-Agent": "Zwift/1.5 (iPhone; iOS 9.0.2; Scale/2.00)",
"Accept-Language": "en-US;q=1",
},
data={
"client_id": "Zwift_Mobile_Link",
"username": username,
"password": password,
"grant_type": "password",
},
allow_redirects = False,
verify = args.verifyCert,
)
if args.verbose:
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
print('Response HTTP Response Body: {content}'.format(
content=response.content))
json_dict = json.loads(response.content)
return (json_dict["access_token"], json_dict["refresh_token"], json_dict["expires_in"])
except requests.exceptions.RequestException, e:
print('HTTP Request failed: %s' % e)
def query_player_profile(session, access_token, player_id):
# Query Player Profile
# GET https://us-or-rly101.zwift.com/api/profiles/<player_id>
try:
response = session.get(
url="https://us-or-rly101.zwift.com/api/profiles/%s" % player_id,
headers={
"Accept-Encoding": "gzip, deflate",
"Accept": "application/json",
"Connection": "keep-alive",
"Host": "us-or-rly101.zwift.com",
"User-Agent": "Zwift/115 CFNetwork/758.0.2 Darwin/15.0.0",
"Authorization": "Bearer %s" % access_token,
"Accept-Language": "en-us",
},
verify = args.verifyCert,
)
if args.verbose:
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
print('Response HTTP Response Body: {content}'.format(
content=response.content))
json_dict = json.loads(response.content)
return json_dict
except requests.exceptions.RequestException, e:
print('HTTP Request failed: %s' % e)
def logout(session, refresh_token):
# Logout
# POST https://secure.zwift.com/auth/realms/zwift/tokens/logout
try:
response = session.post(
url="https://secure.zwift.com/auth/realms/zwift/tokens/logout",
headers={
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Connection": "keep-alive",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "secure.zwift.com",
"User-Agent": "Zwift/1.5 (iPhone; iOS 9.0.2; Scale/2.00)",
"Accept-Language": "en-US;q=1",
},
data={
"client_id": "Zwift_Mobile_Link",
"refresh_token": refresh_token,
},
verify = args.verifyCert,
)
if args.verbose:
print('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
print('Response HTTP Response Body: {content}'.format(
content=response.content))
except requests.exceptions.RequestException, e:
print('HTTP Request failed: %s' % e)
def login(session, user, password):
access_token, refresh_token, expired_in = post_credentials(session, user, password)
return access_token, refresh_token
def updateRider(session, access_token, user):
# Query Player Profile
json_dict = query_player_profile(session, access_token, user)
if args.verbose:
print ("\n")
print (json_dict)
male = 1 if json_dict["male"] else 0
# Power Meter, Smart Trainer, zPower
if (json_dict["powerSourceModel"] == "zPower"):
power = 1
elif (json_dict["powerSourceModel"] == "Smart Trainer"):
power = 2
else:
power = 3
fname = json_dict["firstName"].strip()
lname = json_dict["lastName"].strip()
print ("id=%s wt=%s m=%s [%s] <%s %s>\n" %
(json_dict["id"], json_dict["weight"], json_dict["male"],
json_dict["powerSourceModel"], fname.encode('ascii', 'ignore'), lname.encode('ascii', 'ignore')))
c = dbh.cursor()
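# Upsert pattern: try to INSERT the rider first; if the rider is already in the
# table the insert raises IntegrityError and we fall back to an UPDATE of that row.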
try:
c.execute("insert into rider " +
"(rider_id, fname, lname, age, weight, height, male, zpower," +
" fetched_at) " +
"values (?,?,?,?,?,?,?,?,date('now'))",
(json_dict["id"], fname, lname, json_dict["age"],
json_dict["weight"], json_dict["height"], male, power))
except sqlite3.IntegrityError:
c.execute("update rider " +
"set fname = ?, lname = ?, age = ?, weight = ?, height = ?," +
" male = ?, zpower = ?, fetched_at = date('now')" +
" where rider_id = ?",
(fname, lname, json_dict["age"],
json_dict["weight"], json_dict["height"], male, power,
json_dict["id"]))
def get_rider_list():
mkresults.dbh = sqlite3.connect('race_database.sql3')
conf = mkresults.config(args.config)
mkresults.conf = conf
mkresults.args = namedtuple('Args', 'no_cat debug')(no_cat=False, debug=args.verbose)
startTime = conf.start_ms / 1000
retrievalTime = startTime + conf.start_window_ms / 1000
sleepTime = retrievalTime - time.time()
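# Wait until the configured start window has elapsed after the race start, so the
# full set of starters should already be in the race database when it is queried.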
while sleepTime > 0:
print "Sleeping %s seconds" % sleepTime
time.sleep(sleepTime)
sleepTime = retrievalTime - time.time()
conf.load_chalklines()
R, all_pos = mkresults.get_riders(conf.start_ms - conf.lookback_ms,
conf.finish_ms)
return [ r.id for r in R.values() if mkresults.filter_start(r) ]
def main(argv):
global args
global dbh
access_token = None
cookies = None
parser = argparse.ArgumentParser(description = 'Zwift Name Fetcher')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose output')
parser.add_argument('--dont-check-certificates', action='store_false',
dest='verifyCert', default=True)
parser.add_argument('-c', '--config', help='Use config file')
parser.add_argument('-u', '--user', help='Zwift user name')
parser.add_argument('idlist', metavar='rider_id', type=int, nargs='*',
help='rider ids to fetch')
args = parser.parse_args()
if args.user:
password = getpass.getpass("Password for %s? " % args.user)
else:
file = os.environ['HOME'] + '/.zwift_cred.json'
with open(file) as f:
try:
cred = json.load(f)
except ValueError, se:
sys.exit('"%s": %s' % (args.output, se))
f.close
args.user = cred['user']
password = cred['pass']
session = requests.session()
# test the credentials - token will expire, so we'll log in again after sleeping
access_token, refresh_token = login(session, args.user, password)
logout(session, refresh_token)
if args.config:
L = get_rider_list()
elif args.idlist:
L = args.idlist
else:
L = [ int(line) for line in sys.stdin ]
if args.verbose:
print 'Selected %d riders' % len(L)
access_token, refresh_token = login(session, args.user, password)
dbh = sqlite3.connect('rider_names.sql3')
for id in L:
updateRider(session, access_token, id)
dbh.commit()
dbh.close()
logout(session, refresh_token)
if __name__ == '__main__':
try:
main(sys.argv)
except KeyboardInterrupt:
pass
except SystemExit, se:
print "ERROR:",se
``` |
{
"source": "JleMyP/wol",
"score": 2
} |
#### File: wol/logic/crud.py
```python
from typing import List, Optional
from flask import abort, make_response
from marshmallow import Schema, fields
from peewee import JOIN
from playhouse.flask_utils import get_object_or_404
from ..doc_utils import exclude_parent_attrs
from ..fields import HostField, MacField, PortField
from ..models import Credentials, Target
from .core import check_host, wakeup_host
__all__ = ['create_target', 'get_target_by_id', 'get_all_targets', 'delete_target_by_id',
'get_target_by_name',
'edit_target_by_id', 'wakeup_target_by_id', 'check_target_by_id',
'create_credentials', 'get_credentials_by_id', 'get_all_credentials',
'delete_credentials_by_id', 'edit_credentials_by_id',
'TargetSchema', 'CredentialsSchema']
# TODO: drop flask deps
class CredentialsSchema(Schema):
"""credentials (de)serialization"""
id = fields.Int(dump_only=True) # noqa: A003, VNE003
username = fields.Str()
password = fields.Str()
pkey = fields.Str()
class TargetSchema(Schema):
"""target (de)serialization"""
id = fields.Int(dump_only=True) # noqa: A003, VNE003
name = fields.Str()
host = HostField()
mac = MacField()
wol_port = PortField()
credentials = fields.Nested(CredentialsSchema())
def _delete_object(model, id_: int) -> None:
obj = get_object_or_404(model, model.id == id_)
obj.delete_instance()
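# Partial-update helper: only the fields passed as keyword arguments are written
# back, using peewee's save(only=...) so the UPDATE touches just those columns.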
def _edit_object_by_id(model, id_: int, **kwargs) -> None:
obj = get_object_or_404(model, model.id == id_)
edited_fields = []
for field_name, value in kwargs.items():
field = getattr(model, field_name)
edited_fields.append(field)
setattr(obj, field_name, value)
obj.save(only=edited_fields)
def create_target(
name: str,
host: Optional[str] = None,
mac: Optional[str] = None,
wol_port: Optional[int] = None,
credentials: Optional[int] = None,
) -> int:
target = Target.create(name=name, host=host, mac=mac, wol_port=wol_port,
credentials=credentials)
return target.id
def get_target_by_id(id_: int) -> dict:
query = Target.select(Target, Credentials).join(Credentials, JOIN.LEFT_OUTER)
target = get_object_or_404(query, Target.id == id_)
return TargetSchema().dump(target)
def get_target_by_name(name: str) -> dict:
query = Target.select(Target, Credentials).join(Credentials, JOIN.LEFT_OUTER)
target = get_object_or_404(query, Target.name == name)
return TargetSchema().dump(target)
def get_all_targets() -> List[dict]:
query = Target.select(Target, Credentials).join(Credentials, JOIN.LEFT_OUTER)
return TargetSchema(many=True).dump(query)
def delete_target_by_id(id_: int) -> None:
_delete_object(Target, id_)
def edit_target_by_id(id_: int, **kwargs) -> None:
_edit_object_by_id(Target, id_, **kwargs)
def wakeup_target_by_id(id_: int) -> None:
target = get_object_or_404(Target, Target.id == id_)
if not target.mac:
abort(make_response({'error': 'empty mac'}, 400))
wakeup_host(target.mac, port=target.wol_port)
def check_target_by_id(id_: int) -> bool:
target = get_object_or_404(Target, Target.id == id_)
if not target.host:
abort(make_response({'error': 'empty host'}, 400))
return check_host(target.host)
def create_credentials(
username: str,
password: Optional[str] = None,
pkey: Optional[str] = None,
) -> int:
credentials = Credentials.create(username=username, password=password, pkey=pkey)
return credentials.id
def get_credentials_by_id(id_: int) -> dict:
credentials = get_object_or_404(Credentials, Credentials.id == id_)
return CredentialsSchema().dump(credentials)
def get_all_credentials() -> List[dict]:
qs = Credentials.select()
return list(qs.dicts())
def delete_credentials_by_id(id_: int) -> None:
_delete_object(Credentials, id_)
def edit_credentials_by_id(id_: int, **kwargs) -> None:
_edit_object_by_id(Credentials, id_, **kwargs)
for schema in (CredentialsSchema, TargetSchema):
exclude_parent_attrs(schema)
```
#### File: wol/views/pages.py
```python
from flask import Blueprint, render_template
from ..logic.crud import get_all_targets
pages = Blueprint('web', __name__, template_folder='../templates')
@pages.route('/targets/', methods=['GET'])
def get_web_targets():
targets = get_all_targets()
return render_template('targets.html', targets=targets)
```
#### File: wol/wol/wsgi.py
```python
import logging
import sys
from configargparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from environs import Env
from flask import Flask, Response, jsonify
from marshmallow import ValidationError
from .views import core
try:
from . import models
except ImportError:
models = None
else:
from .views import crud, pages
def create_app(no_db: bool = False):
env = Env()
env.read_env()
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
app = Flask(__name__)
app.register_blueprint(core, url_prefix='/api')
with env.prefixed('WOL_'):
logger.setLevel(env.log_level('LOG_LEVEL', logging.DEBUG))
if not env('NO_DB', False) and not no_db and models: # TODO: shit
app.config['DATABASE'] = env.str('DATABASE_URL', 'postgres://postgres@localhost:5432/wol')
models.db.init_app(app)
app.register_blueprint(crud, url_prefix='/api')
app.register_blueprint(pages)
@app.errorhandler(ValidationError)
def handle_validation(error: ValidationError):
response = jsonify(error.messages)
response.status_code = 400
return response
@app.errorhandler(NotImplementedError)
def handle_not_implemented(_error: NotImplementedError):
return Response(status=501)
return app
# TODO: catch 404?
def dev_server():
parser = ArgumentParser(
auto_env_var_prefix='WOL_',
formatter_class=ArgumentDefaultsHelpFormatter,
)
parser.add_argument('--bind', '-b', default='127.0.0.1', help="ip address to listen")
parser.add_argument('--port', '-p', default=5000, help="port to listen")
parser.add_argument('--debug', '-d', action='store_true', default=False,
help="run in debug mode")
parser.add_argument('--no-db', action='store_true', default=False,
help="do not use database and disable CRUD api")
parser.add_argument('command', choices=('run', 'initdb'), nargs='?', default='run')
args = parser.parse_args()
app = create_app(no_db=args.no_db)
if args.command == 'run':
app.run(host=args.bind, port=args.port, debug=args.debug)
elif args.command == 'initdb':
if args.no_db:
print("incompatible command and \"--no-db\" argument")
sys.exit(1)
elif not models:
print("database deps is not installed (extra \"db\"")
sys.exit(1)
else:
models.init_db()
print("db initialized")
if __name__ == '__main__':
dev_server()
``` |
{
"source": "jlenain/nectarchain",
"score": 2
} |
#### File: nectarchain/dqm/camera_monitoring.py
```python
from dqm_summary_processor import *
import math
import sqlite3
import os
class CameraMonitoring(dqm_summary):
def __init__(self, gaink):
self.k = gaink
return None
def ConfigureForRun(self,path, Chan, Samp, Reader1):
#define number of channels and samples
self.Chan = Chan
self.Samp= Samp
self.camera = CameraGeometry.from_name("NectarCam", 3)
self.cmap = 'gnuplot2'
self.subarray = Reader1.subarray
self.event_id = []
self.event_times = []
for i, evt1 in enumerate(Reader1):
self.run_start1= evt1.nectarcam.tel[0].svc.date
SqlFileDate = (astropytime.Time(self.run_start1, format='unix').iso).split(" ")[0]
SqlFilePath = ""
for i in range(len(path.split("/"))-1):
SqlFilePath = SqlFilePath + path.split("/")[i] + "/"
SqlFileName = SqlFilePath + "nectarcam_monitoring_db_" + SqlFileDate + ".sqlite"
print("SqlFileName", SqlFileName)
con = sqlite3.connect(SqlFileName)
cursor = con.cursor()
#print(cursor.fetchall())
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
TempData=cursor.execute('''SELECT * FROM monitoring_drawer_temperatures''')
#print(TempData.description)
self.DrawerTemp = cursor.fetchall()
cursor.close()
def ProcessEvent(self, evt):
trigger_time = evt.trigger.time.value
trigger_id = evt.index.event_id
self.event_times.append(trigger_time)
self.event_id.append(trigger_id)
def FinishRun(self):
self.event_id = np.array(self.event_id)
self.event_times = np.array(self.event_times)
self.run_start = self.event_times[self.event_id == np.min(self.event_id)] - 100
self.run_end = np.max(self.event_times) + 100
self.DrawerTemp = np.array(self.DrawerTemp)
self.DrawerTimes = np.array(self.DrawerTemp[:,3])
for i in range(len(self.DrawerTimes)):
self.DrawerTimes[i] = astropytime.Time(self.DrawerTimes[i], format = 'iso').unix
self.DrawerTemp11 = self.DrawerTemp[:,4][self.DrawerTimes > self.run_start]
self.DrawerTemp21 = self.DrawerTemp[:,5][self.DrawerTimes > self.run_start]
self.DrawerNum1 = self.DrawerTemp[:,2][self.DrawerTimes > self.run_start]
self.DrawerTimes_new = self.DrawerTimes[self.DrawerTimes > self.run_start]
self.DrawerTemp12 = self.DrawerTemp11[self.DrawerTimes_new < self.run_end]
self.DrawerTemp22 = self.DrawerTemp21[self.DrawerTimes_new < self.run_end]
self.DrawerNum2 = self.DrawerNum1[self.DrawerTimes_new < self.run_end]
self.DrawerTemp1_mean = []
self.DrawerTemp2_mean = []
TotalDrawers = np.max(self.DrawerNum2)
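# Each drawer (module) average is repeated 7 times so the per-drawer temperature can
# be shown per pixel with CameraDisplay; a NectarCAM drawer reads out 7 pixels.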
for i in range(TotalDrawers+1):
for j in range(7):
self.DrawerTemp1_mean.append(np.mean(self.DrawerTemp12[self.DrawerNum2 == i]))
self.DrawerTemp2_mean.append(np.mean(self.DrawerTemp22[self.DrawerNum2 == i]))
self.DrawerTemp1_mean = np.array(self.DrawerTemp1_mean)
self.DrawerTemp2_mean = np.array(self.DrawerTemp2_mean)
self.DrawerTemp_mean = (self.DrawerTemp1_mean + self.DrawerTemp2_mean)/2
def GetResults(self):
self.CameraMonitoring_Results_Dict = {}
self.CameraMonitoring_Results_Dict["CAMERA-TEMPERATURE-AVERAGE"] = self.DrawerTemp_mean
return self.CameraMonitoring_Results_Dict
def PlotResults(self,name,FigPath):
self.ChargeInt_Figures_Dict={}
self.ChargeInt_Figures_Names_Dict = {}
fig, disp = plt.subplots()
disp = CameraDisplay(self.camera)
disp.image = self.DrawerTemp_mean
disp.cmap = plt.cm.coolwarm
disp.axes.text(1.8, -0.3, 'Temperature', fontsize=12,rotation=90)
disp.add_colorbar()
plt.title("Camera temperature average")
full_name = name + '_CameraTemperature_Mean.png'
FullPath = FigPath +full_name
self.ChargeInt_Figures_Dict["CAMERA-TEMPERATURE-IMAGE-AVERAGE"] = fig
self.ChargeInt_Figures_Names_Dict["CAMERA-TEMPERATURE-IMAGE-AVERAGE"] = FullPath
plt.close()
fig1, disp = plt.subplots()
disp = CameraDisplay(self.camera)
disp.image = self.DrawerTemp1_mean
disp.cmap = plt.cm.coolwarm
disp.axes.text(1.8, -0.3, 'Temperature 1', fontsize=12,rotation=90)
disp.add_colorbar()
plt.title("Camera temperature average 1")
full_name = name + '_CameraTemperature_average1.png'
FullPath = FigPath +full_name
self.ChargeInt_Figures_Dict["CAMERA-TEMPERATURE-IMAGE-AVERAGE-1"] = fig1
self.ChargeInt_Figures_Names_Dict["CAMERA-TEMPERATURE-IMAGE-AVERAGE-1"] = FullPath
plt.close()
fig2, disp = plt.subplots()
disp = CameraDisplay(self.camera)
disp.image = self.DrawerTemp2_mean
disp.cmap = plt.cm.coolwarm
disp.axes.text(1.8, -0.3, 'Temperature 2', fontsize=12,rotation=90)
disp.add_colorbar()
plt.title("Camera temperature average 2")
full_name = name + '_CameraTemperature_average2.png'
FullPath = FigPath +full_name
self.ChargeInt_Figures_Dict["CAMERA-TEMPERATURE-IMAGE-AVERAGE-2"] = fig2
self.ChargeInt_Figures_Names_Dict["CAMERA-TEMPERATURE-IMAGE-AVERAGE-2"] = FullPath
plt.close()
return self.ChargeInt_Figures_Dict, self.ChargeInt_Figures_Names_Dict
```
#### File: nectarchain/dqm/mean_waveforms.py
```python
from dqm_summary_processor import *
class MeanWaveForms_HighLowGain(dqm_summary):
def __init__(self, gaink):
self.k = gaink
return None
def ConfigureForRun(self,path, Chan, Samp, Reader1):
#define number of channels and samples
self.Chan = Chan
self.Samp= Samp
#redefine everything
self.Mwf = np.zeros((self.Chan,self.Samp))
self.Mwf_ped = np.zeros((self.Chan,self.Samp))
self.counter_evt = 0
self.counter_ped = 0
self.Mwf_average = np.zeros((self.Chan,self.Samp))
self.Mwf_ped_average = np.zeros((self.Chan,self.Samp))
self.Mwf_Mean_overChan = []
self.Mwf_ped_Mean_overChan = []
self.wf_list_plot = list(range(1, self.Samp+1))#used for plotting later on
return None
def ProcessEvent(self, evt):
if evt.trigger.event_type.value == 32: #count peds
self.counter_ped += 1
else:
self.counter_evt += 1
for ichan in range(self.Chan): #loop over channels # 1855 should be redefined as a variable
if evt.trigger.event_type.value == 32: #only peds now
self.Mwf_ped[ichan,:] += evt.r0.tel[0].waveform[self.k][ichan] # fill channels one by one and sum them for peds only
else:
self.Mwf[ichan,:] += evt.r0.tel[0].waveform[self.k][ichan] # fill channels one by one and sum them
return None
def FinishRun(self):
if (self.k==0):
gain_c = 'High'
if (self.k ==1):
gain_c = 'Low'
self.Mwf_average = self.Mwf/self.counter_evt #get average
#get average over channels
self.Mwf_Mean_overChan = np.mean(self.Mwf_average,axis=0)
if self.counter_ped > 0:
self.Mwf_ped_average = self.Mwf_ped/self.counter_ped #get average pedestals
self.Mwf_ped_Mean_overChan = np.mean(self.Mwf_ped_average,axis=0)
return None
def GetResults(self):
#INITIATE DICT
self.MeanWaveForms_Results_Dict = {}
#ASSIGN RESUTLS TO DICT
if (self.k==0):
self.MeanWaveForms_Results_Dict["WF-PHY-AVERAGE-HIGH-GAIN"] = self.Mwf_average
self.MeanWaveForms_Results_Dict["WF-PHY-AVERAGE-CHAN-HIGH-GAIN"] = self.Mwf_Mean_overChan
if self.counter_ped > 0:
self.MeanWaveForms_Results_Dict["WF-PED-AVERAGE-HIGH-GAIN"] = self.Mwf_ped_average
self.MeanWaveForms_Results_Dict["WF-AVERAGE-PED-CHAN-HIGH-GAIN"] = self.Mwf_ped_Mean_overChan
if (self.k ==1):
self.MeanWaveForms_Results_Dict["WF-AVERAGE-LOW-GAIN"] = self.Mwf_average
self.MeanWaveForms_Results_Dict["WF-AVERAGE-CHAN-LOW-GAIN"] = self.Mwf_Mean_overChan
if self.counter_ped > 0:
self.MeanWaveForms_Results_Dict["WF-PHY-PED-AVERAGE-LOW-GAIN"] = self.Mwf_ped_average
self.MeanWaveForms_Results_Dict["WF-PHY-AVERAGE-PED-CHAN-LOW-GAIN"] = self.Mwf_ped_Mean_overChan
return self.MeanWaveForms_Results_Dict
def PlotResults(self,name,FigPath):
self.MeanWaveForms_Figures_Dict = {}
self.MeanWaveForms_Figures_Names_Dict = {}
wf_list = np.array(self.wf_list_plot)
counter_fig = 0
colors = ['blue', 'red']
colors2 = ['cyan', 'orange']
titles = ['Physical', 'Pedestals']
Mean_plot_array = [self.Mwf_Mean_overChan, self.Mwf_ped_Mean_overChan]
#Set characters of gain: high or lo
if (self.k==0):
gain_c = 'High'
if (self.k ==1):
gain_c = 'Low'
full_fig, full_ax = plt.subplots()
if self.counter_ped > 0:
array_plot = [self.Mwf_average, self.Mwf_ped_average]
else:
array_plot = [self.Mwf_average]
for x in array_plot:
part_fig, part_ax = plt.subplots()
for ichan in range(self.Chan):
full_ax.plot(wf_list ,x[ichan,:], color = colors[counter_fig], alpha = 0.005, linewidth=1)
part_ax.plot(wf_list ,x[ichan,:], color = colors[counter_fig], alpha = 0.005, linewidth=1)
Mean_plot = Mean_plot_array[counter_fig]
full_ax_return = full_ax.plot(wf_list, Mean_plot, color = colors2[counter_fig], alpha = 1, linewidth=3, label = 'Mean ' + titles[counter_fig])
part_ax_return = part_ax.plot(wf_list, Mean_plot, color = colors2[counter_fig], alpha = 1, linewidth=3, label = 'Mean ' + titles[counter_fig])
part_ax.set_title('Mean Waveforms %s (%s Gain)' %(titles[counter_fig], gain_c))
part_ax.set_xlabel('Samples')
part_ax.set_ylabel('Amplitude (DC)')
part_ax.legend()
part_ax.grid()
part_name = name + '_MeanWaveforms_%s_%sGain.png' %(titles[counter_fig], gain_c)
PartPath = FigPath + part_name
self.MeanWaveForms_Figures_Dict["FIGURE-WF-%s-%s-GAIN" %(titles[counter_fig], gain_c)]= part_fig
self.MeanWaveForms_Figures_Names_Dict["FIGURE-WF-%s-%s-GAIN" %(titles[counter_fig], gain_c)]= PartPath
plt.close()
counter_fig +=1
full_ax.set_title('Mean Waveforms Combined Plot (%s Gain)' % gain_c)
full_ax.set_xlabel('Samples')
full_ax.set_ylabel('Amplitude (DC)')
full_ax.legend()
full_ax.grid()
full_name = name + '_MeanWaveforms_CombinedPlot_%sGain.png' %gain_c
FullPath = FigPath +full_name
self.MeanWaveForms_Figures_Dict["FIGURE-WF-COMBINED-%s-GAIN" % gain_c] = full_fig
self.MeanWaveForms_Figures_Names_Dict["FIGURE-WF-COMBINED-%s-GAIN" % gain_c]= FullPath
plt.close()
return self.MeanWaveForms_Figures_Dict, self.MeanWaveForms_Figures_Names_Dict
``` |
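A minimal driver sketch for the class above (assumes the surrounding nectarchain dqm package, including `dqm_summary_processor`, is importable; the event object is a stand-in exposing only the attributes `ProcessEvent` reads):
```python
import numpy as np
from types import SimpleNamespace
from mean_waveforms import MeanWaveForms_HighLowGain

wf = MeanWaveForms_HighLowGain(gaink=0)                      # 0 = high gain, 1 = low gain
wf.ConfigureForRun(path=None, Chan=4, Samp=60, Reader1=None)

# one fake physics event (event_type.value != 32) with random waveforms
evt = SimpleNamespace(
    trigger=SimpleNamespace(event_type=SimpleNamespace(value=1)),
    r0=SimpleNamespace(tel={0: SimpleNamespace(waveform=np.random.rand(2, 4, 60))}),
)
wf.ProcessEvent(evt)
wf.FinishRun()
print(sorted(wf.GetResults().keys()))
```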
{
"source": "jlenain/PhotALPsConv",
"score": 2
} |
#### File: jlenain/PhotALPsConv/deltas.py
```python
import numpy as np
__version__=0.04
Bcrit = 4.414e13 # critical magnetic field in G
kpcmuG2GeV = 0.030422 # Conversion from kpc*muG to GeV in LHU (needs to be checked)
#g is photon axion coupling in 10^-11 GeV^-1
#B is magnetic field in nG
#returns Delta in Mpc^-1
#taken from Mirizzi & Montanino 2009
Delta_ag_Mpc= lambda g,B: 1.52e-2*g*B
#g is photon axion coupling in 10^-11 GeV^-1
#B is magnetic field in muG
#returns Delta in kpc^-1
Delta_ag_kpc= lambda g,B: 1.52e-2*g*B
#m is photon axion mass in 10^-10 eV
#E is Energy in TeV
#returns Delta in Mpc^-1
#taken from Mirizzi & Montanino 2009
Delta_a_Mpc= lambda m,E: -7.8e-4*m**2./E
#m is photon axion mass in 10^-9 eV
#E is Energy in GeV
#returns Delta in kpc^-1
Delta_a_kpc= lambda m,E: -7.8e-2*m**2./E
#n is electron density in 10^-7 cm^-3
#E is Energy in TeV
#returns Delta in Mpc^-1
#taken from Mirizzi & Montanino 2009
Delta_pl_Mpc= lambda n,E: -1.1e-11*n/E + Delta_CMB_Mpc(E)
#n is electron density in 10^-3 cm^-3
#E is Energy in GeV
#returns Delta in kpc^-1
Delta_pl_kpc= lambda n,E: -1.1e-7*n/E + Delta_CMB_kpc(E)
#E is Energy in GeV
#returns Delta in kpc^-1
# additional term from Dobrynina+ 2014, 1412.4771
Delta_CMB_kpc= lambda E: 0.8e-7*E
#E is Energy in TeV
#returns Delta in Mpc^-1
# additional term from Dobrynina+ 2014, 1412.4771
Delta_CMB_Mpc= lambda E: 0.8e-1*E
#B is magnetic field in nG
#E is Energy in TeV
#returns Delta in Mpc^-1
#taken from Mirizzi & Montanino 2009
Delta_QED_Mpc= lambda B,E: 4.1e-9*E*B**2.
#B is magnetic field in muG
#E is Energy in GeV
#returns Delta in kpc^-1
# with correction factors of Perna et al. 2012
Delta_QED_kpc= lambda B,E: 4.1e-9*E*B**2. * (1. + 1.2e-6 * B / Bcrit) / \
(1. + 1.33e-6*B / Bcrit + 0.59e-6 * (B / Bcrit)**2.)
def Delta_Osc_kpc_array(m,n,g,B,E):
"""
Compute Delta Osc
Parameters
----------
m: ALP mass, scalar, in neV
n: el. density in 10^-3 cm^-3, n-dim array
g: photon-ALP coupling strength, scalar
B: magnetic field in muG, n-dim array
E: energy in GeV, m-dim array
Returns
-------
Delta_osc as mxn-dim array in kpc^-1
"""
if np.isscalar(E):
E = np.array([E])
if np.isscalar(B):
B = np.array([B])
if np.isscalar(n):
n = np.array([n])
if not B.shape[0] == n.shape[0]:
raise ValueError("B and n array have to have the same shape. B.shape: {0}, n.shape: {1}".format(B.shape[0],n.shape[0]))
result = -7.8e-2 * m ** 2. * (np.ones((E.shape[0],B.shape[0])).transpose()/E).transpose() # Delta_a as ExB-shaped matrix
result -= -1.1e-7*((np.ones((E.shape[0],B.shape[0]))*n).transpose()/E).transpose() # Delta_pl as ExB-shaped matrix
result *= result
result += 4. * np.ones((E.shape[0],B.shape[0]))* (1.52e-2*g*B)**2.
return np.sqrt(result)
def Delta_Osc_Mpc_array(m,n,g,B,E):
"""
Compute Delta Osc
Parameters
----------
m: ALP mass, scalar, in 10^-10 eV
n: el. density in 10^-7 cm^-3, n-dim array
g: photon-ALP coupling strength, scalar, in 10^-11 GeV^-1
B: magnetic field in nG, n-dim array
E: energy in TeV, m-dim array
Returns
-------
Delta_osc as mxn-dim array in Mpc^-1
"""
if np.isscalar(E):
E = np.array([E])
if np.isscalar(B):
B = np.array([B])
if np.isscalar(n):
n = np.array([n])
if not B.shape[0] == n.shape[0]:
raise ValueError("B and n array have to have the same shape. B.shape: {0}, n.shape: {1}".format(B.shape[0],n.shape[0]))
result = -7.8e-4 * m ** 2. * (np.ones((E.shape[0],B.shape[0])).transpose()/E).transpose() # Delta_a as ExB-shaped matrix
result -= -1.1e-11*((np.ones((E.shape[0],B.shape[0]))*n).transpose()/E).transpose() # Delta_pl as ExB-shaped matrix
result *= result
result += 4. * np.ones((E.shape[0],B.shape[0]))* (1.52e-2*g*B)**2.
return np.sqrt(result)
#Plasma freq in 10^-10 eV
#n is electron density in 10^-7 cm^-3
w_pl_e10 = lambda n: 0.000117*np.sqrt(n)
#Plasma freq in 10^-9 eV
#n is electron density in 10^-3 cm^-3
w_pl_e9 = lambda n: 0.00117*np.sqrt(n)
#from math import abs
#Critical energy for strong mixing regime in TeV
#m is photon axion mass in 10^-10 eV
#n is electron density in 10^-7 cm^-3
#B is magnetic field in nG
#g is photon axion coupling in 10^-11 GeV^-1
Ecrit_TeV= lambda m,n,B,g: 2.5e-2*abs(m**2. - w_pl_e10(n)**2.)/B/g
#Critical energy for strong mixing regime in GeV
#m is axion mass in 10^-09 eV
#n is electron density in 10^-3 cm^-3
#B is magnetic field in muG
#g is photon axion coupling in 10^-11 GeV^-1
Ecrit_GeV= lambda m,n,B,g: 2.5e0*abs(m**2. - w_pl_e9(n)**2.)/B/g
#Maximum energy for strong mixing regime in GeV
#B is magnetic field in muG
#g is photon axion coupling in 10^-11 GeV^-1
#Emax_GeV= lambda B,g: 2.12e6 * g / B
# Check this!
Emax_GeV= lambda B,g: kpcmuG2GeV*(3.5*4.1e-9*B**2 + 0.8e-7 )**-1 *B * g
# mixing angle
#m is axion mass in 10^-09 eV
#n is electron density in 10^-3 cm^-3
#B is magnetic field in muG
#g is photon axion coupling in 10^-11 GeV^-1
#E is energy in GeV
alpha_kpc = lambda g,B,n,E,m: 0.5 * np.arctan2(2. * Delta_ag_kpc(g,B) , (Delta_pl_kpc(n,E) + 3.5*Delta_QED_kpc(B,E) - Delta_a_kpc(m,E)))
# mixing angle
#m is axion mass in 10^-10 eV
#n is electron density in 10^-7 cm^-3
#B is magnetic field in nG
#g is photon axion coupling in 10^-11 GeV^-1
#E is energy in TeV
alpha_Mpc = lambda g,B,n,E,m: 0.5 * np.arctan2(2. * Delta_ag_Mpc(g,B) , (Delta_pl_Mpc(n,E) + 3.5*Delta_QED_Mpc(B,E) - Delta_a_Mpc(m,E)))
# oscillation wave number
#m is axion mass in 10^-09 eV
#n is electron density in 10^-3 cm^-3
#B is magnetic field in muG
#g is photon axion coupling in 10^-11 GeV^-1
#E is energy in GeV
Delta_osc_kpc = lambda g,B,n,E,m : np.sqrt((Delta_pl_kpc(n,E) + 3.5*Delta_QED_kpc(B,E) - Delta_a_kpc(m,E)) ** 2. + 4. * Delta_ag_kpc(g,B) ** 2.)
# oscillation wave number
#m is axion mass in 10^-10 eV
#n is electron density in 10^-7 cm^-3
#B is magnetic field in nG
#g is photon axion coupling in 10^-11 GeV^-1
#E is energy in TeV
Delta_osc_Mpc = lambda g,B,n,E,m : np.sqrt((Delta_pl_Mpc(n,E) + 3.5*Delta_QED_Mpc(B,E) - Delta_a_Mpc(m,E)) ** 2. + 4. * Delta_ag_Mpc(g,B) ** 2.)
```
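A short usage sketch of the conversion lambdas above (illustrative parameter values only; assumes this file is importable as `deltas`):
```python
import numpy as np
import deltas  # this module

g = 1.0                      # photon-ALP coupling in 1e-11 GeV^-1 (illustrative)
B = 1.0                      # magnetic field in muG (illustrative)
n = 1.0                      # electron density in 1e-3 cm^-3 (illustrative)
m = 1.0                      # ALP mass in neV (illustrative)
E = np.logspace(0, 4, 5)     # energies in GeV

print(deltas.Ecrit_GeV(m, n, B, g))          # critical energy of the strong-mixing regime
print(deltas.Delta_osc_kpc(g, B, n, E, m))   # oscillation wave number per energy

# matrix version: energies along rows, field/density cells along columns
B_arr, n_arr = np.full(3, B), np.full(3, n)
print(deltas.Delta_Osc_kpc_array(m, n_arr, g, B_arr, E).shape)  # (5, 3)
```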
#### File: jlenain/PhotALPsConv/tools.py
```python
__version__=0.01
__author__="<NAME> // <EMAIL>"
# --- Imports -------------- #
#from numpy import mean,nanmean,sqrt,sort,median,array
from numpy import mean,sqrt,sort,median,array
from math import floor,ceil
# -------------------------- #
# calculate index for lower confidence contour
# for matrix P along axis axis and for confidence level conf
ind_lo = lambda P,conf,axis: int(floor((P.shape[axis]*0.5*(1. - conf))))
# calculate index for upper confidence contour
# for matrix P along axis axis and for confidence level conf
ind_up = lambda P,conf,axis: int(ceil((P.shape[axis]*0.5*(1. + conf))))
def rms(x, axis=None):
"""calculate rms of x along axis axis"""
return sqrt(mean(x**2, axis=axis))
#def nanrms(x, axis=None):
# """calculate rms of x if x contains nans along axis axis"""
# return sqrt(nanmean(x**2, axis=axis))
def median_contours(P,axis = 0, conf = [0.68,0.95]):
"""
Calculate median and 68,95 % confidence contours of survival probability matrix P
Parameters
----------
P: np.array with photon survival probabilities, either n or n x m dimensional
kwargs
------
axis: int, axis along which median etc. is calculated, default: 0
conf: list with confidence levels, default: [0.68,0.95]
Returns
-------
dictionary with entries
median: n [or m] dimensional array with median entries
conf_{int(100 * conf)} 2 x n [or m] dimensional array with confidence contours around median
"""
result = {}
for c in conf:
idx_low = ind_lo(P,c,axis)
idx_up = ind_up(P,c,axis)
if idx_up > P.shape[axis] - 1:
idx_up = P.shape[axis] - 1
if axis:
result['conf_{0:n}'.format(int(c * 100))] = array([sort(P, axis = axis)[:,idx_low],sort(P, axis = axis)[:,idx_up]])
else:
result['conf_{0:n}'.format(int(c * 100))] = array([sort(P, axis = axis)[idx_low,:],sort(P, axis = axis)[idx_up,:]])
result['median'] = median(P,axis = axis)
return result
``` |
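A small usage sketch for `median_contours` and `rms` (assumes the file is importable as `tools`; the probability matrix is random, for illustration only):
```python
import numpy as np
import tools  # this module

# 1000 realizations of a survival-probability curve sampled at 50 energies
P = np.random.rand(1000, 50)

stats = tools.median_contours(P, axis=0, conf=[0.68, 0.95])
print(sorted(stats.keys()))          # median plus one conf_* entry per requested level
print(stats['median'].shape)         # (50,)
print(stats['conf_68'].shape)        # (2, 50): lower and upper contour per energy
print(tools.rms(P, axis=0).shape)    # (50,)
```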
{
"source": "jlengrand/Ivolution",
"score": 3
} |
#### File: Ivolution/ivolution/Eye.py
```python
class Eye(object):
"""
Eye-like blob used in the Face Detection algorithm.
.. note::
This class **is not used for now**, but should become useful when implementing
the user interaction feature
"""
def __init__(self):
"""A facemovie redefinition of the human eye
:param x_pos: x position of the eye in the image (in pixels)
:type x_pos: int
:param y_pos: y position of the eye in the image (in pixels)
:type y_pos: int
:param x_size: x size of the blob (in pixels)
:type x_size: int
:param y_size: y size of the blob (in pixels)
:type y_size: int
:param conf: confidence index, indicating the probability that the target is actually an eye
:type conf: float
"""
self.x_pos = None # x position of the eye in the image
self.y_pos = None # y position of the eye in the image
self.x_size = None # x size of the blob in pixels
self.y_size = None # y size of the blob in pixels
self.conf = None # confidence index, indicating the probability that the target is actually an eye
```
#### File: Ivolution/ivolution/Facemovie_lib.py
```python
import os
import sys
import logging
import cv
from util import exif
import Guy
from util.Notifier import Observable
from util.Notifier import Observer
class FaceMovie(object, Observable, Observer):
'''
Main class of the whole application.
Contains the core image processing functions.
Takes a bunch of parameters and a list of images and creates the output, depending on what the user asked for.
Contains general methods, aimed at being used through an interface.
'''
def __init__(self, face_params):
"""
Initializes all parameters of the application. Input and output folders
are defined, together with the classifier profile.
:param in_folder: the location where input files will be searched
:type in_folder: string
:param out_folder: the location where the outputs will be saved
:type out_folder: string
:param face_param: the location of the profile file used to train the classifier
:type face_param: string
"""
Observable.__init__(self) # used to send notifications to process
Observer.__init__(self, "Lib") # used to receive notification to stop
#self.console_logger = logging.getLogger('ConsoleLog') # Used to send messages to the console
self.my_logger = logging.getLogger('IvolutionFile.Lib') # Used to save events into a file
self.source = face_params.input_folder # Source folder for pictures
# Retrieving parameters for Face Detection
self.face_params = face_params
out_folder = self.face_params.output_folder
self.out_path = "./data"
self.out_name = "ivolution"
self.out_format = "avi"
# updating the out_folder if needed
self.check_out_name(out_folder)
self.sort_method = face_params.sort # sorting by name or using metadata (n or e)
self.mode = face_params.mode # can be crop or conservative.
###
self.guys = [] # List of pictures in source folder
self.center = [0, 0] # Position of the center in output images (x, y)
self.dims = [0, 0] # Size of the final output image (x, y). Depends on selected mode
self.nChannels = 0 # number of channels of the set of images
self.depth = 0 # depth of the set of images
self.weight_steps = 5 # number of images to be inserted between each frame to reduce violent switch
self.speed = [3, 6, 9] # this one should be internal. Number of fps for the video
self.run = True # command used to stop the processing if needed
def update(self, message):
"""
Used to receive system commands, using the Observer pattern
"""
if len(message) == 1: # system command
self.run = False
def list_guys(self):
"""
Aims at populating the guys list, using the source folder as an input.
Guys list can be sorted either by name, or using metadata.
In case the source folder is not found, exits without processing.
Non-image files are automatically skipped.
Source folder is searched recursively. All subfolders are also processed.
.. note::In case no valid date is found for metadata mode, the images are taken in name order
"""
if not (os.path.exists(self.source) and os.path.isdir(self.source)): # checking if folder exists
#self.console_logger.critical("Source folder not found ! Exiting. . .")
self.my_logger.critical("Source folder not found ! Exiting. . .")
self.run = False
#sys.exit(0)
return -1
# loading images, create Guys and store it into guys
ptr = 0
for root, _, files in os.walk(self.source):
for a_file in files:
# notifying the Observers
self.notify_progress("Processing file", ptr, len(files))
if self.run: # as long as we want to continue
guy_source = os.path.join(root, a_file)
try:
cv.LoadImage(guy_source) # used to check image is valid
guy_name = os.path.splitext(a_file)[0]
# Tries to extract date from metadata
try:
guy_date = exif.parse(guy_source)['DateTime']
except Exception:
self.my_logger.warning("No metadata found for %s" % (guy_name))
#if self.sort_method == "exif":
#self.console_logger.warning(" No metadata found for %s" % (guy_name))
guy_date = ''
a_guy = Guy.Guy(guy_name, guy_date, guy_source)
ptr += 1 # Adding file only if picture
# populating guys
self.guys.append(a_guy)
self.notify(["Application", ["FILEADD", guy_name]])
except:
#self.console_logger.info("Skipping %s. Not an image file" % (guy_source))
self.my_logger.info("Skipping %s. Not an image file" % (guy_source))
# Checking if we have at least one image
if self.number_guys() > 0:
self.sort_guys()
##self.console_logger.info("%d guys found in source folder." % (self.number_guys()))
self.my_logger.info("%d guys found in source folder." % (self.number_guys()))
return self.number_guys()
def sort_guys(self):
"""
Guys list has just been populated, but elements are not ordered yet.
Sorts the elements of the list either by name or by date extracted from metadata,
depending on the chosen mode.
"""
# Sorting either by exif date or name
if self.sort_method == "exif":
self.guys.sort(key=lambda g: g.date)
else: # default is sort by name
self.guys.sort(key=lambda g: g.name)
def search_faces(self):
"""
Searches for all faces in the guys we have
Results to be stored directly in guys
Takes each image one after the other, and creates a guy out of it.
The Face of each guy is searched.
In case no face is found, a warning is returned and Guy is set to None
"""
ptr = 0
for a_guy in self.guys:
ptr += 1
if self.run:
faceres = 0
a_guy.search_face(self.face_params)
# notifying the Observers
self.notify_progress("Processing picture", ptr, self.number_guys())
if a_guy.has_face(): # face(s) have been found
#self.console_logger.info("Face found for %s" % (a_guy.name))
self.my_logger.info("Face found for %s" % (a_guy.name))
faceres = 1 # for notifying
else:
#self.console_logger.warning("No face found for %s. Skipped . . ." % (a_guy.name))
self.my_logger.warning("No face found for %s. Skipped . . ." % (a_guy.name))
self.notify(["Application", ["FILEDONE", a_guy.name, faceres]])
def percent(self, num, den):
"""
Returns a float between 0 and 1, being the percentage given by num / den
"""
if num > den:
raise ArithmeticError
if den <= 0:
raise ZeroDivisionError
return (num / float(den))
def notify_progress(self, message_root, num, den):
"""
A notification scheme to quickly notify most common messages
"""
# notifying the Observers
try:
message = message_root + " %d / %d" % (num, den)
self.notify(["Application", [message, self.percent(num, den)]])
except (ArithmeticError, ZeroDivisionError):
self.my_logger.error("ArithmeticError on %s, %d, %d" % (message_root, num, den))
self.notify(["Application", ["Error", 0]])
def clean_guys(self):
"""
Removes all guys for who no face has been found.
This avoids all has_face loops in the rest of the application
"""
return [a_guy for a_guy in self.guys if a_guy.has_face()]
def prepare_faces(self):
"""
Searches for all faces and keeps only the ones that may be properly used.
Images without face are discarded.
The program is exited in case no face is found.
Searches for the reference size. It will be used later for image resizing, so that
all faces have the same size.
"""
self.search_faces()
# removes guys that have no faces
self.guys = self.clean_guys()
# check that everybody has the same number of channels
self.check_channels()
self.check_depth()
if self.number_guys() == 0:
#self.console_logger.error("No face has been found in the whole repository! Exiting. . . ")
self.my_logger.error("No face has been found in the whole repository! Exiting. . . ")
self.notify(["Error", 0])
sys.exit(0)
# normalize faces to make them clean
self.set_guys_ratio() # sets all faces to the same size, by calculating a ratio to a reference
def check_depth(self):
"""
Checks that the depth of all the images in guys is the same
Sets the depth for the video
"""
my_depth = []
for a_guy in self.guys:
my_depth.append(a_guy.depth)
my_depth = list(set(my_depth)) # remove duplicates
if len(my_depth) != 1:
# We do not have a unique number of channels for all images
#self.console_logger.error("All images must have the same depth")
self.my_logger.error("All images must have the same depth")
else:
self.depth = my_depth[0]
def check_channels(self):
"""
Checks that the number of channels of all the images in guys is the same
Sets the number of channels for the video
"""
my_chans = []
for a_guy in self.guys:
my_chans.append(a_guy.in_channels)
my_chans = list(set(my_chans)) # remove duplicates
if len(my_chans) != 1:
# We do not have a unique number of channels for all images
#self.console_logger.error("All images must have the same number of channels")
self.my_logger.error("All images must have the same number of channels")
else:
self.nChannels = my_chans[0]
def set_guys_ratio(self):
"""
For each Guy, calculates the factor by which the image is going to be resized so that all faces finally have the same size.
"""
ref = self.find_reference()
for a_guy in self.guys:
a_guy.set_ratio(ref)
def find_reference(self):
"""
Searches for the best face size we want to have.
Defined (for now) as the smallest of all found faces.
:returns int - the reference size of the bounding square for faces.
"""
references = []
for a_guy in self.guys:
if a_guy.has_face():
references.append(a_guy.faces[0][0][3]) # catch face size (width)
return min(references)
def find_final_dimensions(self, cropdims=(0, 0)):
"""
Finds the final dimensions that will be needed to create the output.
Depending on the desired output, it can be
- (default) the maximal size of the image, by overlapping all images and adding black borders.
- (crop) the maximal size of the image by overlapping all the images, without adding any black borders
- (custom crop) A chosen user size, defined as x * y times the head size.
"""
if self.mode == "conservative":
self.find_default_dims()
elif self.mode == "crop":
self.find_crop_dims()
elif self.mode == "custom crop":
# TODO : implement
#self.console_logger.critical("custom crop is not yet implemented")
self.my_logger.critical("custom crop is not yet implemented")
raise Exception
def find_default_dims(self):
"""
Calculates best output image size and position depending on
faces found in guys.
The system is simple. The output image should be as big as possible,
and faces are always placed in the same position. Depending on that,
the input image is placed in the output at the correct position.
Black borders are set everywhere else.
"""
# TODO: badly done !
x_af = 0
y_af = 0
ptr = 0
for a_guy in self.guys:
if self.run:
ptr += 1
# notifying the Observers
self.notify_progress("Processing picture", ptr, self.number_guys())
(xc, yc) = a_guy.resized_center()
(inx, iny) = a_guy.resized_dims()
# update center
if xc > self.center[0]:
self.center[0] = xc
if yc > self.center[1]:
self.center[1] = yc
# update right part
if (inx - xc) > x_af:
x_af = inx - xc
if (iny - yc) > y_af:
y_af = iny - yc
self.dims = [x_af + self.center[0], y_af + self.center[1]]
def find_crop_dims(self):
"""
Calculates smallest output image that can be used to avoid adding black borders on image
It will later be used to create the final image.
"""
# TODO: badly done !
ht = 1000000 # space left above eyes
hb = 1000000 # space left beneath eyes
wl = 1000000 # space left left of eyes
wr = 1000000 # space left right of eyes
#tr = 0
ptr = 0
for a_guy in self.guys:
if self.run:
ptr += 1
# notifying the Observers
self.notify_progress("Processing picture", ptr, self.number_guys())
(xc, yc) = a_guy.resized_center()
(inx, iny) = a_guy.resized_dims()
# finding width
if xc < wl:
wl = xc
if (inx - xc) < wr:
wr = inx - xc
# finding height
if yc < ht:
ht = yc
if (iny - yc) < hb:
hb = iny - yc
self.dims = [wl + wr, ht + hb]
self.center = [wl, ht]
def get_out_file(self):
"""
Reconstructs the final output file for the movie creation
:returns: String -- The output file path to be saved
"""
return os.path.join(self.out_path, (self.out_name + "." + self.out_format))
def save_movie(self):
"""
Creates a movie with all faces found in the inputs.
Guy is skipped if no face is found.
:param out_folder: the location where to save the output image.
:type out_folder: string
:param fps: the number of frames per second to be displayed in final video (3)
:type fps: int
"""
speedrate = self.face_params.speed
if "win" in sys.platform:
fourcc = cv.CV_FOURCC('C', 'V', 'I', 'D')
else: # some kind of Linux/Unix platform
fourcc = cv.CV_FOURCC('F', 'M', 'P', '4')
# Corrects frameSize to get a nice video output
frameSize = self.resizes_for_video_codec() # Fixme : Put in global parameter
# We have to resize the out_image to make them fit with the desired size
corr_im = cv.CreateImage(frameSize, self.depth, self.nChannels)
#frameSize = (652, 498)
pace = ["slow", "normal", "fast"]
my_video = cv.CreateVideoWriter(self.get_out_file(),
fourcc,
self.speed[speedrate],
frameSize,
1)
ii = 0
for a_guy in self.guys:
if self.run:
ii += 1
self.notify_progress("Saving frame", ii, self.number_guys())
#self.console_logger.info("Saving frame %d / %d" % (ii, self.number_guys()))
self.my_logger.info("Saving frame %d / %d" % (ii, self.number_guys()))
out_im = self.prepare_image(a_guy)
cv.Resize(out_im, corr_im, cv.CV_INTER_LINEAR)
cv.WriteFrame(my_video, corr_im)
def show_faces(self, mytime=1000):
"""
Show all faces that have been found for the guys.
The time for which each image will be displayed can be chosen.
:param mytime: time for which the image should be displayed (in ms) (1000)
:type mytime: int
"""
win_name = " Face Results"
cv.NamedWindow(win_name, cv.CV_WINDOW_NORMAL)
cv.ResizeWindow(win_name, 640, 480)
for a_guy in self.guys:
if self.run:
out_im = self.prepare_image(a_guy)
cv.ShowImage(win_name, out_im)
cv.WaitKey(mytime)
cv.DestroyWindow(win_name)
def save_faces(self, im_format="png"):
"""
Save all faces into out_folder, in the given image format
:param out_folder: the location where to save the output image.
:type out_folder: string
:param im_format: Format in which the image should be saved ("png")
:type im_format: string
"""
for a_guy in self.guys:
if self.run:
out_im = self.prepare_image(a_guy)
self.save_guy(out_im, a_guy.name, im_format)
def number_guys(self):
"""
Simply returns the number of guys in the current to-be movie
.. note::
Designed for interface use only
"""
return len(self.guys)
def out_display(self, im, name, time=1000, im_x=640, im_y=480):
"""
Displays the output image, for time ms.
Setting time to 0 causes the image to remain open.
Window name slightly changed to match output
:param im: the image to be saved, formatted as an OpenCV Image
:type im: IplImage
:param name: the name of the image to be saved
:type name: string
:param time: time for which the image should be displayed (in ms) (1000)
:type time: int
:param im_x: output size of the displayed image (in pixels) (640)
:type im_x: int
:param im_y: output size of the displayed image (in pixels) (480)
:type im_y: int
"""
win_name = name + " - out"
cv.NamedWindow(win_name, cv.CV_WINDOW_NORMAL)
cv.ResizeWindow(win_name, im_x, im_y)
cv.ShowImage(win_name, im)
cv.WaitKey(time)
cv.DestroyWindow(win_name)
def check_out_name(self, out_folder):
"""
Checks the desired output selected by the user.
It can be either a folder or a file itself.
Checks whether the designated path ends with an extension name.
In case it is, the extension is checked and changed if needed
:param out_folder: the path selected by the user as output location
:type out_folder: String
"""
if len(os.path.splitext(out_folder)[1]) > 0: # if ends up with an extension
self.out_path, complete_name = os.path.split(out_folder)
self.out_name, format = os.path.splitext(complete_name)
if format.lstrip('.') != self.out_format:
# the format is not compliant with what we can do. We refuse it
self.my_logger.info("Changing format to avi")
else:
# no filename is given. We keep the default
self.out_path = os.path.split(out_folder)[0]
def save_guy(self, im, name, ext):
"""
Saves output image to the given format (given in extension)
:param im: the image to be saved, formatted as an OpenCV Image
:type im: IplImage
:param name: the name of the image to be saved
:type name: string
:param out_folder: the location where to save the image
:type out_folder: string
:param ext: Format in which the image should be saved ("png")
:type ext: string
"""
file_name = name + "." + ext
out_name = os.path.join(self.out_path, file_name)
self.my_logger.info("Saving %s" % (out_name))
#self.console_logger.info("Saving %s" % (out_name))
cv.SaveImage(out_name, im)
def prepare_image(self, a_guy):
"""
Takes a Guy and processes its input image. Prepares the final output image for this
Guy, so that it is ready to be saved in the desired output.
:param a_guy: The Guy currently being processed.
:type a_guy: Guy
:returns: IplImage -- The output image, created depending on the chosen mode, ready to be saved
"""
if self.mode == "conservative":
out_im = a_guy.create_default_output(self.dims,
self.center)
elif self.mode == "crop":
out_im = a_guy.create_crop_output(self.dims,
self.center)
return out_im
def resizes_for_video_codec(self):
"""
Searches for the closest frameSize such that width * nChannels is a multiple of 4, to avoid weird image encoding.
:param frameSize: The desired video output size before correction. (in Pixels)
:type frameSize: (int, int)
:returns: corrected frameSize -- The desired output size after correction. In (x, y) form.
"""
frameSize = (self.dims[0], self.dims[1])
try:
x, y = frameSize
except ValueError:
self.my_logger.error("unknown format for frameSize ")
return (0, 0)
if not(isinstance(x, int)) or not(isinstance(y, int)):
self.my_logger.error("method expects two integers")
return (0, 0)
while ((x * self.nChannels) % 4) != 0:
x += 1
return (x, y)
```
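The width padding in `resizes_for_video_codec` is the only correction applied; a standalone sketch of the same idea (hypothetical helper, shown for clarity, not part of the library):
```python
def pad_width_for_codec(width, height, n_channels=3):
    """Grow the width until width * n_channels is a multiple of 4,
    mirroring FaceMovie.resizes_for_video_codec()."""
    while (width * n_channels) % 4 != 0:
        width += 1
    return width, height

print(pad_width_for_codec(641, 480))  # (644, 480) with 3 channels
```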
#### File: Ivolution/ivolution/FacemovieThread.py
```python
import threading
import logging
import Facemovie_lib
from util.Notifier import Observer
from util.Notifier import Observable
class FacemovieThread(threading.Thread, Observable, Observer):
'''
Creates a Thread version of Facemovie using the facemovie_lib.
This class can then be run anywhere, from a GUI, script, ...
'''
def __init__(self, face_params):
"""
Initializes all parameters of the application. Input and output folders
are defined, together with the classifier profile.
:param face_params: A faceparams object that contains all needed information to run the Facemovie.
:type face_params: FaceParams
"""
threading.Thread.__init__(self)
Observable.__init__(self)
Observer.__init__(self, "Application")
self.stop_process = False
self.face_params = face_params
self.facemovie = Facemovie_lib.FaceMovie(self.face_params)
self.facemovie.subscribe(self) # Subscribing to facemovie reports
self.subscribe(self.facemovie) # Used to send request to stop
self.my_logger = logging.getLogger('IvolutionFile.Thread')
#self.console_logger = logging.getLogger('ConsoleLog')
def update(self, message):
"""
Triggered by IvolutionWindow.
Uses the Observer pattern to inform the user about the progress of the GUI.
"""
if len(message) == 1: # system commands
if message[0] == "STOP":
#self.console_logger.debug("Facemovie is going to stop")
self.my_logger.debug("Facemovie is going to stop")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
else:
#self.console_logger.debug("Unrecognized system command")
self.my_logger.debug("Unrecognized system command")
##self.console_logger.debug(message)
self.my_logger.debug(message)
elif len(message) == 2: # notifications
##self.console_logger.debug(message)
self.my_logger.debug(message)
if message[0] == "FILEADD":
self.notify(["Interface", [message[0], message[1], 0]])
else:
# notify gui about small updates
self.notify(["Interface", ["STATUS", message[0], message[1]]])
# checking for fatal error
if message[0] == "Error":
#self.console_logger.debug("Fatal Error detected")
self.my_logger.debug("Fatal Error detected")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
elif len(message) == 3: # notifications
if message[0] == "FILEDONE":
self.notify(["Interface", message])
else:
#self.console_logger.debug("Unrecognized command")
self.my_logger.debug("Unrecognized command")
#self.console_logger.debug(message)
self.my_logger.debug(message)
def run(self):
if not self.stop_process:
self.my_logger.debug("Listing pictures")
self.notify(["Interface", ["PROGRESS", "Listing pictures", 0.0]])
num_guys = self.facemovie.list_guys()
# FIXME: Later to be done in Lib
if num_guys < 0:
self.notify(["Interface", ["STATUS", "Source folder not found", 0.0]])
self.stop_process = True
elif num_guys == 0:
self.notify(["Interface", ["STATUS", "No image found in source folder", 0.0]])
self.stop_process = True
if not self.stop_process:
self.my_logger.debug("Detecting Faces")
self.notify(["Interface", ["PROGRESS", "Detecting Faces", 0.2]])
self.facemovie.prepare_faces() # I want to search for the faces, and characteristics of the images
if not self.stop_process:
self.my_logger.debug("Calculating video requirements")
self.notify(["Interface", ["PROGRESS", "Calculating video requirements", 0.6]])
self.facemovie.find_final_dimensions() # finds output size for desired mode.
if not self.stop_process:
self.my_logger.debug("Generating movie")
self.notify(["Interface", ["PROGRESS", "Generating movie", 0.8]])
self.facemovie.save_movie()
self.my_logger.debug("Movie saved")
self.notify(["Interface", ["PROGRESS", "Movie saved, Finished!", 1.0]])
# updating status to avoid remanent messages
self.notify(["Interface", ["STATUS", " ", 1.0]])
if not self.stop_process:
self.my_logger.debug("Thread terminated")
if self.stop_process:
self.notify(["Interface", ["PROGRESS", "Process cancelled!", 1.0]])
```
#### File: Ivolution/test/face_script.py
```python
import cv
def detectRedEyes(image, faceCascade, eyeCascade):
min_size = (20,20)
image_scale = 2
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0
# Allocate the temporary images
gray = cv.CreateImage((image.width, image.height), 8, 1)
smallImage = cv.CreateImage((cv.Round(image.width / image_scale),
cv.Round (image.height / image_scale)), 8 ,1)
# Convert color input image to grayscale
cv.CvtColor(image, gray, cv.CV_BGR2GRAY)
# Scale input image for faster processing
cv.Resize(gray, smallImage, cv.CV_INTER_LINEAR)
# Equalize the histogram
cv.EqualizeHist(smallImage, smallImage)
# Detect the faces
faces = cv.HaarDetectObjects(smallImage, faceCascade, cv.CreateMemStorage(0),
haar_scale, min_neighbors, haar_flags, min_size)
# If faces are found
if faces:
for ((x, y, w, h), n) in faces:
# the input to cv.HaarDetectObjects was resized, so scale the
# bounding box of each face and convert it to two CvPoints
pt1 = (int(x * image_scale), int(y * image_scale))
pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
cv.Rectangle(image, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)# If faces are found
# Estimate the eyes position
# First, set the image region of interest
# The last division removes the lower part of the face to lower probability for false recognition
cv.SetImageROI(image, (pt1[0],
pt1[1],
pt2[0] - pt1[0],
int((pt2[1] - pt1[1]) * 0.6)))
# Detect the eyes
eyes = cv.HaarDetectObjects(image, eyeCascade,
cv.CreateMemStorage(0),
haar_scale, min_neighbors,
haar_flags, (20,15))
# If eyes were found
if eyes:
# For each eye found
for eye in eyes:
# Draw a rectangle around the eye
cv.Rectangle(image,
(eye[0][0],
eye[0][1]),
(eye[0][0] + eye[0][2],
eye[0][1] + eye[0][3]),
cv.RGB(255, 0, 0), 1, 8, 0)
# Finally, reset the image region of interest (otherwise this won't
# be drawn correctly)
cv.ResetImageROI(image)
return image
def load():
image = cv.LoadImage("input/Axel/2012-01-12-13h54m34DSCN9766.JPG")
faceCascade = cv.Load("haarcascades/haarcascade_frontalface_alt.xml")
eyeCascade = cv.Load("haarcascades/haarcascade_eye.xml")
return (image, faceCascade, eyeCascade)
def display(image):
cv.NamedWindow("Red Eye Test", cv.CV_WINDOW_AUTOSIZE)
cv.ResizeWindow("Red Eye Test", 10, 10)
cv.ShowImage("Red Eye Test", image)
cv.WaitKey(0)
cv.DestroyWindow("Red Eye Test")
if __name__ == '__main__':
print "test"
image, faceCascade, eyeCascade = load()
image = detectRedEyes(image, faceCascade, eyeCascade)
display(image)
``` |
{
"source": "jlengvarsky/codeAgainst",
"score": 3
} |
#### File: jlengvarsky/codeAgainst/deck_manager.py
```python
import json
class DeckManager:
__default = False
@staticmethod
def default():
if DeckManager.__default:
return DeckManager.__default
else:
DeckManager.__default = DeckManager()
return DeckManager.__default
def __init__(self):
self.packs = None
with open("./iic_cards.json", "r") as cardFile:
self.packs = json.load(cardFile)
def listPacks(self):
return self.packs.keys()
def getQuestions(self, pack):
return self.packs[pack]["questions"]
def getAnswers(self, pack):
return self.packs[pack]["answers"]
```
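A minimal usage sketch (assumes `iic_cards.json` sits in the working directory, as the constructor requires):
```python
from deck_manager import DeckManager

manager = DeckManager.default()          # lazily creates the shared instance
for pack in manager.listPacks():
    print(pack, len(manager.getQuestions(pack)), len(manager.getAnswers(pack)))
```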
#### File: jlengvarsky/codeAgainst/game_manager.py
```python
import asyncio
from random import randint, shuffle
from utils import send_object, recv_object
from deck_manager import DeckManager
class GameManager:
def __init__(self, packs, questions=[], answers=[]):
self.joinCode = "".join(str(randint(0, 9)) for i in range(0, 6))
self.members = []
self.questions = []
self.answers = []
self.customQuestions = questions
self.customAnswers = answers
self.question = ""
self.status = "open"
self.judge = 0
self.packs = packs
def regenJoinCode(self):
self.joinCode = "".join(str(randint(0, 9)) for i in range(0, 6))
def addMember(self, member):
memberId = len(self.members)
member.chatMessageHandler = self.handleChatMessage
self.members.append(member)
return memberId
async def broadcastToAll(self, obj):
for member in self.members:
await member.addObj(obj)
async def broadcastToAllExcept(self, doNotInclude, obj):
for member in self.members:
if doNotInclude != member:
await member.addObj(obj)
async def handleChatMessage(self, obj, member):
await self.broadcastToAllExcept(member, {
"action": "chatMessage",
"from": member.name,
"content": obj["content"]
})
def setUpDecks(self, categories):
self.questions = self.customQuestions
self.answers = self.customAnswers
for category in categories:
self.questions = self.questions + DeckManager.default().getQuestions(category)
self.answers = self.answers + DeckManager.default().getAnswers(category)
shuffle(self.questions)
shuffle(self.answers)
def fillAllHands(self):
for member in self.members:
if len(self.answers) < 1:
self.setUpDecks(self.packs)
member.fillHand(self.answers)
async def getAnswerFromMember(self, member):
return {"answerObj": await member.getObj(), "member": member}
async def getAllAnswers(self):
membersToWaitFor = self.members[:self.judge] + self.members[self.judge + 1:]
rawAnswerData = await asyncio.gather(*[self.getAnswerFromMember(m) for m in membersToWaitFor])
result = []
for answer in rawAnswerData:
result.append({"answer": answer["answerObj"]["answer"], "member": answer["member"]})
return result
async def getJudgeSelection(self):
return await self.members[self.judge].getObj()
async def sendOutNewQuestion(self):
self.fillAllHands()
if len(self.questions) == 0:
self.setUpDecks(self.packs)
self.question = self.questions.pop().replace("[[BLANK]]", "___")
for member in self.members:
if member != self.members[self.judge]:
await member.addObj({
"action": "newQuestion",
"question": self.question,
"hand": member.hand,
"judge": self.members[self.judge].name
})
else:
await member.addObj({
"action": "youAreTheJudge",
"question": self.question
})
def clearSelectedCards(self, answerData):
for answer in answerData:
if answer["answer"] in answer["member"].hand:
answer["member"].hand.remove(answer["answer"])
async def startGame(self):
await self.broadcastToAll({"action": "preparingGame"})
self.judge = randint(0, len(self.members) - 1)
self.status = "ingame"
self.setUpDecks(self.packs)
await self.broadcastToAll({"action": "gameStart"})
while True:
await self.sendOutNewQuestion()
# gets all answer data as objects
answerData = await self.getAllAnswers()
# Begin judging
answersToDisplay = [answer["answer"] for answer in answerData]
shuffle(answersToDisplay)
await self.broadcastToAllExcept(self.members[self.judge], {
"action": "showAnswers",
"answers": answersToDisplay,
"question": self.question,
"judge": self.members[self.judge].name
})
await self.members[self.judge].addObj({
"action": "selectAnswer",
"answers": answersToDisplay,
"question": self.question
})
judgeSelection = await self.getJudgeSelection()
# Figure out who the judge picked
pickedMember = self.members[0]
for answer in answerData:
if answer["answer"] == judgeSelection["answer"]:
pickedMember = answer["member"]
break
# Send out selection data
await self.broadcastToAll({"action": "judgeSelected", "answer": judgeSelection["answer"], "question": self.question, "submittedBy": pickedMember.name})
# Send out score data
pickedMember.score += 1
await pickedMember.addObj({
"action": "updateScore",
"score": pickedMember.score
})
# Wait a bit
await asyncio.sleep(5)
# Get ready for next round
self.clearSelectedCards(answerData)
self.judge = (self.judge + 1) % len(self.members)
``` |
{
"source": "jleni/EPG",
"score": 2
} |
#### File: epg/envs/random_robots.py
```python
import numpy as np
from gym import Env
from gym.envs.registration import EnvSpec
from gym.wrappers.time_limit import TimeLimit
from epg.envs.mujoco.hopper import RandomWeightHopperEnv, RandomWeightHopperDirEnv, NormalHopperEnv
class RandomHopper(Env):
def __init__(self,
rand_mass=True, rand_gravity=True, rand_friction=True, rand_thickness=True,
seed=None, **_):
self.rand_mass = rand_mass
self.rand_gravity = rand_gravity
self.rand_friction = rand_friction
self.rand_thickness = rand_thickness
env = RandomWeightHopperEnv(rand_mass=self.rand_mass,
rand_gravity=self.rand_gravity,
rand_friction=self.rand_friction,
rand_thickness=self.rand_thickness)
self.observation_space = env.observation_space
self.action_space = env.action_space
self.reset_model = env.reset_model
self.step = env.step
def meta_reset(self, seed):
np.random.seed(seed)
env = RandomWeightHopperEnv(rand_mass=self.rand_mass,
rand_gravity=self.rand_gravity,
rand_friction=self.rand_friction,
rand_thickness=self.rand_thickness)
# Based on Hopper-v2
spec = EnvSpec(
'RandomWeightHopperEnv-v0',
entry_point='generic_rl.envs.mujoco:RandomWeightHopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0
)
env._spec = spec
env.seed(seed)
# Wrap the env as needed
env = TimeLimit(
env,
max_episode_steps=spec.max_episode_steps,
max_episode_seconds=spec.max_episode_seconds
)
self.env = env
# Fix for done flags.
self.env.reset()
self.step = env.step
self.render = env.render
self.reset = env.reset
class NormalHopper(Env):
def __init__(self, seed=None, **_):
env = NormalHopperEnv()
self.observation_space = env.observation_space
self.action_space = env.action_space
self.reset_model = env.reset_model
self.step = env.step
def meta_reset(self, seed):
np.random.seed(seed)
env = NormalHopperEnv()
# Based on Hopper-v2
spec = EnvSpec(
'NormalHopperEnv-v0',
entry_point='generic_rl.envs.mujoco:NormalHopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0
)
env._spec = spec
env.seed(seed)
# Wrap the env as needed
env = TimeLimit(
env,
max_episode_steps=spec.max_episode_steps,
max_episode_seconds=spec.max_episode_seconds
)
self.env = env
# Fix for done flags.
self.env.reset()
self.step = env.step
self.render = env.render
self.reset = env.reset
class DirHopper(Env):
def __init__(self, seed=None, **__):
env = RandomWeightHopperDirEnv()
self.observation_space = env.observation_space
self.action_space = env.action_space
self.reset_model = env.reset_model
self.step = env.step
def meta_reset(self, seed):
np.random.seed(seed)
env = RandomWeightHopperDirEnv()
# Based on Hopper-v2
spec = EnvSpec(
'DirHopperEnv-v0',
entry_point='generic_rl.envs.mujoco:DirHopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0
)
env._spec = spec
env.seed(seed)
# Wrap the env as needed
env = TimeLimit(
env,
max_episode_steps=spec.max_episode_steps,
max_episode_seconds=spec.max_episode_seconds
)
self.env = env
# Fix for done flags.
self.env.reset()
self.step = env.step
self.render = env.render
self.reset = env.reset
```
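A minimal rollout sketch for the meta-environment above (assumes a working MuJoCo setup and the gym version this repository targets):
```python
from epg.envs.random_robots import RandomHopper

env = RandomHopper()
env.meta_reset(0)            # samples a new Hopper variant and wraps it in a TimeLimit
obs = env.reset()
done, total_rew = False, 0.0
while not done:
    obs, rew, done, _ = env.step(env.action_space.sample())
    total_rew += rew
print(total_rew)
```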
#### File: epg/launching/logger.py
```python
import datetime
import json
import os
import os.path as osp
import sys
import tempfile
LOG_OUTPUT_FORMATS = ['log', 'stdout', 'csv', 'json']
# Also valid: json, tensorboard
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s' % filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
def make_output_format(format, ev_dir):
from mpi4py import MPI
os.makedirs(ev_dir, exist_ok=True)
rank = MPI.COMM_WORLD.Get_rank()
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
suffix = "" if rank == 0 else ("-mpi%03i" % rank)
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % suffix))
elif format == 'json':
assert rank == 0
return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
elif format == 'csv':
assert rank == 0
return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
Logger.CURRENT.logkv(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration to every configured output format.
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = {} # values this iteration
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None):
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("epg-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
if format_strs is None:
format_strs = LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s' % dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
```
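A minimal usage sketch of the logging API above (requires `mpi4py`, since `make_output_format` imports it; the directory name is arbitrary):
```python
from epg.launching import logger

logger.configure(dir="/tmp/epg-demo", format_strs=["stdout", "csv"])
for it in range(3):
    logger.logkv("iteration", it)
    logger.logkv("return", 100.0 + it)
    logger.dumpkvs()         # flush the key/value pairs to every configured format
logger.reset()
```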
#### File: EPG/epg/plotting.py
```python
import os
import numpy as np
from epg.launching import logger
from epg.utils import ret_to_obj
def plot_results(itr, results):
import matplotlib.pyplot as plt
def sliding_mean(data_array, window=5):
data_array = np.array(data_array)
new_list = []
for i in range(len(data_array)):
indices = list(range(max(i - window + 1, 0),
min(i + window + 1, len(data_array))))
avg = 0
for j in indices:
avg += data_array[j]
avg /= float(len(indices))
new_list.append(avg)
return np.array(new_list)
f, axarr = plt.subplots(2, len(results), figsize=(24, 6))
f.tight_layout()
for idx, r in enumerate(results):
smoothed_ret = sliding_mean(r['ep_return'], window=np.maximum(int(len(r['ep_return']) / 50), 1))
axarr[0, idx].plot(range(len(smoothed_ret)), smoothed_ret, linewidth=1.0, color='red')
obj = ret_to_obj(r['ep_return'])
axarr[0, idx].set_title('{:.3f}'.format(obj), y=0.8)
axarr[1, idx].plot(range(len(r['ep_kl'])), r['ep_kl'], linewidth=1.0, color='blue')
plt.show()
save_path = os.path.join(logger.get_dir(), 'analysis')
if not os.path.exists(save_path):
os.makedirs(save_path)
plt.savefig(os.path.join(save_path, 'epoch_{}.png'.format(itr)))
plt.clf()
plt.close()
```
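The smoothing used above is an asymmetric running mean over indices [i - window + 1, i + window]; a standalone sketch of the same helper (it is nested inside `plot_results` and not exported, so this is for illustration only):
```python
import numpy as np

def sliding_mean(data, window=5):
    """Average over indices [i - window + 1, i + window], clipped to the array bounds."""
    data = np.asarray(data, dtype=float)
    out = np.empty_like(data)
    for i in range(len(data)):
        lo, hi = max(i - window + 1, 0), min(i + window + 1, len(data))
        out[i] = data[lo:hi].mean()
    return out

print(sliding_mean([0, 10, 0, 10, 0], window=1))  # [5. 5. 5. 5. 0.]
```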
#### File: EPG/epg/rollout.py
```python
import time
import numpy as np
from epg import utils
from epg.launching import logger
def run_batch_rl(env, agent, inner_opt_freq, inner_max_n_epoch, inner_buffer_size, pool_rank, ppo_factor,
epoch=None,
render=False,
verbose=False):
from collections import deque
assert isinstance(inner_opt_freq, int)
assert isinstance(inner_max_n_epoch, int)
assert isinstance(inner_buffer_size, int)
lst_ep_rew, lst_loss, lst_ep_steps, lst_kl = [], [], [], []
buffer = deque(maxlen=inner_buffer_size)
n_ep, ep_rew, ep_steps = 0, 0., 0
tot_update_time, start_env_time = 0., time.time()
# Assumes meta wrapper used.
if epoch is not None:
env.meta_reset(epoch)
env.seed(epoch)
else:
env.meta_reset(pool_rank + utils.get_time_seed())
env.seed(pool_rank + utils.get_time_seed())
obs = env.reset()
n_steps = 0
for itr in range(inner_max_n_epoch):
ep_obs = []
for _ in range(inner_opt_freq):
obs = obs.astype(np.float32)
act = agent.act(obs)
obs_prime, rew, done, _ = env.step(agent.act_to_env_format(act))
ep_obs.append(obs)
buffer.append((obs, act, rew, done))
ep_rew += rew
ep_steps += 1
n_steps += 1
if done:
obs = env.reset()
lst_ep_rew.append(ep_rew)
lst_ep_steps.append(ep_steps)
if verbose and pool_rank == 0:
logger.log('Train run (ep {}, return {:.3f})'.format(n_ep, ep_rew))
ep_steps, ep_rew = 0, 0.
n_ep += 1
else:
obs = obs_prime
# This is disabled for now. But it's easy to add an exploration bonus as an additional
# input the the loss function!
# for rew_bonus_eval in agent.lst_rew_bonus_eval:
# rew_bonus_eval.fit_before_process_samples(obs)
start_update_time = time.time()
loss_input = [np.array([e[i] for e in buffer], dtype=np.float32) for i in range(len(buffer[0]))]
loss_input += [ppo_factor, inner_opt_freq]
loss, kl = agent.update(*loss_input)
lst_loss.append(loss)
lst_kl.append(kl)
tot_update_time += time.time() - start_update_time
# Evaluate final policy
obs, final_rew, ep_counter = env.reset(), [0., 0., 0.], 0
while ep_counter < 3:
obs = obs.astype(np.float32)
act = agent.act(obs)
obs_prime, rew, done, _ = env.step(agent.act_to_env_format(act))
final_rew[ep_counter] += rew
if done:
obs = env.reset()
ep_counter += 1
else:
obs = obs_prime
tot_env_time = time.time() - start_env_time - tot_update_time
if render:
logger.log('Rendering final policy for 5 steps ...')
obs, ep_rew = env.reset(), 0.
ep_counter = 0
while ep_counter < 5:
obs = obs.astype(np.float32)
act = agent.act(obs)
obs_prime, rew, done, _ = env.step(agent.act_to_env_format(act))
env.render()
ep_rew += rew
if done:
logger.log('Test run with final policy (return {:.3f}).'.format(ep_rew))
time.sleep(2)
obs, ep_rew = env.reset(), 0.
ep_counter += 1
else:
obs = obs_prime
return dict(ep_return=np.asarray(lst_ep_rew),
ep_final_rew=np.asarray(final_rew),
ep_loss=lst_loss,
ep_length=lst_ep_steps,
ep_kl=np.asarray(lst_kl),
update_time=tot_update_time,
env_time=tot_env_time)
``` |
{
"source": "jleni/ledger-ci-test",
"score": 3
} |
#### File: ledger-ci-test/tools/test.py
```python
from __future__ import print_function
import binascii
from ledgerblue.comm import getDongle
from ledgerblue.commException import CommException
try:
dongle = getDongle(True)
except CommException as e:
print(e)
quit()
def send(cmd, params=[]):
try:
cmd_str = "80{0:02x}".format(cmd)
for p in params:
cmd_str = cmd_str + "{0:02x}".format(p)
return dongle.exchange(binascii.unhexlify(cmd_str))
except CommException as e:
print("COMMEXC: ", e)
except Exception as e:
print("COMMEXC: ", e)
send(1)
``` |
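The APDU assembled by `send()` is just a hex string handed to the dongle; the byte layout can be inspected without any device attached:
```python
import binascii

cmd, params = 1, [0x12, 0x34]
cmd_str = "80{0:02x}".format(cmd) + "".join("{0:02x}".format(p) for p in params)
print(cmd_str)                        # 80011234
print(binascii.unhexlify(cmd_str))    # b'\x80\x01\x124'
```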
{
"source": "jleni/lola",
"score": 3
} |
#### File: lola/lola_dice/policy.py
```python
import numpy as np
import tensorflow as tf
import sonnet as snt
class Policy(object):
"""The base class for policy networks.
Policy parameters are allowed to be functions of other policies. To keep
track of such dependencies, each policy stores a list of parent policies on
which it depends. To make an action or update a policy with a non-empty
list of dependencies, we need to ensure that all parent placeholders are
fed-in with appropriate values.
"""
def __init__(self, ob_size, num_actions, prev=None):
self.ob_size = ob_size
self.num_actions = num_actions
self._root = self
self._parents = tuple()
if prev is not None:
self._root = prev.root
self._parents = prev.parents + (prev, )
self._params = []
self._opponents = None
def build(self, scope, reuse=None):
raise NotImplementedError
@property
def opponents(self):
return self._opponents
@opponents.setter
def opponents(self, opponents):
self._opponents = opponents
@property
def parameters(self):
raise NotImplementedError
@property
def parents(self):
return self._parents
@property
def root(self):
return self._root
def get_feed_list(self, trace):
obs, acs, rets, values, infos = trace
aa = np.asarray([info['available_actions'] for info in infos])
feed_list = [
(self.obs_ph, obs),
(self.acs_ph, acs),
(self.rets_ph, rets),
(self.values_ph, values),
(self.avail_acs_ph, aa)
]
return feed_list
def act(self, ob, info, sess, parent_feed_list=[]):
aa = info['available_actions']
feed_list = [(self.obs_ph, [ob]), (self.avail_acs_ph, [aa])] + \
parent_feed_list
ac = sess.run(self.action, feed_dict=dict(feed_list))
return ac
def predict(self, ob, sess, parent_feed_list=[]):
feed_list = [(self.obs_ph, [ob])] + parent_feed_list
vpred = sess.run(self.vpred, feed_dict=dict(feed_list))
return vpred
@property
def parameters(self):
return self._params
class SimplePolicy(Policy):
"""A single layer network that maps states to action probabilities."""
def build(self, scope, reuse=None):
self.scope = scope
with tf.variable_scope(scope, reuse=reuse):
# Placeholders
self.acs_ph = tf.placeholder(
shape=[None, None], dtype=tf.int32, name="acs")
self.obs_ph = tf.placeholder(
shape=[None, None, self.ob_size], dtype=tf.float32, name="obs")
self.rets_ph = tf.placeholder(
shape=[None, None], dtype=tf.float32, name="rets")
self.avail_acs_ph = tf.placeholder(
shape=[None, None, self.num_actions],
dtype=tf.int32,
name="avail_acs")
self.values_ph = tf.placeholder(
shape=[None, None], dtype=tf.float32, name="target_values")
self.gamma_ph = tf.placeholder(
shape=[1, 1], dtype=tf.float32, name="gamma_ph")
self.discount = tf.cumprod(
self.gamma_ph * tf.ones_like(self.rets_ph),
axis=0, exclusive=True, name="discount")
with tf.variable_scope("policy", reuse=reuse):
pol_lin = snt.Linear(1, use_bias=False)
logits = snt.BatchApply(pol_lin)(self.obs_ph)
pol_params = [pol_lin.w]
# logits, pol_params = Linear3D(1)(self.obs_ph)
logits = tf.concat([logits, tf.zeros_like(logits)], -1)
# Mask out unavailable actions
# MA: Not sure how that affects the gradients. Maybe better for
# the environment to mask out the actions?
mask = -9999999 * tf.ones_like(logits)
logits = tf.where(
tf.equal(self.avail_acs_ph, 1), x=logits, y=mask)
# Log probs and actions
self.log_pi = tf.nn.log_softmax(logits)
self.acs_onehot = tf.one_hot(
self.acs_ph, self.num_actions, dtype=tf.float32)
self.log_pi_acs = tf.reduce_sum(
tf.multiply(self.log_pi, self.acs_onehot), axis=-1)
self.log_pi_acs_cumsum = tf.cumsum(self.log_pi_acs, axis=0)
self.action = tf.squeeze(tf.multinomial(
tf.reshape(self.log_pi, shape=(-1, self.num_actions)), 1))
# Value
with tf.variable_scope("value", reuse=reuse):
val_lin = snt.Linear(1, use_bias=True)
self.vpred = snt.BatchApply(val_lin)(self.obs_ph)
self.vpred = tf.squeeze(self.vpred)
val_params = [val_lin.w, val_lin.b]
# Parameters
self._params += pol_params + val_params
class MLPPolicy(Policy):
"""A feed-forward network with one or multiple hidden layers."""
def __init__(self, ob_size, num_actions, hidden_sizes=[16], prev=None):
super(MLPPolicy, self).__init__(ob_size, num_actions, prev=prev)
self.hidden_sizes = hidden_sizes
def build(self, scope, reuse=None):
self.scope = scope
with tf.variable_scope(scope, reuse=reuse):
# Placeholders
self.acs_ph = tf.placeholder(
shape=[None, None], dtype=tf.int32)
self.obs_ph = tf.placeholder(
shape=[None, None, self.ob_size], dtype=tf.float32)
self.rets_ph = tf.placeholder(
shape=[None, None], dtype=tf.float32)
self.avail_acs_ph = tf.placeholder(
shape=[None, None, self.num_actions], dtype=tf.int32)
self.values_ph = tf.placeholder(
shape=[None, None], dtype=tf.float32, name="target_values")
self.gamma_ph = tf.placeholder(
shape=[1, 1], dtype=tf.float32, name="gamma_ph")
self.discount = tf.cumprod(
self.gamma_ph * tf.ones_like(self.rets_ph),
axis=0, exclusive=True, name="discount")
with tf.variable_scope("policy", reuse=reuse):
# Hidden layers
pol_params = []
last = self.obs_ph
for i, units in enumerate(self.hidden_sizes):
pol_lin = snt.Linear(units, name="h_%d" % i)
last = snt.BatchApply(pol_lin)(last)
last = tf.nn.relu(last)
pol_params += [pol_lin.w, pol_lin.b]
pol_lin = snt.Linear(self.num_actions)
logits = snt.BatchApply(pol_lin)(last)
pol_params += [pol_lin.w, pol_lin.b]
# Mask out unavailable actions
# MA: Not sure how that affects the gradients. Maybe better for
# the environment to mask out the actions?
mask = -9999999 * tf.ones_like(logits)
logits = tf.where(
tf.equal(self.avail_acs_ph, 1), x=logits, y=mask)
# Log probs and actions
self.log_pi = tf.nn.log_softmax(logits)
self.acs_onehot = tf.one_hot(
self.acs_ph, self.num_actions, dtype=tf.float32)
self.log_pi_acs = tf.reduce_sum(
tf.multiply(self.log_pi, self.acs_onehot), axis=-1)
self.log_pi_acs_cumsum = tf.cumsum(self.log_pi_acs, axis=0)
self.action = tf.squeeze(tf.multinomial(
tf.reshape(self.log_pi, shape=(-1, self.num_actions)), 1))
# Value
with tf.variable_scope("value", reuse=reuse):
val_params = []
last = self.obs_ph
for i, units in enumerate(self.hidden_sizes):
val_lin = snt.Linear(units, name="h_%d" % i)
last = snt.BatchApply(val_lin)(last)
last = tf.nn.relu(last)
val_params += [val_lin.w, val_lin.b]
val_lin = snt.Linear(1)
self.vpred = snt.BatchApply(val_lin)(last)
self.vpred = tf.squeeze(self.vpred)
val_params += [val_lin.w, val_lin.b]
# Parameters
self._params += pol_params + val_params
class RecurrentPolicy(Policy):
"""A recurrent network with one or multiple hidden layers."""
def __init__(self, ob_size, num_actions, hidden_sizes=[16], prev=None):
        super(RecurrentPolicy, self).__init__(ob_size, num_actions, prev=prev)
self.hidden_sizes = hidden_sizes
def build(self, scope, reuse=None):
self.scope = scope
with tf.variable_scope(scope, reuse=reuse):
# Placeholders
self.acs_ph = tf.placeholder(
shape=[None, None], dtype=tf.int32)
self.obs_ph = tf.placeholder(
shape=[None, None, self.ob_size], dtype=tf.float32)
self.rets_ph = tf.placeholder(
shape=[None, None], dtype=tf.float32)
self.avail_acs_ph = tf.placeholder(
shape=[None, None, self.num_actions], dtype=tf.int32)
self.values_ph = tf.placeholder(
shape=[None, None], dtype=tf.float32, name="target_values")
self.gamma_ph = tf.placeholder(
shape=[1, 1], dtype=tf.float32, name="gamma_ph")
self.discount = tf.cumprod(
self.gamma_ph * tf.ones_like(self.rets_ph),
axis=0, exclusive=True, name="discount")
with tf.variable_scope("policy", reuse=reuse):
# Hidden layers
pol_params = []
last = self.obs_ph
for i, units in enumerate(self.hidden_sizes):
pol_lin = snt.Linear(units, name="h_%d" % i)
last = snt.BatchApply(pol_lin)(last)
last = tf.nn.relu(last)
pol_params += [pol_lin.w, pol_lin.b]
pol_lin = snt.Linear(self.num_actions)
logits = snt.BatchApply(pol_lin)(last)
pol_params += [pol_lin.w, pol_lin.b]
# Mask out unavailable actions
# MA: Not sure how that affects the gradients. Maybe better for
# the environment to mask out the actions?
mask = -9999999 * tf.ones_like(logits)
logits = tf.where(
tf.equal(self.avail_acs_ph, 1), x=logits, y=mask)
# Log probs and actions
self.log_pi = tf.nn.log_softmax(logits)
self.acs_onehot = tf.one_hot(
self.acs_ph, self.num_actions, dtype=tf.float32)
self.log_pi_acs = tf.reduce_sum(
tf.multiply(self.log_pi, self.acs_onehot), axis=-1)
self.log_pi_acs_cumsum = tf.cumsum(self.log_pi_acs, axis=0)
self.action = tf.squeeze(tf.multinomial(
tf.reshape(self.log_pi, shape=(-1, self.num_actions)), 1))
# Value
with tf.variable_scope("value", reuse=reuse):
val_params = []
last = self.obs_ph
for i, units in enumerate(self.hidden_sizes):
val_lin = snt.Linear(units, name="h_%d" % i)
last = snt.BatchApply(val_lin)(last)
last = tf.nn.relu(last)
val_params += [val_lin.w, val_lin.b]
val_lin = snt.Linear(1)
self.vpred = snt.BatchApply(val_lin)(last)
self.vpred = tf.squeeze(self.vpred)
val_params += [val_lin.w, val_lin.b]
# Parameters
self._params += pol_params + val_params
```
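A minimal usage sketch for the policy classes above (illustrative, not code from the repo: the sizes, scope names, feeds and the TF 1.x / Sonnet session handling are assumptions):
```python
import numpy as np
import tensorflow as tf
# Assumes SimplePolicy (defined above) is importable and TF 1.x + Sonnet are installed.
agent = SimplePolicy(ob_size=5, num_actions=2)        # sizes match an IPD-style env
agent.build("agent0")
follower = SimplePolicy(ob_size=5, num_actions=2, prev=agent)  # records `agent` in .parents
follower.build("agent1")
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ob = np.zeros((1, 5), dtype=np.float32)           # [batch, ob_size]
    aa = np.ones((1, 2), dtype=np.int32)              # every action available
    # Placeholders of parent policies are passed through parent_feed_list.
    parent_feed = [(agent.obs_ph, [ob]), (agent.avail_acs_ph, [aa])]
    action = follower.act(ob, {"available_actions": aa}, sess, parent_feed_list=parent_feed)
    print(action)
```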
#### File: lola/envs/matching_pennies.py
```python
import gym
import numpy as np
from gym.spaces import Discrete, Tuple
from .common import OneHot
class IteratedMatchingPennies(gym.Env):
"""
A two-agent vectorized environment for the Matching Pennies game.
"""
NAME = 'IMP'
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = 5
def __init__(self, max_steps):
self.max_steps = max_steps
self.payout_mat = np.array([[1, -1],[-1, 1]])
self.action_space = \
Tuple([Discrete(self.NUM_ACTIONS), Discrete(self.NUM_ACTIONS)])
self.observation_space = \
Tuple([OneHot(self.NUM_STATES), OneHot(self.NUM_STATES)])
self.step_count = None
def reset(self):
self.step_count = 0
init_state = np.zeros(self.NUM_STATES)
init_state[-1] = 1
observations = [init_state, init_state]
return observations
def step(self, action):
ac0, ac1 = action
self.step_count += 1
rewards = [self.payout_mat[ac1][ac0], -self.payout_mat[ac1][ac0]]
state = np.zeros(self.NUM_STATES)
state[ac0 * 2 + ac1] = 1
observations = [state, state]
done = (self.step_count == self.max_steps)
return observations, rewards, done
```
#### File: lola/envs/prisoners_dilemma.py
```python
import gym
import numpy as np
from gym.spaces import Discrete, Tuple
from .common import OneHot
class IteratedPrisonersDilemma(gym.Env):
"""
A two-agent vectorized environment for the Prisoner's Dilemma game.
Possible actions for each agent are (C)ooperate and (D)efect.
"""
NAME = 'IPD'
NUM_AGENTS = 2
NUM_ACTIONS = 2
NUM_STATES = 5
def __init__(self, max_steps):
self.max_steps = max_steps
self.payout_mat = np.array([[-1., 0.], [-3., -2.]])
self.action_space = \
Tuple([Discrete(self.NUM_ACTIONS), Discrete(self.NUM_ACTIONS)])
self.observation_space = \
Tuple([OneHot(self.NUM_STATES), OneHot(self.NUM_STATES)])
self.step_count = None
def reset(self):
self.step_count = 0
init_state = np.zeros(self.NUM_STATES)
init_state[-1] = 1
observations = [init_state, init_state]
return observations
def step(self, action):
ac0, ac1 = action
self.step_count += 1
rewards = [self.payout_mat[ac1][ac0], self.payout_mat[ac0][ac1]]
state = np.zeros(self.NUM_STATES)
state[ac0 * 2 + ac1] = 1
observations = [state, state]
done = (self.step_count == self.max_steps)
return observations, rewards, done
```
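A rough rollout sketch for the environments above (illustrative only; the random actions and episode length are assumptions, not repo code):
```python
import numpy as np
# Assumes IteratedPrisonersDilemma (defined above) is importable.
env = IteratedPrisonersDilemma(max_steps=5)
obs = env.reset()                 # one one-hot state per agent
done = False
while not done:
    actions = (np.random.randint(2), np.random.randint(2))  # 0 = Cooperate, 1 = Defect
    obs, rewards, done = env.step(actions)
    print(rewards)                # e.g. [-1.0, -1.0] when both cooperate
```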
#### File: lola/scripts/run_lola_dice.py
```python
import click
import tensorflow as tf
from lola_dice.envs import IPD
from lola_dice.policy import SimplePolicy, MLPPolicy, RecurrentPolicy
from lola_dice.rpg import train
@click.command()
@click.option("--use-dice/--no-dice", default=True,
help="Whether to use the DiCE operator in the policy objective.")
@click.option("--use-opp-modeling/--no-opp-modeling", default=False,
help="Whether to use opponent modeling.")
@click.option("--batch-size", default=64)
@click.option("--epochs", default=200)
@click.option("--runs", default=5)
@click.option("--save-dir", default="results_ipd")
def main(use_dice, use_opp_modeling, epochs, batch_size, runs, save_dir):
n_agents = 2
env = IPD(max_steps=150, batch_size=batch_size)
def make_simple_policy(ob_size, num_actions, prev=None, root=None):
return SimplePolicy(ob_size, num_actions, prev=prev)
def make_mlp_policy(ob_size, num_actions, prev=None):
return MLPPolicy(ob_size, num_actions, hidden_sizes=[64], prev=prev)
def make_sgd_optimizer(*, lr):
return tf.train.GradientDescentOptimizer(learning_rate=lr)
for r in range(runs):
print("-" * 10, "Run: %d/%d" % (r + 1, runs), "-" * 10)
train(env, make_simple_policy, make_sgd_optimizer,
epochs=epochs,
gamma=.96,
lr_inner=.1,
lr_outer=.2,
lr_value=.1,
lr_om=.1,
inner_asymm=True,
n_agents=n_agents,
n_inner_steps=2,
value_batch_size=16,
value_epochs=0,
om_batch_size=16,
om_epochs=0,
use_baseline=False,
use_dice=use_dice,
use_opp_modeling=use_opp_modeling,
save_dir='%s/run-%d' % (save_dir, r + 1))
if __name__ == '__main__':
main()
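# Illustrative invocation of the CLI defined above (flags as declared in the click options):
#   python run_lola_dice.py --use-dice --batch-size 64 --epochs 200 --runs 5 --save-dir results_ipd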
``` |
{
"source": "jleni/marLo",
"score": 2
} |
#### File: marLo/marlo/base_env_builder.py
```python
import time
import json
import gym
import numpy as np
import marlo
from marlo import MalmoPython
import uuid
import hashlib
import base64
import xml.etree.ElementTree as ElementTree
import traceback
from jinja2 import Environment as jinja2Environment
from jinja2 import FileSystemLoader as jinja2FileSystemLoader
import logging
logger = logging.getLogger(__name__)
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
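# Example (illustrative): p = dotdict({"role": 0}) allows p.role instead of p["role"];
# because __getattr__ is dict.get, a missing attribute such as p.foo reads as None.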
class TurnState(object):
def __init__(self):
self._turn_key = None
self._has_played = False
def update(self, key):
self._has_played = False
self._turn_key = key
@property
def can_play(self):
return self._turn_key is not None and not self._has_played
@property
def key(self):
return self._turn_key
@property
def has_played(self):
return self._has_played
@has_played.setter
def has_played(self, value):
self._has_played = bool(value)
class MarloEnvBuilderBase(gym.Env):
"""Base class for all Marlo environment builders
All the individual ``MarloEnvBuilder`` classes
(for example: :class:`marlo.envs.DefaultWorld.main.MarloEnvBuilder`)
derive from this class.
This class provides all the necessary functions for the
lifecycle management of a MarLo environment.
"""
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, templates_folder):
super(MarloEnvBuilderBase, self).__init__()
self.templates_folder = templates_folder
self.setup_templating()
self._default_base_params = False
self.agent_host = MalmoPython.AgentHost()
self.mission_spec = None
self.client_pool = None
self.experiment_id = None
self._turn = None
def setup_templating(self):
"""
Sets up the basic ``jinja2`` templating fileloader and
environments.
        The ``MarloEnvBuilder`` classes expect the following variables
to be available to them for rendering the ``MissionSpec``
- ``self.jinja2_fileloader``
- ``self.jinj2_env``
"""
self.jinja2_fileloader = jinja2FileSystemLoader(self.templates_folder)
self.jinj2_env = jinja2Environment(loader=self.jinja2_fileloader)
def render_mission_spec(self):
"""
This function looks for a ``mission.xml`` template inside the
``templates`` folder, and renders it using ``jinja2``.
        This can very well be overridden by ``MarloEnvBuilder`` if required.
"""
template = self.jinj2_env.get_template("mission.xml")
return template.render(
params=self.params
)
@property
def white_listed_join_params(self):
"""
This returns a list of whitelisted game parameters which can be
modified when joining a game by using :meth:`marlo.init`.
"""
return marlo.JOIN_WHITELISTED_PARAMS
@property
def default_base_params(self):
"""
        The **default game parameters** for all MarLo environments. These can be
modified by either overriding this class in
:class:`marlo.envs.DefaultWorld.main.MarloEnvBuilder` or implementing
a `_default_params` function in the derived class.
The default parameters are as follows :
:param seed: Seed for the random number generator (Default : ``random``). (**Note** This is not properly integrated yet.)
:type seed: int
:param tick_length: length of a single in-game tick (in milliseconds) (Default : ``50``)
:type tick_length: int
:param role: Game Role with which the current agent should join. (Default : ``0``)
:type role: int
:param experiment_id: A unique alphanumeric id for a single game. This is used to validate the session that an agent is joining. (Default : ``random_experiment_id``).
:type experiment_id: str
:param client_pool: A `list` of `tuples` representing the Minecraft client_pool the current agent can try to join. (Default : ``[('127.0.0.1', 10000)]``)
:type client_pool: list
:param agent_names: A `list` of names for the agents that are expected to join the game. This is used by the templating system to add an appropriate number of agents. (Default : ``["MarLo-Agent-0"]``)
        :type agent_names: list
:param max_retries: Maximum Number of retries when trying to connect to a client_pool to start a mission. (Default : ``30``)
:type max_retries: int
:param retry_sleep: Time (in seconds) that the execution should sleep between retries for starting a mission. (Default: ``3``)
:type retry_sleep: float
:param step_sleep: Time (in seconds) to sleep when trying to obtain the latest world state. (Default: ``0.001``)
:type step_sleep: float
        :param skip_steps: Number of observation steps to skip every time we attempt to get the latest world_state. (Default: ``0``)
:type skip_steps: int
:param videoResolution: Resolution of the frame that is expected as the RGB observation. (Default: ``[800, 600]``)
:type videoResolution: list
:param videoWithDepth: If the depth channel should also be added to the observation. (Default: ``False`` )
:type videoWithDepth: bool
        :param observeRecentCommands: If the Recent Commands should be included in the auxiliary observation available through ``info['observation']``. (Default: ``False``)
        :type observeRecentCommands: bool
        :param observeHotBar: If the HotBar information should be included in the auxiliary observation available through ``info['observation']``. (Default: ``False``)
        :type observeHotBar: bool
        :param observeFullInventory: If the FullInventory information should be included in the auxiliary observation available through ``info['observation']``. (Default: ``False``)
        :type observeFullInventory: bool
        :param observeGrid: Asks for observations of the block types within a cuboid relative to the agent's position in the auxiliary observation available through ``info['observation']``. (Default: ``False``)
        :type observeGrid: bool, list
        :param observeDistance: Asks for the Euclidean distance to a location to be included in the auxiliary observation available through ``info['observation']``. (Default: ``False``)
        :type observeDistance: bool, list
        :param observeChat: If the Chat information should be included in the auxiliary observation available through ``info['observation']``. (Default: ``False``)
        :type observeChat: bool
        :param continuous_to_discrete: Converts continuous actions to discrete. When the allowed continuous actions are 'move' and 'turn', the discrete action space contains 4 actions: move -1, move 1, turn -1, turn 1. (Default : ``True``)
:type continuous_to_discrete: bool
:param allowContinuousMovement: If all continuous movement commands should be allowed. (Default : ``True``)
:type allowContinuousMovement: bool
:param allowDiscreteMovement: If all discrete movement commands should be allowed. (Default : ``True``)
:type allowDiscreteMovement: bool
:param allowAbsoluteMovement: If all absolute movement commands should be allowed. (Default : ``False``) (**Not Implemented**)
:type allowAbsoluteMovement: bool
:param add_noop_command: If a ``noop`` (``move 0\\nturn 0``) command should be added to the actions. (Default : ``True``)
:type add_noop_command: bool
:param recordDestination: Destination where Mission Records should be stored. (Default : ``None``)
:type recordDestination: str
:param recordObservations: If Observations should be recorded in the ``MissionRecord``s. (Default : ``None``)
:type recordObservations: bool
:param recordRewards: If Rewards should be recorded in the ``MissionRecord``s. (Default : ``None``)
:type recordRewards: bool
:param recordCommands: If Commands (actions) should be recorded in the ``MissionRecord``s. (Default : ``None``)
:type recordCommands: bool
:param recordMP4: If a MP4 should be recorded in the ``MissionRecord``, and if so, the specifications as : ``[frame_rate, bit_rate]``. (Default : ``None``)
:type recordMP4: list
:param gameMode: The Minecraft gameMode for this particular game. One of ``['spectator', 'creative', 'survival']``. (Default: ``survival``)
:type gameMode: str
:param forceWorldReset: Force world reset on every reset. Makes sense only in case of environments with inherent stochasticity (Default: ``False``)
:type forceWorldReset: bool
:param turn_based: Specifies if the current game is a turn based game. (Default : ``False``)
:type turn_based: bool
"""
if not self._default_base_params:
self._default_base_params = dotdict(
seed="random",
tick_length=50,
role=0,
experiment_id="random_experiment_id",
client_pool = [('127.0.0.1', 10000)],
agent_names = ["MarLo-Agent-0"],
max_retries=30,
retry_sleep=3,
step_sleep=0.001,
skip_steps=0,
videoResolution=[800, 600],
videoWithDepth=None,
observeRecentCommands=None,
observeHotBar=None,
observeFullInventory=None,
observeGrid=None,
observeDistance=None,
observeChat=None,
continuous_to_discrete=True,
allowContinuousMovement=True,
allowDiscreteMovement=True,
allowAbsoluteMovement=False,
add_noop_command=True,
recordDestination=None,
recordObservations=None,
recordRewards=None,
recordCommands=None,
recordMP4=None,
gameMode="survival",
forceWorldReset=False,
turn_based=False,
)
return self._default_base_params
def setup_video(self, params):
"""
        Sets up the Video Requests for an environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Video
############################################################
if params.videoResolution:
if params.videoWithDepth:
self.mission_spec.requestVideoWithDepth(
*params.videoResolution
)
else:
self.mission_spec.requestVideo(*params.videoResolution)
def setup_observe_params(self, params):
"""
        Sets up the Auxiliary Observation Requests for an environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup observe<>*
############################################################
if params.observeRecentCommands:
self.mission_spec.observeRecentCommands()
if params.observeHotBar:
self.mission_spec.observeHotBar()
if params.observeFullInventory:
self.mission_spec.observeFullInventory()
if params.observeGrid:
self.mission_spec.observeGrid(*(params.observeGrid + ["grid"]))
if params.observeDistance:
self.mission_spec.observeDistance(
*(params.observeDistance + ["dist"])
)
if params.observeChat:
self.mission_spec.observeChat()
def setup_action_commands(self, params):
"""
        Sets up the Action Commands for the current agent interacting with the environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Action Commands
############################################################
if params.allowContinuousMovement or params.allowAbsoluteMovement or \
params.allowDiscreteMovement:
# Remove all command handlers
self.mission_spec.removeAllCommandHandlers()
# ContinousMovement commands
if isinstance(params.allowContinuousMovement, list):
for _command in params.allowContinuousMovement:
self.mission_spec.allowContinuousMovementCommand(_command)
elif params.allowContinuousMovement is True:
self.mission_spec.allowAllContinuousMovementCommands()
# AbsoluteMovement commands
if isinstance(params.allowAbsoluteMovement, list):
for _command in params.allowAbsoluteMovement:
self.mission_spec.allowAbsoluteMovementCommand(_command)
elif params.allowAbsoluteMovement is True:
self.mission_spec.allowAllAbsoluteMovementCommands()
# DiscreteMovement commands
if isinstance(params.allowDiscreteMovement, list):
for _command in params.allowDiscreteMovement:
self.mission_spec.allowDiscreteMovementCommand(_command)
elif params.allowDiscreteMovement is True:
self.mission_spec.allowAllDiscreteMovementCommands()
def setup_observation_space(self, params):
"""
        Sets up the Observation Space for an environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Observation Space
############################################################
self.video_height = self.mission_spec.getVideoHeight(0)
self.video_width = self.mission_spec.getVideoWidth(0)
self.video_depth = self.mission_spec.getVideoChannels(0)
self.observation_space = gym.spaces.Box(
low=0, high=255,
shape=(self.video_height, self.video_width, self.video_depth),
dtype=np.uint8
)
# Setup a dummy first image
self.last_image = np.zeros(
(self.video_height, self.video_width, self.video_depth),
dtype=np.uint8
)
def setup_action_space(self, params):
"""
        Sets up the action space for the current agent interacting with the environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Action Space
############################################################
continuous_actions = []
discrete_actions = []
multidiscrete_actions = []
multidiscrete_action_ranges = []
if params.add_noop_command:
discrete_actions.append("move 0\nturn 0")
command_handlers = self.mission_spec.getListOfCommandHandlers(0)
for command_handler in command_handlers:
commands = self.mission_spec.getAllowedCommands(0, command_handler)
for command in commands:
logger.debug("Command : {}".format(command))
if command_handler == "ContinuousMovement":
if command in ["move", "strafe", "pitch", "turn"]:
if params.continuous_to_discrete:
discrete_actions.append(command + " 1")
discrete_actions.append(command + " -1")
else:
continuous_actions.append(command)
elif command in ["crouch", "jump", "attack", "use"]:
if params.continuous_to_discrete:
discrete_actions.append(command + " 1")
discrete_actions.append(command + " 0")
else:
multidiscrete_actions.append(command)
multidiscrete_action_ranges.append([0, 1])
else:
                    raise ValueError(
                        "Unknown continuous action : {}".format(command)
                    )
elif command_handler == "DiscreteMovement":
if command in marlo.SINGLE_DIRECTION_DISCRETE_MOVEMENTS:
discrete_actions.append(command + " 1")
elif command in marlo.MULTIPLE_DIRECTION_DISCRETE_MOVEMENTS:
discrete_actions.append(command + " 1")
discrete_actions.append(command + " -1")
else:
raise ValueError(
"Unknown discrete action : {}".format(command)
)
elif command_handler in ["AbsoluteMovement", "Inventory"]:
logger.warn(
"Command Handler `{}` Not Implemented".format(
command_handler
)
)
else:
raise ValueError(
"Unknown Command Handler : `{}`".format(
command_handler
)
)
# Convert lists into proper gym action spaces
self.action_names = []
self.action_spaces = []
# Discrete Actions
if len(discrete_actions) > 0:
self.action_spaces.append(
gym.spaces.Discrete(len(discrete_actions))
)
self.action_names.append(discrete_actions)
# Continuous Actions
if len(continuous_actions) > 0:
self.action_spaces.append(
gym.spaces.Box(-1, 1, (len(continuous_actions),))
)
self.action_names.append(continuous_actions)
if len(multidiscrete_actions) > 0:
self.action_spaces.append(
gym.spaces.MultiDiscrete(multidiscrete_action_ranges)
)
self.action_names.append(multidiscrete_actions)
# No tuples in case a single action
if len(self.action_spaces) == 1:
self.action_space = self.action_spaces[0]
else:
            self.action_space = gym.spaces.Tuple(self.action_spaces)
def setup_client_pool(self, params):
"""
        Sets up the ``client_pool`` for the current environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Client Pool
############################################################
        if not params.client_pool:
            logger.warn("No client pool provided, attempting to create "
                        "a client_pool of the correct size")
            number_of_agents = self.mission_spec.getNumberOfAgents()
            params.client_pool = marlo.launch_clients(number_of_agents)
        # Validate before building the ClientPool from it
        if not isinstance(params.client_pool, list):
            raise ValueError("params.client_pool must be a list of tuples "
                             "of (ip_address, port)")
        self.client_pool = MalmoPython.ClientPool()
        for _client in params.client_pool:
            self.client_pool.add(MalmoPython.ClientInfo(*_client))
def setup_mission_record(self, params):
"""
        Sets up the ``mission_record`` for the current environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Mission Record
############################################################
self.mission_record_spec = MalmoPython.MissionRecordSpec() # empty
if params.recordDestination:
self.mission_record_spec.setDestination(params.recordDestination)
if params.recordRewards:
self.mission_record_spec.recordRewards()
if params.recordCommands:
self.mission_record_spec.recordCommands()
if params.recordMP4:
assert type(params.recordMP4) == list \
and len(params.recordMP4) == 2
self.mission_record_spec.recordMP4(*(params.recordMP4))
else:
if params.recordRewards or params.recordCommands or params.recordMP4:
                raise Exception("recordRewards or recordCommands or recordMP4 "
                                "provided without specifying recordDestination")
def setup_game_mode(self, params):
"""
        Sets up the ``gameMode`` for the current environment.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Setup Game Mode
############################################################
if params.gameMode:
if params.gameMode == "spectator":
self.mission_spec.setModeToSpectator()
elif params.gameMode == "creative":
self.mission_spec.setModeToCreative()
elif params.gameMode == "survival":
logger.info("params.gameMode : Cannot force survival mode.")
else:
raise Exception("Unknown params.gameMode : {}".format(
params.gameMode
))
def setup_mission_spec(self, params):
"""
        Generates and sets up the first MissionSpec as generated by :meth:`render_mission_spec`.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
############################################################
# Instantiate Mission Spec
############################################################
mission_xml = self.render_mission_spec()
self.mission_spec = MalmoPython.MissionSpec(mission_xml, True)
def setup_turn_based_games(self, params):
"""
        Sets up a ``turn_based`` game.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
"""
if params.turn_based:
self._turn = TurnState()
def init(self, params, dry_run=False):
"""
Generates the join tokens for all the agents in a game based on the provided game params.
:param params: Marlo Game Parameters as described in :meth:`default_base_params`
:type params: dict
:param dry_run: If the current execution is a ``dry_run``
:type dry_run: bool
:returns: List of join_tokens, one join_token for every agent in the game.
:rtype: list
"""
self.params.update(params)
self.dry_run = dry_run
self.build_env(self.params)
number_of_agents = self.mission_spec.getNumberOfAgents()
mission_xml = self.mission_spec.getAsXML(False)
join_tokens = []
experiment_id = str(uuid.uuid4())
for _idx in range(number_of_agents):
_join_token = {}
_join_token["role"] = _idx
_join_token["mission_xml"] = mission_xml
_join_token["experiment_id"] = experiment_id
_join_token["game_params"] = self.params
_join_token = base64.b64encode(
json.dumps(_join_token).encode('utf8')
)
join_tokens.append(_join_token)
return join_tokens
def build_env(self, params):
self.setup_mission_spec(params)
self.setup_turn_based_games(params)
self.setup_video(params)
self.setup_observe_params(params)
self.setup_action_commands(params)
self.setup_observation_space(params)
self.setup_action_space(params)
self.setup_client_pool(params)
self.setup_mission_record(params)
self.setup_game_mode(params)
########################################################################
# Env interaction functions
########################################################################
def reset(self):
if self.params.forceWorldReset:
# Force a World Reset on each reset
self.mission_spec.forceWorldReset()
# Attempt to start a mission
for retry in range(self.params.max_retries + 1):
logger.debug("RETRY : {}".format(retry))
# Role 0 (the server) could take some extra time to start
if self.params.role != 0:
time.sleep(1)
else:
time.sleep(0.1)
if self.params.experiment_id:
self.experiment_id = self.params.experiment_id
try:
if not self.client_pool:
raise Exception("client_pool not specified.")
self.agent_host.startMission(
self.mission_spec,
self.client_pool,
self.mission_record_spec,
self.params.role,
self.experiment_id
)
break #Break out of the try-to-connect loop
except RuntimeError as e:
traceback.format_exc()
if retry == self.params.max_retries:
logger.error("Error Starting Mission : {}".format(
traceback.format_exc()
))
raise e
else:
logger.warn("Error on attempting to start mission : {}"
.format(str(e)))
logger.warn("Will attempt again after {} seconds."
.format(self.params.retry_sleep))
time.sleep(self.params.retry_sleep)
logger.info("Waiting for mission to start...")
world_state = self.agent_host.getWorldState()
while not world_state.has_mission_begun:
time.sleep(0.1)
world_state = self.agent_host.getWorldState()
for error in world_state.errors:
            logger.error("Error: %s", error)
logger.warn(error.text)
logger.info("Mission Running")
frame = self._get_video_frame(world_state)
return frame
def _get_world_state(self):
# patiently wait till we get the next observation
while True:
time.sleep(self.params.step_sleep)
world_state = self.agent_host.peekWorldState()
if world_state.number_of_observations_since_last_state > \
self.params.skip_steps or not world_state.is_mission_running:
break
return self.agent_host.getWorldState()
def _get_video_frame(self, world_state):
if world_state.number_of_video_frames_since_last_state > 0:
assert len(world_state.video_frames) == 1
frame = world_state.video_frames[0]
image = np.frombuffer(frame.pixels, dtype=np.uint8)
image = image.reshape((frame.height, frame.width, frame.channels))
            print("Frame received: {}".format(image.shape))
self.last_image = image
else:
# can happen only when mission ends before we get frame
# then just use the last frame, it doesn't matter much anyway
image = self.last_image
return image
def _get_observation(self, world_state):
if world_state.number_of_observations_since_last_state > 0:
missed = world_state.number_of_observations_since_last_state \
- len(world_state.observations) - self.params.skip_steps
if missed > 0:
logger.warn("Agent missed %d observation(s).", missed)
assert len(world_state.observations) == 1
return json.loads(world_state.observations[0].text)
else:
return None
def _send_command(self, command):
if self._turn:
self.agent_host.sendCommand(command, self._turn.key)
            self._turn.has_played = True
else:
logger.debug("Send Command : {}".format(command))
self.agent_host.sendCommand(command)
def _take_action(self, actions):
# no tuple in case of a single action
if len(self.action_spaces) == 1:
actions = [actions]
if self._turn:
if not self._turn.can_play:
return
# send corresponding command
for _spaces, _commands, _actions in \
zip(self.action_spaces, self.action_names, actions):
if isinstance(_spaces, gym.spaces.Discrete):
logger.debug(_commands[_actions])
# print("cmd " + cmds[acts])
self._send_command(_commands[_actions])
elif isinstance(_spaces, gym.spaces.Box):
for command, value in zip(_commands, _actions):
                    _command = "{} {}".format(command, value)
logger.debug(_command)
self._send_command(_command)
elif isinstance(_spaces, gym.spaces.MultiDiscrete):
for command, value in zip(_commands, _actions):
                    _command = "{} {}".format(command, value)
logger.debug(_command)
self._send_command(_command)
else:
logger.warn("Ignoring unknown action space for {}".format(
_commands
))
def step(self, action):
world_state = self.agent_host.peekWorldState()
if world_state.is_mission_running:
self._take_action(action)
world_state = self._get_world_state()
# Update turn state
if world_state.number_of_observations_since_last_state > 0:
data = json.loads(world_state.observations[-1].text)
turn_key = data.get(u'turn_key', None)
if turn_key is not None and turn_key != self._turn.key:
self._turn.update(turn_key)
# Log
for error in world_state.errors:
logger.warn(error.text)
for message in world_state.mission_control_messages:
logger.debug(message.text)
root = ElementTree.fromstring(message.text)
if root.tag == '{http://ProjectMalmo.microsoft.com}MissionEnded':
for el in root.findall(
'{http://ProjectMalmo.microsoft.com}HumanReadableStatus' # noqa: E501
):
logger.info("Mission ended: %s", el.text)
# Compute Rewards
reward = 0
for _reward in world_state.rewards:
print(_reward)
reward += _reward.getValue()
# Get observation
image = self._get_video_frame(world_state)
# detect if done ?
done = not world_state.is_mission_running
# gather info
info = {}
info['has_mission_begun'] = world_state.has_mission_begun
info['is_mission_running'] = world_state.is_mission_running
info['number_of_video_frames_since_last_state'] = world_state.number_of_video_frames_since_last_state # noqa: E501
info['number_of_rewards_since_last_state'] = world_state.number_of_rewards_since_last_state # noqa: E501
info['number_of_observations_since_last_state'] = world_state.number_of_observations_since_last_state # noqa: E501
info['mission_control_messages'] = [msg.text for msg in world_state.mission_control_messages] # noqa: E501
info['observation'] = self._get_observation(world_state)
return image, reward, done, info
def render(self, mode='rgb_array', close=False):
if mode == "rgb_array":
return self.last_image
elif mode == "human":
# TODO: Implement this
            raise NotImplementedError("Render mode 'human' is not implemented yet")
else:
            raise NotImplementedError("Render Mode not implemented : {}"
                                      .format(mode))
def seed(self, seed=None):
self.mission_spec.setWorldSeed(str(seed))
return [seed]
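# Illustrative usage (not part of this module): a concrete MarloEnvBuilder subclass
# (hypothetical name MyEnvBuilder, whose templates folder contains a mission.xml)
# could be driven roughly like this:
#   builder = MyEnvBuilder(templates_folder="templates")
#   join_tokens = builder.init({"videoResolution": [400, 300]})
#   decoded = json.loads(base64.b64decode(join_tokens[0]))  # keys: role, mission_xml, experiment_id, game_params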
``` |
{
"source": "jleni/pyscaffold",
"score": 2
} |
#### File: pyscaffold/extensions/tox.py
```python
from __future__ import absolute_import
from ..templates import tox as tox_ini
from ..api import Extension
from ..api import helpers
class Tox(Extension):
"""Generate Tox configuration file"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return self.register(
actions,
self.add_files,
after='define_structure')
    def add_files(self, struct, opts):
        """Add tox.ini file to structure
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
files = {
'tox.ini': (tox_ini(opts), helpers.NO_OVERWRITE)
}
return helpers.merge(struct, {opts['project']: files}), opts
```
#### File: src/pyscaffold/info.py
```python
from __future__ import absolute_import, print_function
import copy
import getpass
import os
import socket
from .contrib.six.moves import configparser
from .contrib.six import raise_from
from . import shell, utils
from .exceptions import (
ShellCommandException,
GitNotInstalled,
GitNotConfigured,
PyScaffoldTooOld,
NoPyScaffoldProject)
def username():
"""Retrieve the user's name
Returns:
str: user's name
"""
try:
user = next(shell.git("config", "--get", "user.name"))
user = user.strip()
except ShellCommandException:
user = getpass.getuser()
return utils.utf8_decode(user)
def email():
"""Retrieve the user's email
Returns:
str: user's email
"""
try:
email = next(shell.git("config", "--get", "user.email"))
email = email.strip()
except ShellCommandException:
user = getpass.getuser()
host = socket.gethostname()
email = "{user}@{host}".format(user=user, host=host)
return utils.utf8_decode(email)
def is_git_installed():
"""Check if git is installed
Returns:
bool: True if git is installed, False otherwise
"""
if shell.git is None:
return False
try:
shell.git("--version")
except ShellCommandException:
return False
return True
def is_git_configured():
"""Check if user.name and user.email is set globally in git
This will also return false if git is not available at all.
Returns:
bool: True if it is set globally, False otherwise
"""
try:
for attr in ["name", "email"]:
shell.git("config", "--get", "user.{}".format(attr))
except ShellCommandException:
return False
return True
def check_git():
"""Checks for git and raises appropriate exception if not
Raises:
:class:`~.GitNotInstalled`: when git command is not available
:class:`~.GitNotConfigured`: when git does not know user information
"""
if not is_git_installed():
raise GitNotInstalled
if not is_git_configured():
raise GitNotConfigured
def project(opts):
"""Update user options with the options of an existing PyScaffold project
    Args:
opts (dict): options of the project
Returns:
dict: options with updated values
Raises:
        :class:`~.PyScaffoldTooOld`: when PyScaffold is too old to update from
:class:`~.NoPyScaffoldProject`: when project was not generated with
PyScaffold
"""
from pkg_resources import iter_entry_points
opts = copy.deepcopy(opts)
try:
cfg = configparser.ConfigParser()
cfg.read(os.path.join(opts['project'], 'setup.cfg'))
if not cfg.has_section('pyscaffold'):
raise PyScaffoldTooOld
pyscaffold = cfg['pyscaffold']
metadata = cfg['metadata']
# This would be needed in case of inplace updates, see issue #138
# if opts['project'] == '.':
# opts['project'] = metadata['name']
# Overwrite only if user has not provided corresponding cli argument
opts.setdefault('package', pyscaffold['package'])
opts.setdefault('author', metadata['author'])
opts.setdefault('email', metadata['author-email'])
opts.setdefault('url', metadata['url'])
opts.setdefault('description', metadata['description'])
opts.setdefault('license', utils.best_fit_license(metadata['license']))
# Additional parameters compare with `get_default_options`
opts['classifiers'] = metadata['classifiers'].strip().split('\n')
opts['version'] = pyscaffold['version']
# complement the cli extensions with the ones from configuration
if 'extensions' in pyscaffold:
cfg_extensions = pyscaffold['extensions'].strip().split('\n')
opt_extensions = [ext.name for ext in opts['extensions']]
add_extensions = set(cfg_extensions) - set(opt_extensions)
for extension in iter_entry_points('pyscaffold.cli'):
if extension.name in add_extensions:
extension_obj = extension.load()(extension.name)
if extension.name in pyscaffold:
ext_value = pyscaffold[extension.name]
extension_obj.args = ext_value
opts[extension.name] = ext_value
opts['extensions'].append(extension_obj)
except Exception as e:
raise raise_from(NoPyScaffoldProject, e)
return opts
```
#### File: src/pyscaffold/termui.py
```python
from __future__ import absolute_import
import sys
ESCAPE = '\033[{:d}m'
STYLES = dict(
clear=0,
bold=1,
black=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
on_black=40,
on_red=41,
on_green=42,
on_yellow=43,
on_blue=44,
on_magenta=45,
on_cyan=46,
on_white=47
)
def isatty(stream=None):
"""Detect if the given stream/stdout is part of an interactive terminal.
Args:
stream: optionally the stream to check
Returns:
bool: result of check
"""
stream = stream or sys.stdout
if hasattr(stream, 'isatty'):
return stream.isatty()
return False
def init_colorama():
"""Initialize colorama if it is available.
Returns:
bool: result of check
"""
try:
import colorama # noqa
colorama.init()
return True
except ImportError:
return False
def curses_available():
"""Check if the curses package from stdlib is available.
Usually not available for windows, but its presence indicates that the
terminal is capable of displaying some UI.
Returns:
bool: result of check
"""
try:
import curses # noqa
return True
except ImportError:
return False
SYSTEM_SUPPORTS_COLOR = curses_available() or init_colorama()
# Eagerly executed, in order to avoid calling colorama.init multiple times
def supports_color(stream=None):
"""Check if the stream is supposed to handle coloring.
Returns:
bool: result of check
"""
return isatty(stream) and SYSTEM_SUPPORTS_COLOR
def decorate(msg, *styles):
"""Use ANSI codes to format the message.
Args:
msg (str): string to be formatted
*styles (list): the remaining arguments should be strings that
represent the 8 basic ANSI colors. ``clear`` and ``bold`` are also
supported. For background colors use ``on_<color>``.
Returns:
str: styled and formatted message
"""
if not styles:
return msg
styles = ''.join(ESCAPE.format(STYLES[s]) for s in styles if s in STYLES)
return styles + msg + ESCAPE.format(STYLES['clear'])
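# Example (illustrative): decorate("error", "red", "bold") returns
# "\033[31m\033[1merror\033[0m"; pair it with supports_color(sys.stderr)
# to decide whether the codes should be emitted at all.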
``` |
{
"source": "jlenn/movie-web-app",
"score": 2
} |
#### File: app/movies/views.py
```python
from django.shortcuts import render, redirect
from django.contrib import messages
from airtable import Airtable
import os
AT = Airtable(os.environ.get('AIRTABLE_MOVIESTABLE_BASE_ID'),
'Movies',
api_key=os.environ.get('AIRTABLE_API_KEY'))
# Create your views here.
def home_page(request):
user_query = str(request.GET.get('query', ''))
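    # Airtable's FIND() returns the 1-based position of the needle (0 when absent),
    # so this formula keeps only records whose lowercased Name contains the query.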
search_result = AT.get_all(formula="FIND('" + user_query.lower() + "', LOWER({Name}))")
stuff_for_frontend = {'search_result': search_result}
return render(request, 'movies/movies_stuff.html', stuff_for_frontend)
def create(request):
if request.method == 'POST':
data = {
'Name': request.POST.get('name'),
'Pictures': [{'url': request.POST.get('url') or 'https://www.classicposters.com/images/nopicture.gif'}],
'Rating': int(request.POST.get('rating')),
'Notes': request.POST.get('notes')
}
try:
response = AT.insert(data)
messages.success(request, 'New movie added: {}'.format(response['fields'].get('Name')))
except Exception as e:
messages.warning(request, 'Got an error when trying to add a movie: {}'.format(e))
return redirect('/')
def edit(request, movie_id):
if request.method == 'POST':
data= {
'Name': request.POST.get('name'),
'Pictures': [{'url': request.POST.get('url') or 'https://www.classicposters.com/images/nopicture.gif'}],
'Rating': int(request.POST.get('rating')),
'Notes': request.POST.get('notes'),
}
try:
response = AT.update(movie_id, data)
messages.success(request, 'Updated movie: {}'.format(response['fields'].get('Name')))
except Exception as e:
messages.warning(request, 'Got an error when trying to update a movie: {}'.format(e))
return redirect('/')
def delete(request, movie_id):
try:
movie_name = AT.get(movie_id)['fields'].get('Name')
AT.delete(movie_id)
messages.warning(request, 'Movie deleted: {}'.format(movie_name))
except Exception as e:
messages.warning(request, 'Got an error when trying to delete a movie: {}'.format(e))
return redirect('/')
``` |
{
"source": "jlennox/PicoPi",
"score": 3
} |
#### File: PicoPi/simon/simon.py
```python
import machine
import utime
import random
import _thread
import sh1106
import micropython
# Since there are multiple things that logically belong together, let's group them together.
class SimonIO:
def __init__(self, name, ledPin, buttonPin, buzzerFreq):
self.name = name
self.led = machine.Pin(ledPin, machine.Pin.OUT)
self.button = machine.Pin(buttonPin, machine.Pin.IN, machine.Pin.PULL_DOWN)
self.buzzerFreq = buzzerFreq
def buzz(self, enable):
if enable:
buzzer.duty_u16(1000)
buzzer.freq(self.buzzerFreq)
else:
buzzer.duty_u16(0)
def show(self, timeout):
self.led.on()
self.buzz(True)
utime.sleep(timeout)
self.led.off()
self.buzz(False)
# Configure our program so it knows what pins go where.
# All pin #'s are in the same place of the code to keep it centralized/organized.
pins = [
SimonIO("Green", 16, 17, 262), # 262 = freq for C4 note. These notes are from the C major scale.
SimonIO("Red", 18, 19, 294), # D4
SimonIO("Yellow", 20, 21, 330), # E4
SimonIO("Blue", 26, 27, 349), # F4
]
# Each one of these that's set to positive voltage will increase the difficulty.
difficultyPins = [
machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_DOWN),
machine.Pin(1, machine.Pin.IN, machine.Pin.PULL_DOWN),
]
buzzer = machine.PWM(machine.Pin(28))
displayi2c = machine.I2C(1, sda=machine.Pin(2), scl=machine.Pin(3), freq=800000)
# The display connects over I2C. The display has a command based language implemented
# on its display controller chip (SH1106). Thankfully someone already wrote a MicroPython
# driver for it.
# https://github.com/robert-hh/SH1106
displayWidth = micropython.const(128)
displayHeight = micropython.const(64)
display = sh1106.SH1106_I2C(displayWidth, displayHeight, displayi2c, None, 60)
display.init_display()
display.contrast(255)
display.rotate(True)
display.invert(True)
display.poweron()
# Displays 2 rows of text centered on the screen.
def displayScore(label, wut):
# Blank out the old pixels
display.fill(0)
# Each character is 8 pixels wide. Since we want half that (we're centering)
# then we * 4 instead of * 8.
display.text(label, int(displayWidth / 2) - len(label) * 4, 30 - 6, 1)
# We're prone to giving numbers, so lets be sure it's a string.
wut = str(wut)
display.text(wut, int(displayWidth / 2) - len(wut) * 4, 30 + 6, 1)
display.show()
# Lets load the highscore.
highScore = int(0)
try:
with open("highscore.txt", "r") as highScoreFile:
highScoreStr = highScoreFile.read()
highScore = int("0" if highScoreStr == "" or highScoreStr is None else highScoreStr)
except:
pass
print("Found highscore: " + str(highScore))
# There's no escape!
while True:
# Reset game to initial state.
print("Reset.")
for pin in pins:
pin.led.off()
buzzer.duty_u16(0)
currentGame = []
currentScore = 0
# Run a sort of attract mode and show highscore.
displayScore("Highscore", highScore)
print("Running attract.")
for _ in range(2):
for pin in pins:
pin.led.on()
pin.buzz(True)
utime.sleep(.1)
for pin in reversed(pins):
pin.led.off()
pin.buzz(False)
utime.sleep(.1)
for _ in range(4):
for pin in pins:
pin.led.toggle()
utime.sleep(.1)
lost = False
# Each pin that's set reduces the amount of time throughout the program.
difficulty = 1.0
for difficultyPin in difficultyPins:
difficulty -= .4 if difficultyPin.value() == 1 else 0
# A base of 5 seconds.
inputTime = 5.0 * difficulty
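    # Worked example: 0 pins -> 1.0 * 5.0 = 5.0 s, 1 pin -> 0.6 * 5.0 = 3.0 s,
    # 2 pins -> 0.2 * 5.0 = 1.0 s of input time per button press.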
    print("Difficulty %f (inputTime: %f)" % (difficulty, inputTime))
while not lost:
# Show their current score as they play
displayScore("Score", currentScore)
# Add a new entry.
newPin = random.choice(pins)
currentGame.append(newPin)
print("Adding new entry %s." % newPin.name)
# Replay the existing sequence
for entry in currentGame:
entry.show(.5 * difficulty)
utime.sleep(.2 * difficulty)
# Now they've got to enter that same sequence.
for entry in currentGame:
inputPin = None
# Loop until they enter something or inputTime elapses.
start = utime.time()
while inputPin is None and utime.time() - start < inputTime:
# Loop through the pins...
for pin in pins:
# ...and check each ones button to see if it's pushed.
if pin.button.value() == 1:
print("Button %s was pushed..." % pin.name)
# Give the real human player feedback that the button was pushed.
pin.buzz(True)
pin.led.on()
inputPin = pin
# Loop until they release the button.
while pin.button.value() == 1:
pass
pin.buzz(False)
pin.led.off()
print("...and released.")
# Sleep for a small amount of time. After testing, sometimes the button
# would register again.
utime.sleep(.2)
# Don't loop through the rest of the pins because we got a hit already.
break
utime.sleep(.01)
# They got it wrong! Or inputTime elapsed.
if inputPin is not entry:
print("They lost! Actual answer was %s." % entry.name)
# Blink the correct one in their stupid face.
entry.buzz(True)
for _ in range(12):
                    entry.led.toggle()
utime.sleep(.1)
entry.buzz(False)
# Start a new game, brings us back to the outer while loop.
lost = True
break
# Add a bit of a delay so it doesn't jump right into the replay when they release the button
utime.sleep(1)
currentScore += 1
if currentScore > highScore:
print("New highscore! " + str(currentScore))
highScore = currentScore
with open("highscore.txt", "w") as highScoreFile:
highScoreFile.write(str(highScore))
``` |
{
"source": "JLenssen/AttestationEngine",
"score": 2
} |
#### File: asvr/db/mqtt.py
```python
import paho.mqtt.client as mqtt
import a10.structures.identity
import a10.asvr.db.configuration
import threading
import time
import logging
def on_connect(client, userdata, flags, rc):
    print("Connected mqtt: {}".format(rc))
def on_disconnect(client, userdata, rc):
    # paho-mqtt invokes on_disconnect with (client, userdata, rc)
    logging.info("disconnecting reason " + str(rc))
    client.connected_flag = False
    client.disconnect_flag = True
    print("MQTT Disconnected")
    try:
        client.reconnect()
    except Exception:
        print("Connection is fscked")
def publish(ch, t, op, data):
payload = str({"t": t, "op": op, "data": data})
mqttc.publish(ch, payload)
def sendKeepAlive():
print(
"Starting keepalive ping with rate ",
a10.asvr.db.configuration.MQTTKEEPALIVEPING,
)
while True:
print("ping!")
publish(
"AS/MQTTPING",
"ping",
"ping",
{"session": a10.asvr.db.configuration.ASSESSIONIDENTITY},
)
time.sleep(int(a10.asvr.db.configuration.MQTTKEEPALIVEPING))
print(a10.asvr.db.configuration.MQTTADDRESS)
#
# This is a bit nasty, but if two clients have the same name then the earlier one
# will be kicked off by the MQTT broker - at least in mosquitto
# So we will add the AS_Session_Identity and a UUID
#
id = (
a10.asvr.db.configuration.MQTTCLIENTNAME
+ "_"
+ a10.asvr.db.configuration.ASSESSIONIDENTITY
+ "_"
+ a10.structures.identity.generateID()
)
print("mqtt client id is ", id)
mqttc = mqtt.Client(id)
mqttc.on_connect = on_connect
mqttc.connect(a10.asvr.db.configuration.MQTTADDRESS, port=a10.asvr.db.configuration.MQTTPORT)
# KEEP ALIVE PING
print("Starting keep alive thead")
keepalivethread = threading.Thread(target=sendKeepAlive)
print("Keep alive thread ID is ", keepalivethread)
keepalivethread.start()
```
#### File: asvr/protocols/A10HttpRest.py
```python
import json
import requests
import subprocess
import tempfile
import secrets
import string
import base64
import a10.asvr.protocols.A10ProtocolBase
import a10.structures.constants
import a10.structures.returncode
class A10HttpRest(a10.asvr.protocols.A10ProtocolBase.A10ProtocolBase):
NAME = "A10HTTPREST"
def __init__(self, endpoint, policyintent, policyparameters, callparameters):
super().__init__(endpoint, policyintent, policyparameters, callparameters)
def exec(self):
        print("1" + "=" * 86)
# see the makecredential example for how to use this.
# basically to store data that shouldn't be transmitted to the element
# but needs to be persisted, eg: makecredetial's secret
transientdata = {}
# print(
# "Calling protocol A10HTTPREST ",
# self.endpoint,
# self.policyintent,
# self.policyparameters,
# self.callparameters,
# )
# print(
# " + ---------------Types ",
# type(self.endpoint),
# type(self.policyintent),
# type(self.policyparameters),
# type(self.callparameters),
# )
#
# Some intents require additional processing
#
        if self.policyintent == "tpm2/credentialcheck":
            c = self.makecredential()
            if c is None:
                return a10.structures.returncode.ReturnCode(
                    a10.structures.constants.PROTOCOLEXECUTIONFAILURE, {"msg": "Makecredential failed", "transientdata": transientdata}
                )
            # Reuse the credential/secret pair from the single makecredential call above
            cred, secret = c
            self.callparameters["credential"] = cred
            transientdata["secret"] = secret
#
# Ok, now go on with the calling
#
elementURL = self.endpoint + "/" + self.policyintent
callbody = {
"policyparameters": self.policyparameters,
"callparameters": self.callparameters,
}
jsondata = json.dumps(callbody, ensure_ascii=False)
# note, we use POST because the body contains data, which is not part of the GET standard
try:
r = requests.post(
url=elementURL,
json=jsondata,
headers={"Content-type": "application/json", "Accept": "text/plain"},
timeout=30,
)
except requests.exceptions.ConnectionError as e:
return a10.structures.returncode.ReturnCode(
a10.structures.constants.PROTOCOLNETWORKFAILURE,
{"msg": "Network failure " + str(e),"transientdata":transientdata},
)
# This is already in JSON so ok
# print("RETURNING ",r,r.text,r.status_code)
# r.text is JSON but encoded as a strong,
# so we need to convert (load) it into a python dictionary if things went well
j = json.loads(r.text)
        print("2" + "=" * 86)
#
# Note we return a tuple of the data back from the element and the transient data
#
if r.status_code == 200:
return a10.structures.returncode.ReturnCode(
a10.structures.constants.PROTOCOLSUCCESS, {"claim":json.loads(r.text),"transientdata":transientdata}
)
else:
return a10.structures.returncode.ReturnCode(
a10.structures.constants.PROTOCOLEXECUTIONFAILURE, {"msg":json.loads(r.text),"transientdata":transientdata}
)
def makecredential(self):
print("\nmakecredential")
try:
ekpub = self.callparameters["ekpub"]
akname = self.callparameters["akname"]
except:
print("missing ekpub and/or akname ")
return None
# a bit of housekeeping
# store ek in temporary file
ektf = tempfile.NamedTemporaryFile()
ektf.write(bytes(ekpub, "utf-8"))
ektf.seek(0)
# generate secret
# This must be a maximum of 32 bytes for makecredential - it is possible that your TPM might vary, but 32 seems to be usual
alphabet = string.ascii_letters + string.digits
secret = "".join(secrets.choice(alphabet) for i in range(30))
print("Secret is ", secret)
secf = tempfile.NamedTemporaryFile()
secf.write(bytes(secret, "ascii"))
secf.seek(0)
# temporary file for credential
credf = tempfile.NamedTemporaryFile()
# makecredential
#
# assuming no local TPM with -T none --- might change this one day
# given that the tools need to be available .... or pytss.
# OK, maybe not such a bad thing that the AE runs on a device with a TPM or at least the tools
# Will be necessary for using tpm2_send in the other protocol
#
try:
out=""
cmd = (
"tpm2_makecredential -T none"
+ " -s "
+ secf.name
+ " -u "
+ ektf.name
+ " -n "
+ akname
+ " -G rsa -o "
+ credf.name
)
out = subprocess.check_output(cmd.split())
except:
print("tpm2_makecredential failed "+out)
return None
# read the credential
credf.seek(0)
cred = base64.b64encode(credf.read()).decode("utf-8")
# cleanup
ektf.close()
secf.close()
credf.close()
# return
return cred,secret
```
#### File: nut10/endpoints/ima_endpoint.py
```python
from flask import Blueprint, jsonify
import json
import datetime
import base64
from claims import claimstructure
ima_endpoint = Blueprint("ima_endpoint", __name__)
@ima_endpoint.route("/measurements", methods=["GET", "POST"])
def returnIMALOG():
c = claimstructure.Claim()
c.addHeaderItem("ta_received", str(datetime.datetime.now(datetime.timezone.utc)))
try:
f = open("/sys/kernel/security/ima/ascii_runtime_measurements","r")
imalog = f.read()
#eventlog_enc = base64.b85encode(eventlog).decode("utf-8")
#c.addPayloadItem("encoding", "base85/utf-8")
#c.addPayloadItem("eventlog", eventlog_enc)
c.addPayloadItem("size",len(imalog))
c.addPayloadItem("logfile","/sys/kernel/security/ima/ascii_runtime_measurements")
c.addPayloadItem("imalog",imalog)
#c.addPayloadItem("sizeencoded",len(eventlog_enc))
f.close()
except Exception as e:
c.addPayloadItem("error", str(e))
c.addHeaderItem("ta_complete", str(datetime.datetime.now(datetime.timezone.utc)))
rc = c.getClaim()
return jsonify(rc), 200
``` |
{
"source": "jleopold28/eds",
"score": 3
} |
#### File: eds/interfaces/vcs_provider.py
```python
from typing import Dict
from abc import abstractmethod
from eds.interfaces.plugin import Plugin
class VcsProvider(Plugin):
"""eds.vcs_provider interface."""
interface_name = "eds.vcs_provider"
schema = {
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "eds.vcs_provider",
"title": "VCS Provider",
"type": "object",
"properties": {}
}
@abstractmethod
def parse_event(self) -> Dict:
"""Parse webhook event for project url and ref."""
raise NotImplementedError()
@abstractmethod
def get_files(self) -> Dict:
"""Get project files."""
raise NotImplementedError()
@abstractmethod
def create_project(self) -> None:
"""Create a Project."""
raise NotImplementedError()
@abstractmethod
def delete_project(self) -> None:
"""Delete a Project."""
raise NotImplementedError()
@abstractmethod
def update_project(self) -> None:
"""Update a Project."""
raise NotImplementedError()
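# Hedged illustration (not part of the original module): a minimal, hypothetical
# implementation sketch, assuming the Plugin base class imposes no further abstract
# members or constructor requirements.
class DummyVcsProvider(VcsProvider):
    def parse_event(self) -> Dict:
        return {"url": "https://example.invalid/repo.git", "ref": "main"}
    def get_files(self) -> Dict:
        return {"eds.yml": "projects: []"}
    def create_project(self) -> None:
        pass
    def delete_project(self) -> None:
        pass
    def update_project(self) -> None:
        pass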
```
#### File: eds/eds/main.py
```python
from __future__ import annotations
import logging
from eds.interfaces.worker import Worker
from eds.event import Event
from eds.extend import get_plugin
from eds.project import Project
logger = logging.getLogger(__name__)
def main(event: Event) -> None:
"""The main routine to process events. Includes exception logging.
Args:
event (Event): The commit event to process.
"""
    try:
        process(event)
    except Exception:
        logger.exception("Failed to process event")
def process(event: Event) -> None:
"""The main routine to process events.
Args:
event (Event): The commit event to process.
"""
worker = get_plugin(Worker.interface_name, event.worker_plugin)
if not event.eds_built:
worker.build_eds(event.eds_version)
return
else:
project = Project(event)
if not event.eds_plugins_built:
worker.build_eds(event.eds_version, project.plugin_versions)
return
for pipeline in project.pipelines:
pipeline.build()
```
#### File: eds/tests/test_plugin.py
```python
from eds.plugin import BasePlugin
class PluginChild(BasePlugin):
pass
class PluginParent(BasePlugin):
@property
def children(self):
return [PluginChild({})]
class PluginGrandParent(BasePlugin):
@property
def children(self):
return [PluginParent({})]
def test_get_child_plugins():
p = PluginGrandParent({})
assert len(p.descendants) == 2
assert type(p.descendants[0]).__name__ == 'PluginChild'
assert type(p.descendants[1]).__name__ == 'PluginParent'
def test_id_property():
p = PluginChild({'id': 'my_id'})
assert p.id == 'my_id'
def test_yaml_property():
p = PluginChild({'some': 'yaml'})
assert p.yaml == {'some': 'yaml'}
``` |
{
"source": "jleopold28/snippets-and-notes",
"score": 4
} |
#### File: ansible/notes/python.py
```python
def firstRepeatedWord(s):
words = s.replace(',',' ').replace(';',' ').replace(':',' ').replace('-',' ').split(' ')
for word_check in words[::-1]:
words.remove(word_check)
for word in words:
if word == word_check:
return word
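# Hedged alternative sketch (not part of the original notes): a single left-to-right pass
# that returns the first word whose second occurrence appears earliest; behaviour on ties
# differs from the version above, which scans from the end of the sentence.
import re
def first_repeated_word(s):
    seen = set()
    for word in filter(None, re.split(r'[ ,;:-]+', s)):
        if word in seen:
            return word
        seen.add(word)
    return None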
```
#### File: machine-learning/general/kpca.py
```python
import scipy.spatial.distance as dist
import scipy.linalg as linalg
import numpy as np
def rbf_kernel_pca(data, gamma, n_components):
"""
rbf kernel pca implementation
params -
numpy ndarray data: shape = [n_samples, n_features]
float gamma: tuning param of rbf kernel
int n_components: num components to return
returns -
numpy ndarray projected data, list eigvals: shape = [n_samples, k_features]
"""
# calc pairwise squared euclidean distances in MxN dataset
sq_dists = dist.pdist(data, 'sqeuclidean')
# convert pairwise distances into square matrix
mat_sq_dists = dist.squareform(sq_dists)
# compute symmetric kernel matrix
k_mat = np.exp(-gamma * mat_sq_dists)
# center kernel matrix
flat = k_mat.shape[0]
one_flat = np.ones((flat, flat)) / flat
k_mat = (k_mat - one_flat.dot(k_mat) -
k_mat.dot(one_flat) + one_flat.dot(k_mat).dot(one_flat))
# obtain eigpairs from centered kernel matrix
# scipy.eigh returns them sorted
eigvals, eigvecs = linalg.eigh(k_mat)
# collect top k eigvecs (projected samples, eigvals)
# these are informally alphas and lambdas
    return (np.column_stack([eigvecs[:, -index]
                             for index in range(1, n_components + 1)]),
            [eigvals[-index] for index in range(1, n_components + 1)])
def project_data(data_proj, data, gamma, alphas, lambdas):
"""project a data point"""
    pair_dist = np.array([np.sum((data_proj - row)**2) for row in data])
return np.exp(-gamma * pair_dist).dot(alphas / lambdas)
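# Hedged usage sketch (not part of the original notes): separate two half-moons with the
# RBF kernel PCA above; assumes scikit-learn is available for the toy dataset.
if __name__ == "__main__":
    from sklearn.datasets import make_moons
    X, _ = make_moons(n_samples=100, random_state=123)
    X_kpca, eigvals = rbf_kernel_pca(X, gamma=15, n_components=2)
    print(X_kpca.shape, eigvals)  # (100, 2) plus the two leading eigenvalues
    x_new = X[25]  # re-project one of the training points
    print(project_data(x_new, X, gamma=15, alphas=X_kpca, lambdas=eigvals))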
``` |
{
"source": "jlep/freestylesvg",
"score": 2
} |
#### File: jlep/freestylesvg/svg_visible.py
```python
import os
import re
import bpy
from freestyle import *
from freestyle.functions import *
from freestyle.predicates import *
from freestyle.types import *
from freestyle.shaders import *
from parameter_editor import *
from freestyle.chainingiterators import *
# select
preds = [
pyNatureUP1D(Nature.SILHOUETTE),
pyNatureUP1D(Nature.CREASE),
ContourUP1D()
]
upred = join_unary_predicates(preds, OrUP1D)
upred = AndUP1D(QuantitativeInvisibilityUP1D(0), upred)
Operators.select(upred)
# chain
Operators.bidirectional_chain(ChainSilhouetteIterator())
# sort
Operators.sort(pyZBP1D())
scene = getCurrentScene()
current_frame = scene.frame_current
# shade and write svg
path = re.sub(r'\.blend$|$', '%06d.svg' % current_frame, bpy.data.filepath)
f = open(path, "a")
w = scene.render.resolution_x * scene.render.resolution_percentage / 100
h = scene.render.resolution_y * scene.render.resolution_percentage / 100
class SVGPathShader(StrokeShader):
def shade(self, stroke):
f.write('<path fill="none" stroke="black" stroke-width="2" d="\nM ')
for v in stroke:
x, y = v.point
f.write('%.3f,%.3f ' % (x, h - y))
f.write('"\n />')
shaders_list = [
SamplingShader(50),
SVGPathShader(),
ConstantColorShader(0, 0, 1),
ConstantThicknessShader(10)
]
f.write('<g id="layer_visible" inkscape:groupmode="layer" inkscape:label="visible">\n')
f.write('<g id="visible">\n')
Operators.create(TrueUP1D(), shaders_list)
f.write('</g>\n')
f.write('</g>\n')
f.close()
``` |
{
"source": "jlepinski/pyconvcli",
"score": 2
} |
#### File: pyconvcli/test_pyconvcli_internal_cli/cli.py
```python
from pyconvcli import PyConvCli
import os
def main():
cli = PyConvCli('test_pyconvcli_internal_cli',os.path.dirname(os.path.realpath(__file__)),'pyconvcli-test')
cli.run()
def visualize():
    cli = PyConvCli('test_pyconvcli_internal_cli', os.path.dirname(os.path.realpath(__file__)), 'pyconvcli-test')
    args, parsers = cli.parse_args()
    cli.parsers = parsers
cli.visualize()
``` |
{
"source": "jlerasmus/ambianic-edge",
"score": 2
} |
#### File: ambianic/webapp/flaskr.py
```python
import os
import logging
import time
from pathlib import Path
import flask
from flask import Flask, request, jsonify, json
from flask_cors import CORS
from flask.logging import default_handler
from requests import get
from werkzeug.serving import make_server
from werkzeug.exceptions import HTTPException
from ambianic import config, DEFAULT_DATA_DIR, __version__
from ambianic.util import ServiceExit, ThreadedJob, ManagedService
from ambianic.webapp.server import samples, config_sources
log = logging.getLogger(__name__)
# configuration
DEBUG = True
class FlaskJob(ManagedService):
"""Flask based managed web service."""
def __init__(self, config):
"""Create Flask based web service."""
self.config = config
data_dir = None
if config:
data_dir = config.get('data_dir', None)
if not data_dir:
data_dir = DEFAULT_DATA_DIR
self.srv = None
app = create_app(data_dir=data_dir)
ip_address = '0.0.0.0'
port = 8778
log.info('starting flask web server on %s:%d', ip_address, port)
self.srv = make_server(ip_address, port, app)
ctx = app.app_context()
ctx.push()
with app.app_context():
flask.current_app.data_dir = data_dir
self.flask_stopped = True
log.debug('Flask process created')
def start(self, **kwargs):
"""Start service."""
log.debug('Flask starting main loop')
self.flask_stopped = False
try:
self.srv.serve_forever()
except ServiceExit:
log.info('Service exit requested')
self.flask_stopped = True
log.debug('Flask ended main loop')
def stop(self):
"""Stop service."""
if not self.flask_stopped:
log.debug('Flask stopping main loop')
self.srv.shutdown()
log.debug('Flask main loop ended')
def healthcheck(self):
"""Report health status."""
return time.monotonic(), 'OK'
class FlaskServer(ManagedService):
""" Thin wrapper around Flask constructs.
Allows controlled start and stop of the web app server
in a separate process.
Parameters
----------
config : yaml
reference to the yaml configuration file
"""
def __init__(self, config):
self.config = config
self.flask_job = None
def start(self, **kwargs):
log.info('Flask server job starting...')
f = FlaskJob(self.config)
self.flask_job = ThreadedJob(f)
self.flask_job.start()
log.info('Flask server job started')
def healthcheck(self):
# Note: Implement actual health check for Flask
# See if the /healthcheck URL returns a 200 quickly
return time.monotonic(), True
def heal(self):
"""Heal the server.
TODO: Keep an eye for potential scenarios that cause this server to
become unresponsive.
"""
def stop(self):
if self.flask_job:
log.info('Flask server job stopping...')
self.flask_job.stop()
self.flask_job.join()
log.info('Flask server job stopped.')
def create_app(data_dir=None):
log.debug('Creating Flask app...')
# if Ambianic is in INFO or DEBUG mode, pass that info on to Flask
if log.level <= logging.INFO:
os.environ['FLASK_ENV'] = 'development'
# create and configure the web app
# set the project root directory as the static folder, you can set others.
app = Flask(__name__, instance_relative_config=True)
app.logger.removeHandler(default_handler)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
# enable CORS for development
CORS(app, resources={r'/*': {'origins': '*'}})
# [Sitemap]
# sitemap definitions follow
# a simple page that says hello
@app.route('/')
def hello():
return 'Ambianic Edge! Helpful AI for home and business automation.'
# healthcheck page available to docker-compose
# and other health monitoring tools
@app.route('/healthcheck')
def health_check():
return 'Ambianic Edge is running in a cheerful healthy state!'
# live view of ambianic pipelines
@app.route('/pipelines')
def view_pipelines():
return flask.render_template('pipelines.html')
# healthcheck page available to docker-compose
# and other health monitoring tools
@app.route('/api/status')
def get_status():
response_object = {'status': 'OK', 'version': __version__}
resp = jsonify(response_object)
return resp
@app.route('/api/timeline', methods=['GET'])
@app.route('/api/timeline.json', methods=['GET'])
def get_timeline():
response_object = {'status': 'success'}
req_page = request.args.get('page', default=1, type=int)
        log.debug('Requested timeline events page: %d', req_page)
nonlocal data_dir
resp = samples.get_timeline(page=req_page, data_dir=data_dir)
response_object['timeline'] = resp
log.debug('Returning %d timeline events', len(resp))
# log.debug('Returning samples: %s ', response_object)
resp = jsonify(response_object)
return resp
@app.route('/api/samples', methods=['GET', 'POST'])
def get_samples():
response_object = {'status': 'success'}
if request.method == 'POST':
post_data = request.get_json()
new_sample = {
'title': post_data.get('title'),
'author': post_data.get('author'),
'read': post_data.get('read')
}
samples.add_sample(new_sample)
response_object['message'] = 'Sample added!'
response_object['sample_id'] = new_sample["id"]
log.debug('Sample added: %s ', new_sample)
else:
req_page = request.args.get('page', default=1, type=int)
resp = samples.get_samples(page=req_page)
response_object['samples'] = resp
log.debug('Returning %d samples', len(resp))
# log.debug('Returning samples: %s ', response_object)
resp = jsonify(response_object)
return resp
@app.route('/api/samples/<sample_id>', methods=['PUT', 'DELETE'])
def update_sample(sample_id):
response_object = {'status': 'success'}
if request.method == 'PUT':
post_data = request.get_json()
sample = {
'id': sample_id,
'title': post_data.get('title'),
'author': post_data.get('author'),
'read': post_data.get('read')
}
log.debug('update_sample %s', sample)
samples.update_sample(sample)
response_object['message'] = 'Sample updated!'
if request.method == 'DELETE':
samples.delete_sample(sample_id)
response_object['message'] = 'Sample removed!'
return jsonify(response_object)
@app.route('/api/config', methods=['GET'])
def get_config():
return jsonify(config.as_dict())
@app.route(
'/api/config/source/<source_id>',
methods=['GET', 'PUT', 'DELETE']
)
def handle_config_source(source_id):
if request.method == 'DELETE':
config_sources.remove(source_id)
return jsonify({'status': 'success'})
if request.method == 'PUT':
source = request.get_json()
config_sources.save(source_id, source)
return jsonify(config_sources.get(source_id))
# sanity check route
@app.route('/api/ping', methods=['GET'])
def ping():
response_object = 'pong'
return jsonify(response_object)
@app.route('/static/<path:path>')
def static_file(path):
return flask.send_from_directory('static', path)
@app.route('/api/data/<path:path>')
def data_file(path):
data_path = Path(DEFAULT_DATA_DIR).resolve()
log.info('Serving static data file from: %r', data_path / path)
return flask.send_from_directory(data_path, path)
@app.route('/client', defaults={'path': 'index.html'})
@app.route('/client/', defaults={'path': 'index.html'})
@app.route('/client/<path:path>')
def client_file(path):
if log.level <= logging.DEBUG: # development mode
hostname = flask.request.host.split(':')[0]
base_uri = 'http://{host}:1234/'.format(host=hostname)
return get(f'{base_uri}{path}').content
# production mode
return flask.send_from_directory('client/dist', path)
@app.errorhandler(Exception)
def handle_exception(e: Exception):
"""Return JSON instead of HTML for HTTP errors."""
# start with the correct headers and status code from the error
if isinstance(e, HTTPException):
response = e.get_response()
response.content_type = "application/json"
# replace the body with JSON
response.data = json.dumps({
"code": e.code,
"error": e.description,
})
return response
# generic error handler
log.error("Request failed")
log.exception(e)
return jsonify(
code=500,
error="Request failed"
), 500
# @app.route('/', defaults={'path': 'index.html'})
# @app.route('/<path:path>')
# def client_all(path):
# return flask.send_from_directory('client/dist', path)
log.debug('Flask url map: %s', str(app.url_map))
log.debug('Flask config map: %s', str(app.config))
log.debug('Flask running in %s mode',
'development' if app.config['DEBUG'] else 'production')
log.debug('Flask app created.')
return app
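# Hedged usage sketch (not part of the original module): exercise two simple endpoints with
# Flask's built-in test client; assumes the ambianic package and its config are importable.
if __name__ == "__main__":
    app = create_app()
    with app.test_client() as client:
        print(client.get("/api/status").get_json())  # {'status': 'OK', 'version': ...}
        print(client.get("/api/ping").get_json())    # 'pong'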
``` |
{
"source": "jlerat/pybomwater",
"score": 3
} |
#### File: pybomwater/bom_water/spatial_util.py
```python
from geojson import Feature, FeatureCollection, Point
class spatail_utilty():
def create_geojson_feature(self, lat, long, station_no=None, station_id=None, name=None, long_name=None):
        '''Create a geojson feature that can be appended to a list'''
try:
coords = (float(long),float(lat))
a_point = Point(coords)
except ValueError as e:
return Feature(
geometry = None,
properties = {
'stationNo': station_no,
'stationId': station_id,
'name': name,
'long_name': long_name
}
)
return Feature(
geometry = a_point,
properties = {
'stationNo': station_no,
'stationId': station_id,
'name': name,
'long_name': long_name
}
)
def get_feature_collection(self, features):
return FeatureCollection(features)
def write_features(self, features, path):
collection = FeatureCollection(features)
with open(path, "w") as f:
f.write('%s' % collection)
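# Hedged usage sketch (not part of the original module): build two features, one with valid
# coordinates and one without, then dump them to a GeoJSON file; all values are placeholders.
if __name__ == "__main__":
    util = spatail_utilty()
    features = [
        util.create_geojson_feature("-35.0", "149.0", station_no="12345", name="example station"),
        util.create_geojson_feature("not-a-lat", "149.0", station_no="67890"),
    ]
    util.write_features(features, "stations.geojson")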
``` |
{
"source": "jlerman44/cameo",
"score": 2
} |
#### File: cameo/api/hosts.py
```python
from __future__ import absolute_import, print_function
import os
from functools import partial
import six
from lazy_object_proxy import Proxy
import cameo
from cameo import load_model
from cameo import util
__all__ = ['hosts']
MODEL_DIRECTORY = os.path.join(os.path.join(cameo.__path__[0]), 'models/json')
class Host(object):
def __init__(self, name='', models=None, biomass=None, carbon_sources=None):
models = models or []
biomass = biomass or []
carbon_sources = carbon_sources or []
self.name = name
self.models = util.IntelliContainer()
for id, biomass, carbon_source in zip(models, biomass, carbon_sources):
            # Bind the loop variables as defaults so each lazy proxy keeps its own values.
            def lazy_model_init(path, biomass=biomass, carbon_source=carbon_source):
model = load_model(path)
setattr(model, "biomass", biomass)
setattr(model, "carbon_source", carbon_source)
return model
model = Proxy(partial(lazy_model_init, os.path.join(MODEL_DIRECTORY, id + '.json')))
self.models[id] = model
def __str__(self):
return self.name
class Hosts(object):
def __init__(self, host_spec, aliases=None):
self._host_spec = host_spec
self._hosts = list()
for host_id, information in six.iteritems(self._host_spec):
host = Host(**information)
self._hosts.append(host)
setattr(self, host_id, host)
if aliases and isinstance(aliases, list):
for pair in aliases:
setattr(self, pair[1], getattr(self, pair[0]))
def __iter__(self):
return iter(self._hosts)
def __dir__(self):
return list(self._host_spec.keys())
HOST_SPECS = {
# 'iAF1260', 'iJO1366', 'EcoliCore'
'ecoli': {
'name': '<NAME>',
'models': ('iJO1366',),
'biomass': ('BIOMASS_Ec_iJO1366_core_53p95M',),
'carbon_sources': ('EX_glc__D_e',)
},
# 'iND750',
'scerevisiae': {
'name': '<NAME>',
'models': ('iMM904',),
'biomass': ('BIOMASS_SC5_notrace',),
'carbon_sources': ('EX_glc__D_e',)
}
}
hosts = Hosts(HOST_SPECS, aliases=[('scerevisiae', 'yeast')])
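# Hedged usage sketch (not part of the original module); the attribute-access pattern below
# mirrors cameo's own tests, and the first model access triggers load_model via the proxy.
if __name__ == "__main__":
    print([str(host) for host in hosts])
    ecoli_model = hosts.ecoli.models.iJO1366
    print(ecoli_model.biomass, ecoli_model.carbon_source)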
```
#### File: cameo/cameo/__init__.py
```python
import os
import sys
from cameo import config
from cameo.util import get_system_info, in_ipnb
if sys.version_info[0] == 2:
import imp
def find_module(name):
try:
imp.find_module(name)
return True
except ImportError:
return False
elif sys.version_info[0] == 3:
if sys.version_info[1] <= 3:
from importlib import find_loader as _find
else:
from importlib.util import find_spec as _find
def find_module(name):
return _find(name) is not None
_cameo_path = __path__[0]
_cameo_data_path = os.path.join(_cameo_path, 'data')
# Fix: if matplotlib is installed, importing cameo inside a Jupyter notebook also imports matplotlib, so enable inline plotting.
if find_module("matplotlib") and in_ipnb():
from IPython import get_ipython
ipython = get_ipython()
ipython.magic("matplotlib inline")
system_info = get_system_info()
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from cameo.io import load_model
from cameo import models
from .flux_analysis.analysis import flux_variability_analysis, phenotypic_phase_plane
from .flux_analysis.simulation import fba, pfba
del os, sys, in_ipnb, get_system_info, find_module
```
#### File: visualization/plotting/with_ggplot.py
```python
from __future__ import absolute_import
from math import ceil
import six
from warnings import warn
from ggplot import scale_colour_manual, geom_area, geom_tile, scale_x_continuous, scale_y_continuous, aes, facet_grid
from cameo.util import in_ipnb, inheritdocstring
from cameo.visualization.plotting import AbstractPlotter
@six.add_metaclass(inheritdocstring)
class GGPlotPlotter(AbstractPlotter):
def __init__(self, **options):
warn("ggplot interface is under construction...")
super(GGPlotPlotter, self).__init__(**options)
def production_envelope(self, dataframe, grid=None, width=None, height=None, title=None, points=None,
points_colors=None, palette=None, x_axis_label=None, y_axis_label=None):
palette = self.get_option('palette') if palette is None else palette
width = self.get_option('width') if width is None else width
colors = self._palette(palette, len(dataframe.strain.unique()))
plot = aes(data=dataframe, ymin="lb", ymax="ub", x="value", color=scale_colour_manual(colors)) + geom_area()
if title:
plot += geom_tile(title)
if x_axis_label:
plot += scale_x_continuous(name=x_axis_label)
if y_axis_label:
plot += scale_y_continuous(name=y_axis_label)
return plot
def flux_variability_analysis(self, dataframe, grid=None, width=None, height=None, title=None, palette=None,
x_axis_label=None, y_axis_label=None):
return aes(data=dataframe, )
@property
def _display(self):
if in_ipnb():
from IPython.display import display
return display
@staticmethod
def _make_grid(grid):
columns = ceil(grid.n_rows / len(grid.plots()))
return grid.plot[0] + facet_grid(grid.n_rows, columns, scales="fixed")
```
#### File: cameo/tests/test_api.py
```python
import os
import pickle
import re
import pytest
from cameo import api, load_model
from cameo import models, config
from cameo.api.hosts import Host
from cameo.api.products import Compound
MODELS = os.path.dirname(models.__file__)
UNIVERSALMODEL = load_model(os.path.join(MODELS, 'json/iJO1366.json'))
UNIVERSALMODEL.remove_reactions(UNIVERSALMODEL.exchanges)
def test_api():
mock_host = Host('core',
models=['e_coli_core'],
biomass=['BIOMASS_Ecoli_core_w_GAM'],
carbon_sources=['EX_glc__D_e'])
api.design.debug = True
pathways = api.design.predict_pathways(product=UNIVERSALMODEL.metabolites.ser__L_c, hosts=[mock_host],
database=UNIVERSALMODEL, aerobic=True)
optimization_reports = api.design.optimize_strains(pathways, config.default_view, aerobic=True)
pickle.loads(pickle.dumps(optimization_reports))
assert len(optimization_reports) > 0
def test_compound_repr():
    if not re.match('Open Babel.*', os.popen('obabel').read()):
        pytest.skip('Skipping because OpenBabel is not installed.')
compound = Compound('InChI=1S/H2O/h1H2')
assert re.match(r"^<\?xml version=\"1\.0\"\?>.*</svg>$", compound._repr_svg_().replace('\n', ''))
assert compound._repr_html_() == compound._repr_svg_()
def test_products():
assert api.products.search('3-hydroxy propionate').index[0] == 'MNXM872'
assert len(api.products.search('old spice')) == 0
def test_hosts():
assert api.hosts.ecoli.models.iJO1366.id == 'iJO1366'
assert api.hosts.scerevisiae.models.iMM904.id == 'iMM904'
```
#### File: cameo/tests/test_io.py
```python
from __future__ import absolute_import, print_function
import os
import cobra
import pytest
import cameo
from cameo import load_model
from cameo.config import solvers
try:
import libsbml
except ImportError:
libsbml = None
TESTDIR = os.path.dirname(__file__)
@pytest.fixture(scope="module", params=list(solvers))
def solver_interface(request):
return solvers[request.param]
class TestModelLoading(object):
def test_load_model_pickle_path(self, solver_interface):
model = load_model(os.path.join(TESTDIR, 'data/iJO1366.pickle'), solver_interface=solver_interface)
assert abs(model.optimize().f - 0.9823718127269768) < 10e-6
def test_load_model_pickle_handle(self, solver_interface):
with open(os.path.join(TESTDIR, 'data/iJO1366.pickle'), 'rb') as handle:
model = load_model(handle, solver_interface=solver_interface)
assert abs(model.optimize().f - 0.9823718127269768) < 10e-6
def test_load_model_sbml_path(self, solver_interface):
model = load_model(os.path.join(TESTDIR, 'data/iJO1366.xml'), solver_interface=solver_interface)
assert abs(model.optimize().f - 0.9823718127269768) < 10e-6
def test_load_model_sbml_handle(self, solver_interface):
with open(os.path.join(TESTDIR, 'data/iJO1366.xml')) as handle:
model = load_model(handle, solver_interface=solver_interface)
assert abs(model.optimize().f - 0.9823718127269768) < 10e-6
def test_load_model_sbml_path_set_none_interface(self):
model = load_model(os.path.join(TESTDIR, 'data/EcoliCore.xml'), solver_interface=None)
assert abs(model.optimize().f - 0.8739215069684306) < 10e-6
assert isinstance(model, cobra.Model)
def test_import_model_bigg(self):
model = cameo.models.bigg.e_coli_core
assert model.id == 'e_coli_core'
@pytest.mark.skipif(libsbml is None, reason="minho has fbc < 2, requiring missing lisbml")
def test_import_model_minho(self):
model = cameo.models.minho
if model.status != 'indexed':
pytest.skip('failed to index minho db')
assert model.__getattr__('Ecoli core Model').id == 'Ecoli_core_model'
def test_invalid_path(self):
with pytest.raises(Exception):
load_model("blablabla_model")
``` |
{
"source": "jlerman44/DnaWeaver",
"score": 2
} |
#### File: dnaweaver/AssemblyPlanReport/AssemblyPlanReport.py
```python
from copy import deepcopy
from .ObjectDict import ObjectDict
from . import mixins
class AssemblyPlanReport(
mixins.PlotsMixin,
mixins.FolderReportMixin,
mixins.GenbankExportMixin,
mixins.PdfReportMixin,
):
def __init__(self, plan, sources):
self.plan = ObjectDict.from_dict(plan)
self.sources = ObjectDict.from_dict(sources)
@staticmethod
def from_dnaweaver_quote(quote):
plan = quote.assembly_plan_as_dict()
sources = quote.source.dict_supply_graph()
        return AssemblyPlanReport(plan, sources)
def to_steps_list(self):
plan = deepcopy(self.plan)
nodes = []
def rec(node, depth=0):
if node.get("_visited", False):
return
node["_visited"] = True
assembly_plan = node.get("assembly_plan", [])
node["children"] = [n["id"] for n in assembly_plan]
nodes.append(node)
for other in sorted(
assembly_plan, key=lambda n: n["segment_start"]
):
rec(other)
rec(plan)
return nodes
```
#### File: AssemblyPlanReport/mixins/GenbankExportMixin.py
```python
from copy import deepcopy
from io import StringIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
try:
# Biopython <1.78
from Bio.Alphabet import DNAAlphabet
has_dna_alphabet = True
except ImportError:
# Biopython >=1.78
has_dna_alphabet = False
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
class GenbankExportMixin:
def to_record(self, record=None, record_id=None):
"""Return a Biopython seqrecord of the quote.
>>> record = to_record(solution)
>>> # Let's plot with DnaVu:
>>> from dnavu import create_record_plot
>>> from bokeh.io import output_file, show
>>> output_file("view.html")
>>> plot = create_record_plot(record)
>>> show(plot)
"""
if record_id is None:
record_id = self.id
if record is None:
if has_dna_alphabet: # Biopython <1.78
record = SeqRecord(Seq(self.sequence, DNAAlphabet()), id=record_id)
else:
record = SeqRecord(Seq(self.sequence), id=record_id)
record.annotations["molecule_type"] = "DNA"
else:
record = deepcopy(record)
if self.plan is not None:
features = [
SeqFeature(
FeatureLocation(q.segment_start, q.segment_end, 1),
type="misc_feature",
qualifiers={
"label": "%s - From %s" % (q.id, q.source),
"name": q.id,
"source": q.source,
"price": q.price,
"lead_time": q.lead_time,
},
)
for q in self.plan
]
record.features = features + record.features
return record
def write_genbank(
self, filename=None, filehandle=None, record=None, record_id=None
):
record = self.to_record(record=record, record_id=record_id)
if filehandle is None:
with open(filename, "w+") as f:
SeqIO.write(record, f, "genbank")
else:
SeqIO.write(record, filehandle, "genbank")
def write_all_sequence_records(self, target):
for step in self.to_steps_list():
record = self.plan_step_to_record(step)
path = target._file(step.id + ".gb").open("w")
SeqIO.write(record, path, "genbank")
@staticmethod
def plan_step_to_record(plan_step, record=None, record_id=None):
"""Return a Biopython seqrecord of the quote.
>>> record = to_SeqRecord(solution)
>>> # Let's plot with DnaVu:
>>> from dnavu import create_record_plot
>>> from bokeh.io import output_file, show
>>> output_file("view.html")
>>> plot = create_record_plot(record)
>>> show(plot)
"""
if record_id is None:
record_id = plan_step.id
if record is None:
if has_dna_alphabet: # Biopython <1.78
record = SeqRecord(Seq(plan_step.sequence, DNAAlphabet()), id=record_id)
else:
record = SeqRecord(Seq(plan_step.sequence), id=record_id)
record.annotations["molecule_type"] = "DNA"
else:
record = deepcopy(record)
if plan_step.assembly_plan is not None:
features = [
SeqFeature(
FeatureLocation(q.segment_start, q.segment_end, 1),
type="misc_feature",
qualifiers={
"label": "%s - From %s" % (q.id, q.source),
"name": q.id,
"source": q.source,
"price": q.price,
"lead_time": q.lead_time,
},
)
for q in plan_step.assembly_plan
]
record.features = features + record.features
return record
```
#### File: mixins/PlotsMixin/ColorsMixin.py
```python
import colorsys
import itertools
import matplotlib.colors as cl
import matplotlib.cm as cm
def hls_to_hex(hue, luminance, saturation):
"""Return (R,G,B) equivalent of a hue/staturation/value color."""
return cl.rgb2hex(colorsys.hls_to_rgb(hue, luminance, saturation))
def rgb_to_hex(red, green, blue):
"""Return color as #rrggbb for the given color values."""
return "#%02x%02x%02x" % (int(red), int(green), int(blue))
class ColorsMixin:
def autocolor_quote_sources(
self,
hues=(0.635, 0.047, 0.117),
saturations=(0.9, 0.7, 0.5, 0.3),
min_lum=0.2,
max_lum=0.8,
):
"""Auto-add a `_report_color` field to the sources in in quote.sources.
Sources at the same depth share the same luminance.
"""
colors = itertools.cycle(
[
rgb_to_hex(*[255 * e ** 0.4 for e in cm.Paired(0.13 * i % 1.0)][:3])
for i in range(30)
]
)
for _name, source in sorted(self.sources.items()):
color = next(colors)
source._report_color = color
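# Quick sanity check of the helpers above (not part of the original module):
#   rgb_to_hex(255, 0, 0) == "#ff0000"
#   hls_to_hex(0.0, 0.5, 1.0) == "#ff0000"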
```
#### File: mixins/PlotsMixin/matplotlib_export.py
```python
from io import StringIO, BytesIO
from base64 import b64encode
def matplotlib_figure_to_file_string(fig, format="svg", **kwargs):
"""Return a string of the figure in the requested format."""
if format == "pdf":
output = BytesIO()
else:
output = StringIO()
fig.savefig(output, format=format, **kwargs)
return output.getvalue()
def matplotlib_figure_to_svg_base64_data(fig, **kwargs):
"""Return a string of the form 'data:image/svg+xml;base64,XXX' where XXX
is the base64-encoded svg version of the figure."""
svg_txt = matplotlib_figure_to_file_string(fig, format="svg", **kwargs)
svg_txt = "\n".join(svg_txt.split("\n")[4:])
svg_txt = "".join(svg_txt.split("\n"))
    try:
        # Python 2: b64encode accepts and returns str directly
        return "data:image/svg+xml;base64," + b64encode(svg_txt)
    except TypeError:
        # Python 3: encode the SVG to bytes first, then decode the base64 result
        content = b64encode(svg_txt.encode("ascii"))
        result = (b"data:image/svg+xml;base64," + content).decode("utf-8")
        return str(result)
```
#### File: dnaweaver/biotools/sequence_homologies.py
```python
import tempfile
import time
import os
import subprocess
from Bio.Blast import NCBIXML
import numpy as np
from .sequence_operations import sequence_to_atgc
def blast_sequence(
sequence,
blast_db=None,
subject=None,
word_size=4,
perc_identity=80,
num_alignments=1000,
num_threads=3,
use_megablast=True,
ungapped=True,
):
"""Return a Biopython BLAST record of the given sequence BLASTed
against the provided database.
Parameters
----------
sequence
An ATGC sequence.
blast_db
Path to a BLAST database.
subject
Either a path to a fasta (.fa) file or an ATGC string. Subject to blast
against.
word_size
Word size to use in the blast.
perc_identity
Minimal percentage of identical nucleotides in a match for it to be kept.
num_alignments
Number of alignments to keep.
num_threads
Number of threads for the BLAST.
use_megablast
Whether to use Megablast.
ungapped
No-gaps matches only ?
Examples
--------
>>> blast_record = blast_sequence("ATTGTGCGTGTGTGCGT", "blastdb/ecoli")
>>> for alignment in blast_record.alignments:
>>> for hit in alignment.hsps:
>>> print (hit.identities)
"""
xml_file, xml_name = tempfile.mkstemp(".xml")
fasta_file, fasta_name = tempfile.mkstemp(".fa")
sequence = sequence_to_atgc(sequence)
with open(fasta_name, "w+") as f:
f.write(">seq\n" + sequence)
if subject is not None:
close_subject = True
if not subject.endswith(".fa"):
remove_subject = True
_subject_file, fasta_subject_name = tempfile.mkstemp(".fa")
with open(fasta_subject_name, "w+") as f:
f.write(">subject\n" + subject)
subject = fasta_subject_name
else:
remove_subject = False
    else:
        close_subject = False
        remove_subject = False
p = subprocess.Popen(
[
"blastn",
"-out",
xml_name,
"-outfmt",
"5",
"-num_alignments",
str(num_alignments),
"-query",
fasta_name,
]
+ (["-db", blast_db] if blast_db is not None else ["-subject", subject])
+ (["-ungapped"] if ungapped else [])
+ (["-task", "megablast"] if use_megablast else [])
+ [
"-word_size",
str(word_size),
"-num_threads",
str(num_threads),
"-dust",
"no",
"-evalue",
"0.01",
"-perc_identity",
str(perc_identity),
],
close_fds=True,
stderr=subprocess.PIPE,
)
res, _blast_err = p.communicate()
p.wait()
error = None
for i in range(3):
try:
with open(xml_name, "r") as f:
res = list(NCBIXML.parse(f))
os.fdopen(xml_file, "w").close()
os.fdopen(fasta_file, "w").close()
os.remove(xml_name)
os.remove(fasta_name)
if close_subject:
open(subject, "w").close()
if remove_subject:
os.remove(subject)
if len(res) == 1:
return res[0]
else:
return res
break
except ValueError as err:
error = err
time.sleep(0.1)
else:
raise ValueError("Problem reading the blast record: " + str(error))
def make_blast_db(fasta_input, target):
proc = subprocess.Popen(
["makeblastdb", "-in", fasta_input, "-dbtype", "nucl", "-out", target]
)
proc.wait()
def perfect_match_locations_in_hsp(hsp, span_cutoff=10):
"""Return the locations of perfect matches in a BLAST HSP.
Only locations with a span above span_cutoff are kept.
"""
if hsp.align_length < span_cutoff:
return []
arr = np.frombuffer(hsp.match.encode(), dtype="uint8")
indices = [0] + list((arr != 124).nonzero()[0]) + [len(arr)]
return [
(start + hsp.query_start, end + hsp.query_start)
for start, end in zip(indices, indices[1:])
if end - start >= span_cutoff
]
def largest_common_substring(query, target, max_overhang):
"""Return the largest common substring between `query` and `target`.
Find the longest substring of query that is contained in target.
If the common substring is too much smaller than `query` False is returned,
else the location `(start, end)` of the substring in `target` is returned.
Parameters:
-----------
query (str)
The sequence to be found in target (minus some overhangs possibly).
target (str)
The sequence in which to find `query`.
max_overhang
Maximal size allowed for the flanking regions of `query` that would
not be contained in `target`.
Examples
--------
>>> seqA = '-----oooooooo'
>>> seqB = 'oooooo-----tttt'
>>> largest_common_substring(seqA, seqA, 80) # == (0, 12)
>>> largest_common_substring(seqA, seqB, 80) # == (5, 11)
Notes:
------
This is intended for finding whether `query` can be extracted from `target`
using PCR. See the PcrExtractionStation implementation in DnaSupplier.py.
"""
# The trick here is to start with the central region of "query".
# This region is initially as small as max_overhang allows, and it is
# progressively expanded on the sides
max_overhang = min(max_overhang, int(len(query) / 2))
start, end = max_overhang, len(query) - max_overhang
if query[start:end] not in target:
return False
while (start >= 0) and (query[start:end] in target):
start -= 1
start += 1
while (end < len(query)) and (query[start:end] in target):
end += 1
end -= 1
return start, end
```
#### File: dnaweaver/DnaAssemblyMethod/BluntEndAssemblyMethod.py
```python
from .DnaAssemblyMethod import DnaAssemblyMethod
class BluntEndAssemblyMethod(DnaAssemblyMethod):
def compute_fragment_for_sequence_segment(self, sequence, segment, **kw):
start, end = segment
return sequence[start:end]
```
#### File: dnaweaver/DnaAssemblyMethod/OverlapingAssemblyMethod.py
```python
from .DnaAssemblyMethod import DnaAssemblyMethod
from ..biotools import reverse_complement
class OverlapingAssemblyMethod(DnaAssemblyMethod):
"""General class for all overlapping assembly methods.
Parameters
----------
    overhang_selector
      A segment selector that decides the location and length of the homology
      arm ("overhang") shared by consecutive fragments.
"""
name = "Overlaping Assembly"
alternate_fragments_orientation = False
def __init__(self, overhang_selector, **properties):
super(OverlapingAssemblyMethod, self).__init__(**properties)
selector = overhang_selector
self.overhang_selector = selector
if selector.has_location_filter:
self.cut_location_constraints.append(selector.location_filter_method)
def compute_fragment_for_sequence_segment(self, sequence, segment, **kw):
selector = self.overhang_selector.compute_fragment_for_sequence_segment
fragment = selector(sequence, segment)
if self.alternate_fragments_orientation:
if kw.get("segment_position", 0) % 2:
fragment = reverse_complement(fragment)
return fragment
class GibsonAssemblyMethod(OverlapingAssemblyMethod):
"""Gibson Assembly Method. Just another overlap-method"""
name = "Gibson Assembly"
class OligoAssemblyMethod(OverlapingAssemblyMethod):
"""The Build-a-Genome Assembly Method. Just another overlap-method"""
alternate_fragments_orientation = True
name = "Oligo Assembly"
```
#### File: dnaweaver/DnaQuote/ExportsMixin.py
```python
import json
from copy import deepcopy
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
try:
# Biopython <1.78
from Bio.Alphabet import DNAAlphabet
has_dna_alphabet = True
except ImportError:
# Biopython >=1.78
has_dna_alphabet = False
from Bio import SeqIO
from Bio.SeqFeature import SeqFeature, FeatureLocation
from io import StringIO
from ..AssemblyPlanReport import AssemblyPlanReport
class ExportsMixin:
def tree_as_list(self):
"""Return a list containing the current AssemblyOperation and all its
sub-operations and their respective sub-operations.
Said otherwise, it flattens the assembly tree into the list of all
nodes.
"""
result = [self]
if self.assembly_plan is not None:
result += sum(
[child.tree_as_list() for segment, child in self.assembly_plan.items()],
[],
)
return result
def assembly_plan_as_dict(self, as_json=False, json_indent=None):
"""Return a JSON-like version of the nested tree.
Parameters
----------
as_json
If True, a JSON string is returned, else the result is a dict object.
json_indent
number of spaces in the JSON indentation (for pretty printing). The
default None means that the JSON will be on one line (TODO: check).
Returns
-------
{
"id": self.id,
"source": self.source.name,
"price": self.price,
"lead_time": self.lead_time,
"sequence": self.sequence,
"message": self.message,
"metadata" = self.metadata,
"assembly_plan": { (start1, end1): {(subquote_1)},
(start2, end2): {(subquote_2)},
}
}
"""
final_location = (
self.final_location if hasattr(self, "final_location") else None
)
matching_segment = (
self.matching_segment if hasattr(self, "matching_segment") else None
)
assembly_plan = []
if self.assembly_plan is not None:
for (segment, quote) in self.assembly_plan.items():
quote_as_dict = quote.assembly_plan_as_dict()
quote_as_dict["segment_start"] = segment[0]
quote_as_dict["segment_end"] = segment[1]
assembly_plan.append(quote_as_dict)
tree = {
"id": self.id,
"source": self.source.name,
"price": self.price,
"lead_time": self.lead_time,
"sequence": self.sequence,
"message": self.message,
"metadata": self.metadata,
"assembly_plan": assembly_plan,
"final_location": final_location,
"matching_segment": matching_segment,
"accepted": self.accepted,
}
metadata = tree["metadata"]
if "via" in metadata:
metadata["via"] = [
station if isinstance(station, str) else station.name
for station in metadata["via"]
]
if as_json:
return json.dumps(tree, indent=json_indent)
else:
return tree
def to_record(self, record=None, record_id=None):
"""Return a Biopython seqrecord of the quote.
>>> record = to_record(solution)
>>> # Let's plot with DnaVu:
>>> from dnavu import create_record_plot
>>> from bokeh.io import output_file, show
>>> output_file("view.html")
>>> plot = create_record_plot(record)
>>> show(plot)
"""
if record_id is None:
record_id = self.id
if record is None:
if has_dna_alphabet: # Biopython <1.78
record = SeqRecord(Seq(self.sequence, DNAAlphabet()), id=record_id)
else:
record = SeqRecord(Seq(self.sequence), id=record_id)
record.annotations["molecule_type"] = "DNA"
else:
record = deepcopy(record)
if self.assembly_plan is not None:
features = [
SeqFeature(
FeatureLocation(segment[0], segment[1], 1),
type="Feature",
qualifiers={
"name": quote.id,
"source": quote.source,
"price": quote.price,
"lead_time": quote.lead_time,
},
)
for segment, quote in self.assembly_plan.items()
]
record.features = features + record.features
return record
def write_genbank(
self, filename=None, filehandle=None, record=None, record_id=None
):
record = self.to_record(record=record, record_id=record_id)
if filename is not None:
with open(filename, "w+") as f:
SeqIO.write(record, f, "genbank")
else:
output = StringIO()
SeqIO.write(record, output, "genbank")
return output.getvalue()
def to_assembly_plan_report(
self, refine_fragments_locations=True, autocolor_quotes=True
):
"""Convert the quote into a full assembly plan data structure which
can be used to generate assembly reports."""
if refine_fragments_locations:
self.compute_fragments_final_locations()
if not self.full_assembly_plan_computed:
self.compute_full_assembly_plan()
original_source = self.source
if "via" in self.metadata:
# intermediary comparator of the quote
original_source = self.metadata["via"][0]
report = AssemblyPlanReport(
plan=self.assembly_plan_as_dict(),
sources=original_source.dict_supply_graph(),
)
if autocolor_quotes:
report.autocolor_quote_sources()
return report
```
#### File: dnaweaver/DnaQuote/PostProcessingMixin.py
```python
import itertools as itt
import os
import tempfile
from ..biotools import blast_sequence
class PostProcessingMixin:
def compute_full_assembly_plan(self, id_prefix="S", id_digits=5):
""" """
counter = itt.count()
def rec(quote):
if not quote.accepted:
return quote
if any(
[
hasattr(quote.source, attr)
for attr in ["supplier", "primers_supplier"]
]
):
if quote.assembly_plan is None:
quote = quote.source.get_quote(
quote.sequence,
max_lead_time=quote.lead_time,
with_assembly_plan=True,
)
segments = {
segment: rec(subquote)
for segment, subquote in sorted(
quote.assembly_plan.items(), key=lambda item: item[0]
)
}
quote.assembly_plan = segments
if id_prefix:
index = next(counter)
quote.id = "{id_prefix}_{index:0{id_digits}}".format(
id_prefix=id_prefix, index=index, id_digits=id_digits
)
return quote
rec(self)
if id_prefix:
index = next(counter)
self.id = "{id_prefix}_{index:0{id_digits}}".format(
id_prefix=id_prefix, index=index, id_digits=id_digits
)
self.full_assembly_plan_computed = True
def compute_fragments_final_locations(self):
"""Compute the exact final location of the fragments in the final
sequence.
"""
if not self.full_assembly_plan_computed:
self.compute_full_assembly_plan()
quotes = self.tree_as_list()
quotes_dict = {quote.id: quote for quote in quotes}
_, temp_fasta = tempfile.mkstemp(suffix=".fa")
with open(temp_fasta, "w+") as f:
for quote in quotes:
f.write(">%s\n%s\n" % (quote.id, quote.sequence))
results = blast_sequence(
self.sequence, subject=temp_fasta, word_size=10, perc_identity=100
)
if isinstance(results, list):
alignments = sum([rec.alignments for rec in results], [])
else:
alignments = results.alignments
for al in alignments:
hit = max(al.hsps, key=lambda hit: hit.align_length)
final_location = sorted((hit.query_start, hit.query_end))
matching_segment = sorted((hit.sbjct_start, hit.sbjct_end))
quotes_dict[al.hit_def].final_location = final_location
quotes_dict[al.hit_def].matching_segment = matching_segment
os.remove(temp_fasta)
def propagate_deadline(self, deadline):
"""Add a `deadline` attribute to the quote and propagate it to
the quote's children by taking into account the duration of operations.
For instance if "self" has a duration of 5 and receives a deadline
of 8, the quotes that "self" depends on will receive a deadline of
8-5=3.
"""
self.deadline = deadline
children_deadline = deadline - self.step_duration
if self.assembly_plan is not None:
for segment, child in self.assembly_plan.items():
child.propagate_deadline(children_deadline)
```
#### File: dnaweaver/DnaSupplier/builtin_constraints.py
```python
from Bio import Restriction
import re
from ..biotools import gc_content, reverse_complement
class NoPatternConstraint:
"""Constraint class forbidding a given pattern in DNA sequences.
Class of callables (sequence)-> True/False whether the sequence contains
the pattern.
Can be useful for defining constraints in DNA assembly methods or
DNA providers.
The interest of having this as a class is that a DnaSupplier using this
constraint can be displayed as a string with the pattern appearing
explicitly, which would not be the case for a function
Parameters
----------
pattern=None, enzyme=None, is_regex=False, with_revcomp=True
"""
def __init__(self, pattern=None, enzyme=None, is_regex=False, with_revcomp=True):
self.biopython_enzyme = None
if enzyme is not None:
if enzyme in Restriction.__dict__:
biopython_enzyme = Restriction.__dict__[enzyme]
if all([c in "ATGC" for c in biopython_enzyme.site]):
pattern = biopython_enzyme.site
else:
self.biopython_enzyme = biopython_enzyme
else:
raise ValueError("Unknown enzyme: %s" % enzyme)
self.enzyme = enzyme
self.pattern = pattern
self.is_regex = is_regex
self.with_revcomp = with_revcomp
if self.with_revcomp and self.pattern:
self.rev_pattern = reverse_complement(pattern)
def __call__(self, sequence):
if self.biopython_enzyme is not None:
return self.biopython_enzyme.search(sequence) == []
        if self.is_regex:
            cm_pattern = re.compile(self.pattern)
            if cm_pattern.search(sequence) is None:
                if self.with_revcomp:
                    sequence_rev = reverse_complement(sequence)
                    return cm_pattern.search(sequence_rev) is None
                else:
                    return True
            else:
                return False
else:
if self.pattern not in sequence:
if self.with_revcomp:
return self.rev_pattern not in sequence
else:
return True
else:
return False
def __repr__(self):
return "No pattern '%s'" % (self.pattern)
class SequenceLengthConstraint:
def __init__(self, min_length=0, max_length=None):
self.min_length = min_length
self.max_length = max_length
def __call__(self, sequence):
L = len(sequence)
upper_bound = self.max_length if self.max_length is not None else L
return self.min_length <= L <= upper_bound
def __str__(self):
left_side = "" if (self.min_length == 0) else ("%d < " % self.min_length)
right_side = "" if (self.max_length is None) else (" < %d" % self.max_length)
return left_side + "length" + right_side
class GcContentConstraint:
def __init__(self, min_gc=0, max_gc=1.0, memoize=False):
self.min_gc = min_gc
self.max_gc = max_gc
        self.memoize = memoize
self.memoization_dict = {}
def __call__(self, sequence):
if self.memoize:
if sequence not in self.memoization_dict:
result = self.min_gc <= gc_content(sequence) <= self.max_gc
self.memoization_dict[sequence] = result
return self.memoization_dict[sequence]
return self.min_gc <= gc_content(sequence) <= self.max_gc
def __str__(self):
left_side = (
"" if (self.min_gc == 0) else ("%.01f" % (self.min_gc * 100) + "% < ")
)
right_side = (
"" if (self.max_gc == 1) else (" < %.01f" % (self.max_gc * 100) + "%")
)
return left_side + "GC" + right_side
```
#### File: DnaSupplier/builtin_suppliers/PartsLibrary.py
```python
from Bio import SeqIO
from ...DnaQuote import DnaQuote
from ..DnaSupplier import DnaSupplier
class PartsLibrary(DnaSupplier):
"""Class for collections of ready-to-assemble parts.
    This class is admittedly under-developed and could be expanded or subclassed
    to accommodate different kinds of registries, etc.
"""
class_description = "Parts Library"
operation_type = "library"
report_fa_symbol = u""
report_fa_symbol_plain = "book"
report_color = "#feeefe"
collections_by_id = {}
library_classes = {}
def __init__(
self,
name,
parts_dict=None,
fasta_file=None,
memoize=False,
price_per_part=0,
lead_time=0,
sequence_constraints=(),
):
self.name = name
self.price_per_part = price_per_part
self.lead_time = lead_time
self.sequence_constraints = sequence_constraints
if fasta_file is not None:
parts_dict = {
record.id: str(record.seq).upper()
for record in SeqIO.parse(fasta_file, "fasta")
}
self.parts_dict = parts_dict
self.inverted_parts_dict = {v: k for k, v in parts_dict.items()}
self.sequences_set = set(self.inverted_parts_dict)
self.memoize = memoize
self.memoize_dict = {}
def get_best_price(
self, sequence, max_lead_time=None, with_assembly_plan=False,
):
"""Returns a price-optimal DnaQuote for the given sequence.
Parameters
----------
sequence (str)
The sequence submitted to the Dna Source for a quote.
max_lead_time (float)
If provided, the quote returned is the best quote (price-wise) whose
lead time is less or equal to max_lead_time.
with_assembly_plan
If True, the assembly plan is added to the quote.
"""
sequence = self.preprocess_sequence(sequence)
if sequence in self.sequences_set:
part_name = self.inverted_parts_dict[sequence]
return DnaQuote(
self,
sequence,
accepted=True,
price=self.price_per_part,
lead_time=self.lead_time,
message="Part: " + part_name,
metadata={"part_name": part_name},
)
return DnaQuote(
self, sequence, accepted=False, message="Sequence not in the library",
)
def preprocess_sequence(self, sequence):
"""Can be used by subclasses e.g. to anonymize wildcard nucleotides"""
return sequence
def additional_dict_description(self):
return {"flanks length": self.flanks_length}
@classmethod
def from_dict(cls, data):
parameters = cls.collections_by_id[data["collection"]]
library_class = parameters.pop("library_class")
if library_class in cls.library_classes:
library_class = cls.library_classes[library_class]
def get(param, default):
return data.get(param, parameters.get(param, default))
return library_class(
name=get("name", "library"),
price_per_part=get("price_per_part", 0),
lead_time=get("lead_time", 0),
parts_dict=get("parts_dict", None),
fasta_file=get("fasta_file", None),
memoize=get("memoize", None),
)
class GoldenGatePartsLibrary(PartsLibrary):
"""Library of parts for Golden Gate Assembly."""
class_description = "Golden Gate parts library"
def __init__(
self,
name,
parts_dict=None,
fasta_file=None,
price_per_part=0,
lead_time=0,
flanks_length=7,
memoize=False,
sequence_constraints=(),
):
PartsLibrary.__init__(
self,
name,
parts_dict=parts_dict,
fasta_file=fasta_file,
memoize=memoize,
sequence_constraints=sequence_constraints,
)
self.flanks_length = flanks_length
def suggest_cuts(self, sequence):
suggested_cuts = []
# + 2 is because the cut falls in the middle of the 4bp linker:
flank = self.flanks_length
for part, part_sequence in self.parts_dict.items():
segment = part_sequence[flank:-flank]
i = sequence.find(segment)
if i != -1:
suggested_cuts += [i + 2, i + len(segment) - 2]
return sorted(list(set(suggested_cuts)))
def suggest_segments(self, sequence):
suggested_segments = []
# + 2 is because the cut falls in the middle of the 4bp linker:
flank = self.flanks_length
for part, part_sequence in self.parts_dict.items():
segment = part_sequence[flank:-flank]
i = sequence.find(segment)
if i != -1:
L = len(segment)
suggested_segments.append(((i + 2, i + L - 2), part))
return sorted(set(suggested_segments))
@classmethod
def preprocess_sequence(cls, sequence):
"""Can be used by subclasses e.g. to anonymize wildcard nucleotides"""
return sequence[:6] + "N" + sequence[7:-7] + "N" + sequence[-6:]
def additional_dict_description(self):
return {
"class": "Golden Gate parts library",
"operation_type": "library",
"flanks length": self.flanks_length,
}
PartsLibrary.library_classes.update(
{"library": PartsLibrary, "golden_gate": GoldenGatePartsLibrary}
)
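# Hedged usage sketch (not part of the original module): a tiny in-memory library answering
# quotes for exact sequence matches only; the part name and sequence are made up.
if __name__ == "__main__":
    library = PartsLibrary(
        "demo_library",
        parts_dict={"promoter_1": "ATGCATGCATGC"},
        price_per_part=5,
        lead_time=2,
    )
    quote = library.get_best_price("ATGCATGCATGC")
    print(quote.accepted, quote.price, quote.metadata)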
```
#### File: DnaSupplier/mixins/SupplyGraphNetwork.py
```python
class SupplyNetworkMixin:
def compute_supply_graph(self):
"""Return elements to plot the supply graph underlying this DnaSupplier.
Returns
-------
edges
A list [(s1,s2), (s1,s3), (s2, s5)...] of couples of DnaSuppliers in a
supplier-supplied relationship.
levels
A list of lists [[s1,s2], [s4,s8,s9]...] of sources. The first
sublist (first level) are all sources at the farthest distance from
the current source in the supply graph, and the last sublist contains
only the current DnaSupplier.
"""
source_max_level = {}
edges = []
def rec(source, depth, seen_sources):
if source in seen_sources:
return
if source not in source_max_level:
source_max_level[source] = depth
else:
source_max_level[source] = max(source_max_level[source], depth)
new_seen_sources = seen_sources + [source]
if hasattr(source, "suppliers"):
for other in source.suppliers:
edges.append((other, source))
rec(other, depth + 1, new_seen_sources)
elif hasattr(source, "supplier"):
edges.append((source.supplier, source))
rec(source.supplier, depth + 1, new_seen_sources)
if hasattr(source, "primers_supplier"):
edges.append((source.primers_supplier, source))
rec(source.primers_supplier, depth + 1, new_seen_sources)
rec(self, depth=0, seen_sources=[])
levels = [
[source for source, level in source_max_level.items() if level == i]
for i in range(max(source_max_level.values()) + 1)
][::-1]
return edges, levels
def dict_supply_graph(self):
sources = {}
def rec(source, depth=0):
if source in sources:
return
if hasattr(source, "is_ghost_source") and source != self:
return
sources[source.name] = source.dict_description()
sources[source.name]["_depth"] = depth
providers = sources[source.name]["providers"] = []
if hasattr(source, "suppliers"):
for other in source.suppliers:
providers.append(other.name)
rec(other, depth + 1)
# if hasattr(source, "dna_supplier"):
# providers.append(source.dna_supplier.name)
# rec(source.dna_supplier, depth + 1)
# if hasattr(source, "primers_supplier"):
# providers.append(source.primers_supplier.name)
# rec(source.primers_supplier, depth + 1)
# if hasattr(source, "dna_suppliers"):
# for other in source.dna_suppliers:
# providers.append(other.name)
# rec(other, depth + 1)
rec(self)
return sources
```
#### File: dnaweaver/SegmentSelector/FixedSizeSegmentSelector.py
```python
from .SegmentSelector import SegmentSelector
class FixedSizeSegmentSelector(SegmentSelector):
"""Selects segments of a constant size.
Great for methods involving large homology regions where melting
temperature matters less.
"""
def __init__(self, segment_size=100, left_addition="", right_addition=""):
self.segment_size = segment_size
self.left_addition = left_addition
self.right_addition = right_addition
def compute_segment_location(self, sequence, index):
return self.get_segment_coordinates(index, self.segment_size, len(sequence))
@property
def max_homology_size(self):
return self.segment_size
def __str__(self):
result = "FixedSize(%dbp)" % self.segment_size
if self.left_addition:
result = ("...%s-" % self.left_addition[-12:]) + result
if self.right_addition:
result = result + ("-%s..." % self.right_addition[:12])
return result
```
#### File: manuscript_examples/a_star_factor_comparison/generate_supply_network.py
```python
import os
from dnaweaver.biotools import gc_content
import dnaweaver as dw
def generate_supply_network(a_star_factor):
oligo_com = dw.CommercialDnaOffer(
name="Oligo.com",
sequence_constraints=[dw.SequenceLengthConstraint(max_length=200)],
pricing=dw.PerBasepairPricing(0.10),
lead_time=7,
)
deluxe_dna_com = dw.CommercialDnaOffer(
name="DeluxeDNA.com",
sequence_constraints=[dw.SequenceLengthConstraint(max_length=4000)],
pricing=dw.PerBasepairPricing(0.20),
lead_time=10,
)
cheap_dna_com = dw.CommercialDnaOffer(
name="CheapDNA.com",
sequence_constraints=[
dw.SequenceLengthConstraint(max_length=4000),
dw.NoPatternConstraint(enzyme="AarI"),
dw.NoPatternConstraint(enzyme="BsaI"),
lambda seq: (0.4 < gc_content(seq) < 0.6),
],
pricing=dw.PerBasepairPricing(0.10),
lead_time=15,
)
# OLIGOS TO BLOCKS ASSEMBLY
oligo_assembly_station = dw.DnaAssemblyStation(
name="Oligo Assembly Station",
assembly_method=dw.OligoAssemblyMethod(
overhang_selector=dw.TmSegmentSelector(
min_size=15, max_size=25, min_tm=50, max_tm=70
),
min_segment_length=40,
max_segment_length=200,
sequence_constraints=[dw.SequenceLengthConstraint(max_length=1500)],
duration=8,
cost=2,
),
supplier=oligo_com,
coarse_grain=20,
fine_grain=False,
a_star_factor=a_star_factor,
)
# BLOCKS TO CHUNKS ASSEMBLY
blocks_sources_comparator = dw.DnaSuppliersComparator(
name="bs_comparator",
suppliers=[oligo_assembly_station, cheap_dna_com, deluxe_dna_com],
memoize=True,
)
gibson_blocks_assembly_station = dw.DnaAssemblyStation(
name="Gibson Blocks Assembly",
assembly_method=dw.GibsonAssemblyMethod(
overhang_selector=dw.FixedSizeSegmentSelector(80),
min_segment_length=1000,
max_segment_length=4000,
duration=8,
cost=16,
),
supplier=blocks_sources_comparator,
coarse_grain=300,
fine_grain=False,
memoize=True,
a_star_factor=a_star_factor,
)
goldengate_blocks_assembly_station = dw.DnaAssemblyStation(
name="Golden Gate Blocks Assembly",
assembly_method=dw.GoldenGateAssemblyMethod(
enzyme="BsmBI",
wildcard_basepair="A",
min_segment_length=1000,
max_segment_length=4000,
duration=5,
cost=6,
),
supplier=blocks_sources_comparator,
coarse_grain=400,
fine_grain=False,
memoize=True,
a_star_factor=a_star_factor,
)
ecoli_genome_path = os.path.join(
"..", "..", "data", "ecoli_blast_db", "ecoli"
)
ecoli_genome = dw.PcrExtractionStation(
"E. coli Genome (PCR)",
primers_supplier=oligo_com,
homology_selector=dw.TmSegmentSelector(),
blast_database=ecoli_genome_path,
max_amplicon_length=10000,
extra_time=3,
extra_cost=1,
)
# CHUNKS TO MEGACHUNKS ASSEMBLY
return dw.DnaAssemblyStation(
name="Chunks assembly (Yeast)",
assembly_method=dw.GibsonAssemblyMethod(
overhang_selector=dw.FixedSizeSegmentSelector(300),
min_segment_length=7000,
max_segment_length=25000,
duration=8,
),
supplier=[
ecoli_genome,
goldengate_blocks_assembly_station,
gibson_blocks_assembly_station,
],
coarse_grain=1000,
fine_grain=None,
a_star_factor=a_star_factor,
memoize=True,
)
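# Hedged usage sketch (not part of the original script): build the network and quote a random
# sequence. Assumes NCBI BLAST+ and the repository's E. coli database are available, as in
# the test suite; the a_star_factor value is a placeholder.
if __name__ == "__main__":
    import random
    random.seed(123)
    sequence = "".join(random.choice("ATGC") for _ in range(10000))
    station = generate_supply_network(a_star_factor="auto")
    station.prepare_network_on_sequence(sequence)
    quote = station.get_quote(sequence, with_assembly_plan=True)
    print(quote.accepted, quote.price, quote.lead_time)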
```
#### File: DnaWeaver/tests/test_full_report.py
```python
import matplotlib
matplotlib.use("Agg")
import os
from dnaweaver import (
PcrExtractionStation,
CommercialDnaOffer,
DnaAssemblyStation,
GibsonAssemblyMethod,
GoldenGateAssemblyMethod,
OligoAssemblyMethod,
DnaSuppliersComparator,
TmSegmentSelector,
FixedSizeSegmentSelector,
PerBasepairPricing,
NoPatternConstraint,
SequenceLengthConstraint,
)
from dnaweaver.biotools import gc_content
SEQUENCE_PATH = os.path.join("tests", "data", "full_example_50kb_sequence.txt")
ECOLI_DB_PATH = os.path.join("tests", "data", "ecoli_blast_db", "ecoli")
def test_full_report():
# OLIGO COMPANIES
a_star_factor = "auto"
memoize = True
oligo_com = CommercialDnaOffer(
name="Oligo.com",
sequence_constraints=[SequenceLengthConstraint(max_length=200)],
pricing=PerBasepairPricing(0.10),
lead_time=7,
)
deluxe_dna_com = CommercialDnaOffer(
name="DeluxeDNA.com",
sequence_constraints=[SequenceLengthConstraint(max_length=4000)],
pricing=PerBasepairPricing(0.20),
lead_time=10,
)
cheap_dna_com = CommercialDnaOffer(
name="CheapDNA.com",
sequence_constraints=[
SequenceLengthConstraint(max_length=4000),
NoPatternConstraint(enzyme="AarI"),
NoPatternConstraint(enzyme="BsaI"),
lambda seq: (0.4 < gc_content(seq) < 0.6),
],
pricing=PerBasepairPricing(0.10),
lead_time=15,
)
# OLIGOS TO BLOCKS ASSEMBLY
oligo_assembly_station = DnaAssemblyStation(
name="Oligo Assembly Station",
assembly_method=OligoAssemblyMethod(
overhang_selector=TmSegmentSelector(
min_size=15, max_size=25, min_tm=50, max_tm=70
),
min_segment_length=40,
max_segment_length=200,
sequence_constraints=[SequenceLengthConstraint(max_length=1500)],
duration=8,
cost=2,
),
supplier=oligo_com,
coarse_grain=20,
fine_grain=False,
a_star_factor=a_star_factor,
)
# BLOCKS TO CHUNKS ASSEMBLY
blocks_sources_comparator = DnaSuppliersComparator(
name="bs_comparator",
suppliers=[oligo_assembly_station, cheap_dna_com, deluxe_dna_com],
memoize=memoize,
)
gibson_blocks_assembly_station = DnaAssemblyStation(
name="Gibson Blocks Assembly",
assembly_method=GibsonAssemblyMethod(
overhang_selector=FixedSizeSegmentSelector(80),
min_segment_length=1000,
max_segment_length=4000,
duration=8,
cost=16,
),
supplier=blocks_sources_comparator,
coarse_grain=300,
fine_grain=False,
memoize=memoize,
a_star_factor=a_star_factor,
)
goldengate_blocks_assembly_station = DnaAssemblyStation(
name="Golden Gate Blocks Assembly",
assembly_method=GoldenGateAssemblyMethod(
enzyme="BsmBI",
wildcard_basepair="A",
min_segment_length=1000,
max_segment_length=4000,
duration=5,
cost=6,
),
supplier=blocks_sources_comparator,
coarse_grain=400,
fine_grain=False,
memoize=memoize,
a_star_factor=a_star_factor,
)
ecoli_genome = PcrExtractionStation(
"E. coli Genome (PCR)",
primers_supplier=oligo_com,
homology_selector=TmSegmentSelector(
min_size=18, max_size=22, min_tm=55, max_tm=65
),
blast_database=ECOLI_DB_PATH,
max_amplicon_length=10000,
extra_time=3,
extra_cost=1,
)
# CHUNKS TO MEGACHUNKS ASSEMBLY
chunks_assembly_station = DnaAssemblyStation(
name="Chunks assembly (Gibson)",
assembly_method=GibsonAssemblyMethod(
overhang_selector=FixedSizeSegmentSelector(300),
min_segment_length=7000,
max_segment_length=25000,
duration=8,
),
supplier=DnaSuppliersComparator(
[
ecoli_genome,
goldengate_blocks_assembly_station,
gibson_blocks_assembly_station,
]
),
coarse_grain=1000,
fine_grain=None,
a_star_factor=a_star_factor,
memoize=memoize,
)
with open(SEQUENCE_PATH, "r") as f:
sequence = f.read()
import time
t0 = time.time()
chunks_assembly_station.prepare_network_on_sequence(sequence)
quote = chunks_assembly_station.get_quote(
sequence, with_assembly_plan=True
)
t1 = time.time()
print("ELAPSED:", "%.02f" % (t1 - t0))
if quote.accepted:
print(quote.assembly_step_summary())
assert 3500 < quote.price < 3600
report = quote.to_assembly_plan_report()
report.write_full_report("@memory")
# report.plot_assembly_timeline(
# deadline=None,
# ax=None,
# rectangle_color="#bbbbff",
# scale=1.0,
# )
``` |
{
"source": "jlerman44/escher",
"score": 2
} |
#### File: escher/escher/plots.py
```python
from escher.quick_server import serve_and_open
from escher import urls
import os
from os.path import dirname, abspath, join, isfile, isdir
from warnings import warn
from urllib2 import urlopen, HTTPError, URLError
import json
import shutil
import appdirs
import re
from jinja2 import Environment, PackageLoader, Template
import codecs
import random
import string
# set up jinja2 template location
env = Environment(loader=PackageLoader('escher', 'templates'))
def get_cache_dir(name=None):
""" Get the cache dir as a string.
name: an optional subdirectory within the cache
"""
cache_dir = join(appdirs.user_cache_dir('escher', appauthor="<NAME>"))
if name is not None:
cache_dir = join(cache_dir, name)
try:
os.makedirs(cache_dir)
except OSError:
pass
return cache_dir
def clear_cache():
"""Empty the contents of the cache directory."""
cache_dir = get_cache_dir()
for root, dirs, files in os.walk(cache_dir):
for f in files:
os.unlink(join(root, f))
for d in dirs:
shutil.rmtree(join(root, d))
def list_cached_maps():
"""Return a list of all cached maps."""
try:
return [x.replace('.json', '') for x in os.listdir(get_cache_dir(name='maps'))]
except OSError:
print 'No cached maps'
return None
def list_cached_models():
"""Return a list of all cached models."""
try:
return [x.replace('.json', '') for x in os.listdir(get_cache_dir(name='models'))]
except OSError:
        print 'No cached models'
return None
def get_an_id():
return unicode(''.join(random.choice(string.ascii_lowercase)
for _ in range(10)))
def load_resource(resource, name, safe=False):
"""Load a resource that could be a file, URL, or json string."""
# if it's a url, download it
if resource.startswith('http://') or resource.startswith('https://'):
try:
download = urlopen(resource)
except URLError as err:
raise err
else:
return download.read()
# if it's a filepath, load it
if os.path.exists(resource):
if (safe):
raise Exception('Cannot load resource from file with safe mode enabled.')
try:
with open(resource, 'r') as f:
loaded_resource = f.read()
_ = json.loads(loaded_resource)
except ValueError as err:
raise ValueError('%s not a valid json file' % name)
else:
return loaded_resource
# try to validate the json
try:
_ = json.loads(resource)
except ValueError as err:
raise ValueError('Could not load %s. Not valid json, url, or filepath' % name)
else:
return resource
raise Exception('Could not load %s.' % name)
class Builder(object):
"""Viewable metabolic map.
    This map will also show metabolic fluxes passed in during construction. It
    can be viewed as a standalone html page inside a browser. Alternatively, the
    representation inside an IPython notebook will also display the map.
    Maps are stored as json files in a cache directory. Maps that are not found
    in the cache will be downloaded from the map repository if available.
Arguments
---------
map_name: a string specifying a map to be downloaded from the Escher web server.
map_json: a json string, or a file path to a json file, or a URL specifying
a json file to be downloaded.
model_name: a string specifying a model to be downloaded from the Escher web
server.
model_json: a json string, or a file path to a json file, or a URL
specifying a json file to be downloaded.
reaction_data: a dictionary with keys that correspond to reaction ids
and values that will be mapped to reaction arrows and labels.
    metabolite_data: a dictionary with keys that correspond to metabolite ids and
values that will be mapped to metabolite nodes and labels.
local_host: a hostname that will be used for any local files in dev
mode. Defaults to the current host.
    safe: if True, then loading files from the filesystem is not allowed. This is
to ensure the safety of using Builder with a web server.
"""
def __init__(self, map_name=None, map_json=None, model_name=None,
model_json=None, reaction_data=None, metabolite_data=None,
local_host='', safe=False):
self.safe = safe
# load the map
self.map_name = map_name
self.map_json = map_json
self.loaded_map_json = None
if map_name and map_json:
warn('map_json overrides map_name')
self.load_map()
# load the model
self.model_name = model_name
self.model_json = model_json
self.loaded_model_json = None
if model_name and model_json:
warn('model_json overrides model_name')
self.load_model()
# set the args
self.reaction_data = reaction_data
self.metabolite_data = metabolite_data
self.local_host = local_host.strip(os.sep)
# make the unique id
self.generate_id()
def generate_id(self):
self.the_id = get_an_id()
def load_model(self):
"""Load the model from input model_json using load_resource, or, secondarily,
from model_name.
"""
model_json = self.model_json
if model_json is not None:
self.loaded_model_json = load_resource(self.model_json,
'model_json',
safe=self.safe)
elif self.model_name is not None:
# get the name
model_name = self.model_name
model_name = model_name.replace(".json", "")
# if the file is not present attempt to download
cache_dir = get_cache_dir(name='models')
model_filename = join(cache_dir, model_name + ".json")
if not isfile(model_filename):
model_not_cached = 'Model "%s" not in cache. Attempting download from %s' % \
(model_name, urls.escher_home)
warn(model_not_cached)
try:
url = urls.model_download + model_name + ".json"
download = urlopen(url)
with open(model_filename, "w") as outfile:
outfile.write(download.read())
except HTTPError:
raise ValueError("No model named %s found in cache or at %s" % \
(model_name, url))
with open(model_filename) as f:
self.loaded_model_json = f.read()
def load_map(self):
"""Load the map from input map_json using load_resource, or, secondarily,
from map_name.
"""
map_json = self.map_json
if map_json is not None:
self.loaded_map_json = load_resource(self.map_json,
'map_json',
safe=self.safe)
elif self.map_name is not None:
# get the name
map_name = self.map_name
map_name = map_name.replace(".json", "")
# if the file is not present attempt to download
cache_dir = get_cache_dir(name='maps')
map_filename = join(cache_dir, map_name + ".json")
if not isfile(map_filename):
map_not_cached = 'Map "%s" not in cache. Attempting download from %s' % \
(map_name, urls.escher_home)
warn(map_not_cached)
try:
url = urls.map_download + map_name + ".json"
download = urlopen(url)
with open(map_filename, "w") as outfile:
outfile.write(download.read())
except HTTPError:
raise ValueError("No map named %s found in cache or at %s" % \
(map_name, url))
with open(map_filename) as f:
self.loaded_map_json = f.read()
def _embedded_css(self, is_local):
loc = (join(self.local_host, urls.builder_embed_css_local) if is_local else
urls.builder_embed_css)
        download = urlopen(loc)
return unicode(download.read().replace('\n', ' '))
def _initialize_javascript(self, is_local):
javascript = (u"var map_data_{the_id} = {map_data};"
u"var cobra_model_{the_id} = {cobra_model};"
u"var reaction_data_{the_id} = {reaction_data};"
u"var metabolite_data_{the_id} = {metabolite_data};"
u"var css_string_{the_id} = '{style}';").format(
the_id=self.the_id,
map_data=(self.loaded_map_json if self.loaded_map_json else
u'null'),
cobra_model=(self.loaded_model_json if self.loaded_model_json else
u'null'),
reaction_data=(json.dumps(self.reaction_data) if self.reaction_data else
u'null'),
metabolite_data=(json.dumps(self.metabolite_data) if self.metabolite_data else
u'null'),
style=self._embedded_css(is_local))
return javascript
def _draw_js(self, the_id, enable_editing, menu, enable_keys, dev,
fill_screen, scroll_behavior):
draw = (u"Builder({{ selection: d3.select('#{the_id}'),"
u"enable_editing: {enable_editing},"
u"menu: {menu},"
u"enable_keys: {enable_keys},"
u"scroll_behavior: {scroll_behavior},"
u"fill_screen: {fill_screen},"
u"map: map_data_{the_id},"
u"cobra_model: cobra_model_{the_id},"
u"reaction_data: reaction_data_{the_id},"
u"metabolite_data: metabolite_data_{the_id},"
u"css: css_string_{the_id} }});").format(
the_id=the_id,
enable_editing=json.dumps(enable_editing),
menu=json.dumps(menu),
enable_keys=json.dumps(enable_keys),
scroll_behavior=json.dumps(scroll_behavior),
fill_screen=json.dumps(fill_screen))
if not dev:
draw = u'escher.%s' % draw
return draw
def _get_html(self, js_source='web', menu='none', scroll_behavior='pan',
html_wrapper=False, enable_editing=False, enable_keys=False,
minified_js=True, fill_screen=False, height='800px'):
"""Generate the Escher HTML.
Arguments
--------
js_source: Can be one of the following:
'web' - (Default) use js files from zakandrewking.github.io/escher.
'local' - use compiled js files in the local escher installation. Works offline.
'dev' - use the local, uncompiled development files. Works offline.
menu: Menu bar options include:
'none' - (Default) No menu or buttons.
'zoom' - Just zoom buttons (does not require bootstrap).
'all' - Menu and button bar (requires bootstrap).
scroll_behavior: Scroll behavior options:
'pan' - (Default) Pan the map.
'zoom' - Zoom the map.
'none' - No scroll events.
minified_js: If True, use the minified version of js files. If
js_source is 'dev', then this option is ignored.
html_wrapper: If True, return a standalone html file.
enable_editing: Enable the editing modes (build, rotate, etc.).
enable_keys: Enable keyboard shortcuts.
height: The height of the HTML container.
"""
if js_source not in ['web', 'local', 'dev']:
raise Exception('Bad value for js_source: %s' % js_source)
if menu not in ['none', 'zoom', 'all']:
raise Exception('Bad value for menu: %s' % menu)
if scroll_behavior not in ['pan', 'zoom', 'none']:
raise Exception('Bad value for scroll_behavior: %s' % scroll_behavior)
content = env.get_template('content.html')
# if height is not a string
if type(height) is int:
height = u"%dpx" % height
elif type(height) is float:
height = u"%fpx" % height
elif type(height) is str:
height = unicode(height)
# set the proper urls
is_local = js_source=='local' or js_source=='dev'
is_dev = js_source=='dev'
d3_url = (join(self.local_host, urls.d3_local) if is_local else
urls.d3)
escher_url = ("" if js_source=='dev' else
(join(self.local_host, urls.escher_min_local) if is_local and minified_js else
(join(self.local_host, urls.escher_local) if is_local else
(urls.escher_min if minified_js else
urls.escher))))
jquery_url = ("" if not menu=='all' else
(join(self.local_host, urls.jquery_local) if is_local else
urls.jquery))
boot_css_url = ("" if not menu=='all' else
(join(self.local_host, urls.boot_css_local) if is_local else
urls.boot_css))
boot_js_url = ("" if not menu=='all' else
(join(self.local_host, urls.boot_js_local) if is_local else
urls.boot_js))
require_js_url = (urls.require_js_local if is_local else
urls.require_js)
html = content.render(require_js=require_js_url,
id=self.the_id,
height=height,
escher_css=(join(self.local_host, urls.builder_css_local) if is_local else
urls.builder_css),
dev=is_dev,
d3=d3_url,
escher=escher_url,
jquery=jquery_url,
boot_css=boot_css_url,
boot_js=boot_js_url,
wrapper=html_wrapper,
host=self.local_host,
initialize_js=self._initialize_javascript(is_local),
draw_js=self._draw_js(self.the_id, enable_editing,
menu, enable_keys,
is_dev, fill_screen, scroll_behavior),)
return html
def display_in_notebook(self, js_source='web', menu='zoom', scroll_behavior='none',
enable_editing=False, enable_keys=False, minified_js=True,
height=500):
"""Display the plot in the notebook.
Arguments
--------
js_source: Can be one of the following:
'web' (Default) - use js files from zakandrewking.github.io/escher.
'local' - use compiled js files in the local escher installation. Works offline.
'dev' - use the local, uncompiled development files. Works offline.
menu: Menu bar options include:
'none' - No menu or buttons.
'zoom' - Just zoom buttons.
Note: The 'all' menu option does not work in an IPython notebook.
scroll_behavior: Scroll behavior options:
'pan' - Pan the map.
'zoom' - Zoom the map.
'none' - (Default) No scroll events.
enable_editing: Enable the editing modes (build, rotate, etc.).
enable_keys: Enable keyboard shortcuts.
minified_js: If True, use the minified version of js files. If js_source
is 'dev', then this option is ignored.
height: Height of the HTML container.
"""
html = self._get_html(js_source=js_source, menu=menu, scroll_behavior=scroll_behavior,
html_wrapper=False, enable_editing=enable_editing, enable_keys=enable_keys,
minified_js=minified_js, fill_screen=False, height=height)
if menu=='all':
raise Exception("The 'all' menu option cannot be used in an IPython notebook.")
# import here, in case users don't have requirements installed
from IPython.display import HTML
return HTML(html)
def display_in_browser(self, ip='127.0.0.1', port=7655, n_retries=50, js_source='web',
menu='all', scroll_behavior='pan', enable_editing=True, enable_keys=True,
minified_js=True):
"""Launch a web browser to view the map.
Arguments
--------
js_source: Can be one of the following:
'web' - use js files from zakandrewking.github.io/escher.
'local' - use compiled js files in the local escher installation. Works offline.
'dev' - use the local, uncompiled development files. Works offline.
menu: Menu bar options include:
'none' - No menu or buttons.
'zoom' - Just zoom buttons (does not require bootstrap).
'all' - Menu and button bar (requires bootstrap).
scroll_behavior: Scroll behavior options:
'pan' - (Default) Pan the map.
'zoom' - Zoom the map.
'none' - No scroll events.
enable_editing: Enable the editing modes (build, rotate, etc.).
enable_keys: Enable keyboard shortcuts.
minified_js: If True, use the minified version of js files. If js_source
is 'dev', then this option is ignored.
"""
html = self._get_html(js_source=js_source, menu=menu, scroll_behavior=scroll_behavior,
html_wrapper=True, enable_editing=enable_editing, enable_keys=enable_keys,
minified_js=minified_js, fill_screen=True, height="100%")
serve_and_open(html, ip=ip, port=port, n_retries=n_retries)
def save_html(self, filepath=None, js_source='web', menu='all', scroll_behavior='pan',
enable_editing=True, enable_keys=True, minified_js=True):
"""Save an HTML file containing the map.
Arguments
--------
js_source: Can be one of the following:
'web' - use js files from zakandrewking.github.io/escher.
'local' - use compiled js files in the local escher installation. Works offline.
'dev' - use the local, uncompiled development files. Works offline.
menu: Menu bar options include:
'none' - No menu or buttons.
'zoom' - Just zoom buttons (does not require bootstrap).
'all' - Menu and button bar (requires bootstrap).
scroll_behavior: Scroll behavior options:
'pan' - (Default) Pan the map.
'zoom' - Zoom the map.
'none' - No scroll events.
enable_editing: Enable the editing modes (build, rotate, etc.).
enable_keys: Enable keyboard shortcuts.
minified_js: If True, use the minified version of js files. If js_source
is 'dev', then this option is ignored.
"""
html = self._get_html(js_source=js_source, menu=menu, scroll_behavior=scroll_behavior,
html_wrapper=True, enable_editing=enable_editing, enable_keys=enable_keys,
minified_js=minified_js, fill_screen=True, height="100%")
if filepath is not None:
with codecs.open(filepath, 'w', encoding='utf-8') as f:
f.write(html)
return filepath
else:
from tempfile import mkstemp
from os import write, close
os_file, filename = mkstemp(suffix=".html")
write(os_file, unicode(html).encode('utf-8'))
close(os_file)
return filename
```
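A brief usage sketch (not part of the module): the Builder docstring above lists the constructor arguments; the map name and reaction ids below are placeholders chosen for illustration.
```python
from escher.plots import Builder

# Hypothetical example: load a named map and overlay flux values on reactions.
b = Builder(map_name='my_map', reaction_data={'PGI': 1.2, 'PFK': -0.5})
b.save_html('my_map.html', js_source='web', menu='all')
# or, inside an IPython notebook:
# b.display_in_notebook(menu='zoom', height=500)
```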
#### File: escher/escher/server.py
```python
from escher.ko_server import koHandler
from escher.plots import Builder
import os, subprocess
from os.path import join
import tornado.ioloop
from tornado.web import RequestHandler, asynchronous, HTTPError, Application
from tornado.httpclient import AsyncHTTPClient
from tornado import gen
import tornado.escape
from tornado.options import define, options, parse_command_line
import json
import re
from jinja2 import Environment, PackageLoader
from mimetypes import guess_type
# set up jinja2 template location
env = Environment(loader=PackageLoader('escher', 'templates'))
# set directory to server
directory = os.path.abspath(os.path.dirname(__file__)).strip(os.pathsep)
directory = re.sub(r'escher$', '', directory)
NO_CACHE = True
PORT = 7778
PUBLIC = False
def run(port=PORT, public=PUBLIC):
global PORT
global PUBLIC
PORT = port
PUBLIC = public
print 'serving directory %s on port %d' % (directory, PORT)
application.listen(port, None if public else "localhost")
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print "bye!"
def stop():
tornado.ioloop.IOLoop.instance().stop()
class BaseHandler(RequestHandler):
def serve_path(self, path):
# make sure the path exists
if not os.path.isfile(path):
raise HTTPError(404)
# serve it
with open(path, "rb") as file:
data = file.read()
# set the mimetype
self.set_header("Content-Type", guess_type(path, strict=False)[0])
self.serve(data)
def serve(self, data):
if (NO_CACHE):
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
self.write(data)
self.finish()
class IndexHandler(BaseHandler):
def get(self):
template = env.get_template('index.html')
data = template.render()
self.set_header("Content-Type", "text/html")
self.serve(data)
class BuilderHandler(BaseHandler):
@asynchronous
@gen.engine
def get(self, dev_path, offline_path, kind, path):
# builder vs. viewer & dev vs. not dev
js_source = ('dev' if (dev_path is not None) else
('local' if (offline_path is not None) else
'web'))
enable_editing = (kind=='builder')
# Builder options
builder_kwargs = {}
for a in ['starting_reaction', 'model_name', 'map_name', 'map_json']:
args = self.get_arguments(a)
if len(args)==1:
builder_kwargs[a] = args[0]
# make the builder
builder = Builder(safe=True, **builder_kwargs)
# display options
display_kwargs = {'minified_js': True,
'scroll_behavior': 'pan',
'menu': 'all'}
# keyword
for a in ['menu', 'scroll_behavior', 'minified_js']:
args = self.get_arguments(a)
if len(args)==1:
display_kwargs[a] = args[0]
# get the html
html = builder._get_html(js_source=js_source, enable_editing=enable_editing,
enable_keys=True, html_wrapper=True, fill_screen=True,
height='100%', **display_kwargs)
self.set_header("Content-Type", "text/html")
self.serve(html)
class LibHandler(BaseHandler):
def get(self, path):
full_path = join(directory, 'escher', 'lib', path)
if os.path.isfile(full_path):
path = full_path
else:
raise HTTPError(404)
self.serve_path(path)
class StaticHandler(BaseHandler):
def get(self, path):
path = join(directory, 'escher', path)
print 'getting path %s' % path
self.serve_path(path)
settings = {"debug": "False"}
application = Application([
(r".*/knockout-map/(.*)", koHandler),
(r".*/lib/(.*)", LibHandler),
(r".*/(fonts/.*)", LibHandler),
(r".*/(js/.*)", StaticHandler),
(r".*/(css/.*)", StaticHandler),
(r".*/(resources/.*)", StaticHandler),
(r"/(dev/)?(offline/)?(builder|viewer)(.*)", BuilderHandler),
(r".*/(map_spec.json)", StaticHandler),
(r".*/(escher[^/]+js)", LibHandler),
(r"/", IndexHandler),
], **settings)
if __name__ == "__main__":
# define port
define("port", default=PORT, type=int, help="Port to serve on.")
define("public", default=PUBLIC, type=bool,
help=("If False, listen only on localhost. If True, listen on "
"all available addresses."))
parse_command_line()
run(port=options.port, public=options.public)
``` |
{
"source": "jlesage/TygerCaddy",
"score": 2
} |
#### File: TygerCaddy/hosts/caddyfile.py
```python
import subprocess
from config.models import Config
from django.conf import settings
from django.contrib.auth.models import User
from dns.models import EVariables
from proxies.models import Header
from .models import Host
def reload_caddy():
subprocess.call('pkill -USR1 caddy', shell=True)
return True
def generate_caddyfile():
user = User.objects.get(pk=1)
project = settings.BASE_DIR
caddyfilepath = project + '/data/caddyfile.conf'
caddyfile = open(caddyfilepath, "w+")
config = Config.objects.get(pk=1)
if config.dns_provider:
dns = config.dns_provider
caddyname = dns.caddy_name
set_evariables(config=config, dns=dns)
hosts = Host.objects.all()
if hosts:
for caddyhost in hosts:
# if caddyhost.dns_verification:
# set_evariables(config=config, dns=caddyhost.dns_provider)
headerlist = Header.objects.filter(proxy_id=caddyhost.proxy.id)
block = caddyhost.host_name + ' { \n'
block += '\t root ' + caddyhost.root_path + '\n'
block += '\t\t proxy ' + caddyhost.proxy.proxy_from + ' ' + caddyhost.proxy.proxy_to + ' { \n'
print(caddyhost.proxy)
if caddyhost.proxy.load_policy:
block += '\t\t\t load_policy ' + str(caddyhost.proxy.load_policy.name) + '\n'
if caddyhost.proxy.fail_timeout:
block += '\t\t\t fail_timeout ' + str(caddyhost.proxy.fail_timeout) + '\n'
if caddyhost.proxy.max_fails:
block += '\t\t\t max_fails ' + str(caddyhost.proxy.max_fails) + '\n'
if caddyhost.proxy.max_conns:
block += '\t\t\t max_conns ' + str(caddyhost.proxy.max_conns) + '\n'
if caddyhost.proxy.try_duration:
block += '\t\t\t try_duration ' + str(caddyhost.proxy.try_duration) + '\n'
if caddyhost.proxy.try_interval:
block += '\t\t\t try_interval ' + str(caddyhost.proxy.try_interval) + '\n'
if caddyhost.proxy.health_check:
block += '\t\t\t health_check ' + str(caddyhost.proxy.health_check) + '\n'
if caddyhost.proxy.health_check_port:
block += '\t\t\t health_check_port ' + str(caddyhost.proxy.health_check_port) + '\n'
if caddyhost.proxy.health_check_interval:
block += '\t\t\t health_check_interval ' + str(caddyhost.proxy.health_check_interval) + '\n'
if caddyhost.proxy.health_check_timeout:
block += '\t\t\t health_check_timeout ' + str(caddyhost.proxy.health_check_timeout) + '\n'
if caddyhost.proxy.keep_alive:
block += '\t\t\t keep_alive ' + str(caddyhost.proxy.keep_alive) + '\n'
if caddyhost.proxy.timeout:
block += '\t\t\t timeout ' + str(caddyhost.proxy.timeout) + '\n'
if caddyhost.proxy.without:
block += '\t\t\t without ' + str(caddyhost.proxy.without) + '\n'
if caddyhost.proxy.exceptions:
block += '\t\t\t exceptions ' + str(caddyhost.proxy.exceptions) + '\n'
if caddyhost.proxy.insecure_skip_verify:
block += '\t\t\t insecure_skip_verify \n'
if caddyhost.proxy.websocket:
block += '\t\t\t websocket \n'
if caddyhost.proxy.transparent:
block += '\t\t\t transparent \n'
if headerlist:
for header in headerlist:
if header.downstream:
block += 'header_downstream ' + header.header + ' ' + header.value + '\n'
if header.upstream:
block += 'header_upstream ' + header.header + ' ' + header.value + '\n'
block += '\t\t } \n'
if caddyhost.tls == False:
block += '\ttls off \n } \n \n'
elif config.dns_challenge:
block += '\ttls ' + caddyname + '\n } \n \n'
elif caddyhost.staging:
block += '\ttls ' + user.email + ' {\n' \
'\t ca https://acme-staging-v02.api.letsencrypt.org/directory\n' \
'\t } \n' \
'} \n'
else:
block += '\ttls ' + user.email + '\n } \n \n'
caddyfile.write(block)
caddyfile.close()
generate_dash()
reload_caddy()
return True
def generate_dash():
project = settings.BASE_DIR
caddyfilepath = project + '/data/caddyfile.conf'
config = Config.objects.get(pk=1)
block = config.interface + ':' + str(config.port) + ' { \n \n' \
'proxy / ' + config.proxy_host + ' { \n' \
'transparent \n' \
'except ' + config.proxy_exception + '\n' \
'} \n \n' \
'root ' + str(config.root_dir) + '\n' \
'} \n'
caddyfile = open(caddyfilepath, "a+")
caddyfile.write(block)
caddyfile.close()
return True
def set_evariables(config, dns):
variables = EVariables.objects.filter(dns_provider_id=dns.id)
project = settings.BASE_DIR
envpath = project + '/data/dns.env'
env = open(envpath, 'w+')
for var in variables:
line = var.variable + '=' + var.value + '\n'
env.write(line)
env.close()
return True
```
#### File: TygerCaddy/proxies/views.py
```python
from django.views.generic import CreateView, ListView, UpdateView, DeleteView, DetailView
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from hosts.caddyfile import generate_caddyfile
from .models import Proxy, Header
class CreateProxy(LoginRequiredMixin, CreateView):
template_name = 'proxies/add_proxy.html'
model = Proxy
success_url = '/proxies/list'
fields = ['name',
'proxy_from',
'proxy_to',
'load_policy',
'fail_timeout',
'max_fails',
'max_conns',
'try_duration',
'try_interval',
'health_check',
'health_check_port',
'health_check_interval',
'health_check_timeout',
'keep_alive',
'timeout',
'without',
'exceptions',
'insecure_skip_verify',
'websocket',
'transparent']
def form_valid(self, form):
form.save()
generate_caddyfile()
return redirect(reverse_lazy('all-proxies'))
class ListProxies(LoginRequiredMixin, ListView):
template_name = 'proxies/all_proxies.html'
context_object_name = 'proxies'
queryset = Proxy.objects.order_by('id')
paginate_by = 10
title = 'All Proxies'
class DetailProxy(LoginRequiredMixin, DetailView):
template_name = 'proxies/proxy_detail.html'
title = 'Proxy Detail'
model = Proxy
class UpdateProxy(LoginRequiredMixin, UpdateView):
model = Proxy
fields = ['name',
'proxy_from',
'proxy_to',
'load_policy',
'fail_timeout',
'max_fails',
'max_conns',
'try_duration',
'try_interval',
'health_check',
'health_check_port',
'health_check_interval',
'health_check_timeout',
'keep_alive',
'timeout',
'without',
'exceptions',
'insecure_skip_verify',
'websocket',
'transparent']
slug_field = 'name'
success_url = reverse_lazy('all-proxies')
def form_valid(self, form):
form.save()
generate_caddyfile()
return redirect(reverse_lazy('all-proxies'))
class DeleteProxy(LoginRequiredMixin, DeleteView):
model = Proxy
title = "Delete Proxy"
success_url = reverse_lazy('all-proxies')
def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.object = self.get_object()
self.object.delete()
generate_caddyfile()
return HttpResponseRedirect(self.get_success_url())
class CreateHeader(LoginRequiredMixin, CreateView):
template_name = 'proxies/headers/add_header.html'
model = Header
success_url = '/proxies/headers/list'
fields = ['header',
'upstream',
'downstream',
'value',
'proxy']
def form_valid(self, form):
form.save()
generate_caddyfile()
return redirect(reverse_lazy('all-headers'))
class ListHeaders(LoginRequiredMixin, ListView):
template_name = 'proxies/headers/all_headers.html'
context_object_name = 'headers'
queryset = Header.objects.order_by('id')
paginate_by = 10
title = 'All Headers'
class DetailHeader(LoginRequiredMixin, DetailView):
template_name = 'proxies/headers/header_detail.html'
title = 'Header Detail'
model = Proxy
class UpdateHeader(LoginRequiredMixin, UpdateView):
model = Header
fields = ['header',
'upstream',
'downstream',
'value',
'proxy']
slug_field = 'header'
success_url = reverse_lazy('all-headers')
def form_valid(self, form):
form.save()
return redirect(reverse_lazy('all-headers'))
class DeleteHeader(LoginRequiredMixin, DeleteView):
model = Header
title = "Delete Header"
template_name = 'proxies/headers/header_confirm_delete.html'
success_url = reverse_lazy('all-headers')
def delete(self, request, *args, **kwargs):
"""
Calls the delete() method on the fetched object and then
redirects to the success URL.
"""
self.object = self.get_object()
self.object.delete()
generate_caddyfile()
return HttpResponseRedirect(self.get_success_url())
``` |
{
"source": "jlesquembre/rst-tables.nvim",
"score": 3
} |
#### File: rplugin/python3/rst-tables.py
```python
import re
import textwrap
import neovim
from neovim.api import NvimError
def mapl(f, x):
return list(map(f, x))
def line_has_sep(line):
"""Line has a `-` before a `=`
"""
a = line.find('-') # not header
b = line.find('=') # header
if a == -1: # No `-`
return False
elif b == -1: # No `=`, but `-`
return True
else:
return a < b
def table_has_header(chunk):
if len(chunk) == 1:
return False
try:
if line_has_sep(chunk[2]) or line_has_sep(chunk[1]):
return False
except IndexError:
return True
return True
def join_rows(rows, sep='\n'):
"""Given a list of rows (a list of lists) this function returns a
    flattened list where the individual columns of all rows are joined
together using the line separator.
"""
output = []
for row in rows:
# grow output array, if necessary
if len(output) <= len(row):
for i in range(len(row) - len(output)):
output.extend([[]])
for i, field in enumerate(row):
field_text = field.strip()
if field_text:
output[i].append(field_text)
return mapl(lambda lines: sep.join(lines), output)
def line_is_separator(line):
return re.match('^[\t +=-]+$', line)
def has_line_seps(raw_lines):
for line in raw_lines:
if line_is_separator(line):
return True
return False
def partition_raw_lines(raw_lines):
"""Partitions a list of raw input lines so that between each partition, a
table row separator can be placed.
"""
if not has_line_seps(raw_lines):
return mapl(lambda x: [x], raw_lines)
curr_part = []
parts = [curr_part]
for line in raw_lines:
if line_is_separator(line):
curr_part = []
parts.append(curr_part)
else:
curr_part.append(line)
# remove any empty partitions (typically the first and last ones)
return filter(lambda x: x != [], parts)
def unify_table(table):
"""Given a list of rows (i.e. a table), this function returns a new table
    in which all rows have an equal number of columns. If a full column is
    empty (i.e. all rows have that field empty), the column is removed.
"""
max_fields = max(map(lambda row: len(row), table))
empty_cols = [True] * max_fields
output = []
for row in table:
curr_len = len(row)
if curr_len < max_fields:
row += [''] * (max_fields - curr_len)
output.append(row)
# register empty columns (to be removed at the end)
for i in range(len(row)):
if row[i].strip():
empty_cols[i] = False
# remove empty columns from all rows
table = output
output = []
for row in table:
cols = []
for i in range(len(row)):
should_remove = empty_cols[i]
if not should_remove:
cols.append(row[i])
output.append(cols)
return output
def split_table_row(row_string):
if row_string.find("|") >= 0:
# first, strip off the outer table drawings
row_string = re.sub(r'^\s*\||\|\s*$', '', row_string)
return re.split(r'\s*\|\s*', row_string.strip())
return re.split(r'\s\s+', row_string.rstrip())
def parse_table(raw_lines):
row_partition = partition_raw_lines(raw_lines)
lines = mapl(lambda row_string: join_rows(mapl(split_table_row, row_string)),
row_partition)
return unify_table(lines)
def table_line(widths, header=False):
if header:
linechar = '='
else:
linechar = '-'
sep = '+'
parts = []
for width in widths:
parts.append(linechar * width)
if parts:
parts = [''] + parts + ['']
return sep.join(parts)
def str_width(unicode_text):
"""calc string width, support cjk characters."""
from unicodedata import east_asian_width
return sum(1+(east_asian_width(c) in "WF") for c in unicode_text)
def split_row_into_lines(row):
row = mapl(lambda field: field.split('\n'), row)
height = max(map(lambda field_lines: len(field_lines), row))
turn_table = []
for i in range(height):
fields = []
for field_lines in row:
if i < len(field_lines):
fields.append(field_lines[i])
else:
fields.append('')
turn_table.append(fields)
return turn_table
def get_column_widths_from_border_spec(chunk):
border = None
for row in chunk:
if line_is_separator(row):
border = row.strip()
break
if border is None:
raise RuntimeError('Cannot reflow this table. Top table border not found.')
left = right = None
if border[0] == '+':
left = 1
if border[-1] == '+':
right = -1
return mapl(lambda drawing: max(0, len(drawing) - 2), border[left:right].split('+'))
def get_indent(line):
return line[0 : len(line)-len(line.lstrip())]
def apply_indent(table, indent):
for i in range(len(table)):
table[i] = indent + table[i]
return table
def get_field_width(field_text):
return max(map(lambda s: str_width(s), field_text.split('\n')))
def get_column_widths(table):
widths = []
for row in table:
num_fields = len(row)
# dynamically grow
if num_fields >= len(widths):
widths.extend([0] * (num_fields - len(widths)))
for i in range(num_fields):
field_width = get_field_width(row[i])
widths[i] = max(widths[i], field_width)
return widths
def pad_fields(row, widths):
"""Pads fields of the given row, so each field lines up nicely with the
others.
"""
# Pad all fields using the calculated widths
new_row = []
for i in range(len(row)):
unicode_len = str_width(row[i])
col = ' ' + row[i] + ' ' * int(widths[i] - unicode_len + 1)
new_row.append(col)
return new_row
def wrap_text(text, width):
"""wrap text, support cjk characters."""
lines = []
while len(text) > 0:
w = width
# check 1st string, if too wide, then guess again;
guess = textwrap.wrap(text, w)[0]
while str_width(guess) > width:
w -= (str_width(guess) - width + 1)/2
guess = textwrap.wrap(text, w)[0]
lines.append(guess)
text = text[len(guess):].strip()
return lines
def reflow_row_contents(row, widths):
new_row = []
for i, field in enumerate(row):
wrapped_lines = wrap_text(field.replace('\n', ' '), widths[i])
new_row.append("\n".join(wrapped_lines))
return new_row
def draw_table(table, manual_widths=None, header=True):
if table == []:
return []
if manual_widths is None:
col_widths = get_column_widths(table)
else:
col_widths = manual_widths
new_widths = get_column_widths(table)
if len(new_widths) > len(col_widths):
col_widths += new_widths[len(col_widths):]
# Reserve room for the spaces
sep_col_widths = mapl(lambda x: x + 2, col_widths)
header_line = table_line(sep_col_widths, header=True)
normal_line = table_line(sep_col_widths, header=False)
output = [normal_line]
first = True
for row in table:
if manual_widths:
row = reflow_row_contents(row, manual_widths)
row_lines = split_row_into_lines(row)
# draw the lines (num_lines) for this row
for row_line in row_lines:
row_line = pad_fields(row_line, col_widths)
output.append("|".join([''] + row_line + ['']))
# then, draw the separator
if first and header:
output.append(header_line)
first = False
else:
output.append(normal_line)
return output
@neovim.plugin
class Main(object):
def __init__(self, vim):
self.vim = vim
def get_table_bounds(self):
row, col = self.vim.current.window.cursor
upper = lower = row
try:
while self.vim.current.buffer[upper - 1].strip():
upper -= 1
except (IndexError, NvimError):
pass
else:
upper += 1
try:
while self.vim.current.buffer[lower - 1].strip():
lower += 1
except (IndexError, NvimError):
pass
else:
lower -= 1
return (upper, lower)
@neovim.command('TableRstFormat')
def reformat_table(self):
upper, lower = self.get_table_bounds()
chunk = self.vim.current.buffer[upper - 1:lower]
indent = get_indent(chunk[0])
table = parse_table(chunk)
has_header = table_has_header(chunk)
chunk = draw_table(table, header=has_header)
self.vim.current.buffer[upper - 1:lower] = apply_indent(chunk, indent)
@neovim.command('TableRstReflow')
def reflow_table(self):
upper, lower = self.get_table_bounds()
chunk = self.vim.current.buffer[upper - 1:lower]
indent = get_indent(chunk[0])
table = parse_table(chunk)
widths = get_column_widths_from_border_spec(chunk)
table = parse_table(chunk)
has_header = table_has_header(chunk)
chunk = draw_table(table, widths, header=has_header)
self.vim.current.buffer[upper - 1:lower] = apply_indent(chunk, indent)
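    # Illustrative sketch (not part of the plugin): the pure helpers above can be
    # exercised outside Neovim. Assuming this module is importable, something like
    # the following reformats a simple two-column chunk of text:
    #
    #   raw = ["Name    Description",
    #          "foo     Does a thing"]
    #   table = parse_table(raw)   # [['Name', 'Description'], ['foo', 'Does a thing']]
    #   lines = draw_table(table, header=table_has_header(raw))
    #   print("\n".join(lines))    # grid-style reST table with a '=' header rule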
``` |
{
"source": "jlestel/quickstart-datalake-47lining",
"score": 2
} |
#### File: assets/analysis/glue.py
```python
import boto3
import logging
from time import sleep
from multiprocessing import Process
from botocore.errorfactory import ClientError
def get_crawler_state(glue_client, crawler_name):
response = glue_client.get_crawler(
Name=crawler_name
)
return response['Crawler']['State']
def crawl_after_job(glue_client, job_name, job_run_id, crawler_name):
log = logging.getLogger('job_poll')
while True:
log.info('Waiting')
sleep(30)
response = glue_client.get_job_run(
JobName=job_name,
RunId=job_run_id
)
job_run_state = response['JobRun']['JobRunState']
log.info('Job run state: {}'.format(job_run_state))
if job_run_state == 'SUCCEEDED':
log.info('SUCCEEDED so break')
break
try:
log.info('Trying to run crawler')
glue_client.start_crawler(
Name=crawler_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'CrawlerRunningException':
log.info('Crawler already running. Waiting additional 30 seconds')
sleep(30)
crawl_after_job(glue_client, job_name, job_run_id, crawler_name)
else:
log.exception(e)
log.info('Finished')
def run_aws_glue_crawler(config):
log = logging.getLogger(__name__)
client = boto3.client('glue', region_name=config['region_name'])
curated_datasets_crawler_name = config['curated_datasets_crawler_name']
job_name = config['curated_datasets_job_name']
try:
client.start_crawler(
Name=curated_datasets_crawler_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'CrawlerRunningException':
log.info('Crawler already running')
else:
log.exception(e)
raise e
while True:
sleep(10)
curated_state = get_crawler_state(client, curated_datasets_crawler_name)
log.info("Curated crawler state: {}".format(curated_state))
if curated_state != 'RUNNING':
break
try:
response = client.start_job_run(
JobName=job_name
)
job_run_id = response['JobRunId']
p = Process(target=crawl_after_job, args=(client, job_name, job_run_id, curated_datasets_crawler_name))
p.start()
except ClientError as e:
if e.response['Error']['Code'] == 'ConcurrentRunsExceededException':
log.info('Job already running')
else:
log.exception(e)
raise e
```
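The config dict consumed by run_aws_glue_crawler only needs the keys referenced above; a minimal illustration (the import path is assumed from the sibling module below, and all values are placeholders):
```python
from analysis.glue import run_aws_glue_crawler

# Hypothetical configuration; names are placeholders, not real AWS resources.
config = {
    "region_name": "us-east-1",
    "curated_datasets_crawler_name": "curated-datasets-crawler",
    "curated_datasets_job_name": "curated-datasets-job",
}
run_aws_glue_crawler(config)
```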
#### File: assets/analysis/learn_more.py
```python
import json
import boto3
from analysis.exceptions import PublishTopicException
def learn_more(config, form):
print('SNS form {}'.format(form))
topic_arn = config['sns_learn_more_topic_arn']
payload = {
'name': form['name'],
'role': form['role'],
'email': form['email'],
'company': form['company'],
'message': form['message']
}
region = topic_arn.split(':')[3]
client = boto3.client('sns', region_name=region)
response = client.publish(
TopicArn=topic_arn,
Message=json.dumps(payload),
Subject='Data Lake Learn More request from {}'.format(payload['name']))
print('SNS response {}'.format(response))
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise PublishTopicException()
``` |
{
"source": "jletienne/yff",
"score": 3
} |
#### File: yff/archive/get_player_points.py
```python
def get_player_points(player_season_id='390.p.28389'):
url = 'https://fantasysports.yahooapis.com/fantasy/v2/player/{}/stats;week=10;type=week'.format(player_season_id)
response = oauth.session.get(url, params={'format': 'json'})
r = response.json()
stats = r['fantasy_content']['player'][1]['player_stats']['stats']
player_stats = {}
for i in stats:
player_stats[i['stat']['stat_id']] = i['stat']['value']
player_stats_df = pd.DataFrame(list(player_stats.items()), index=None)
header = ['stat_id', 'value']
player_stats_df.columns = header
league_multiplier = get_league_settings()
player_scores = player_stats_df.merge(league_multiplier, on='stat_id', how='left').fillna(0)
player_scores["stat_multiplier"] = player_scores.stat_multiplier.astype(float)
player_scores["value"] = player_scores.value.astype(float)
player_scores["fantasy_points"] = player_scores["stat_multiplier"] * player_scores["value"]
return sum(player_scores["fantasy_points"])
```
#### File: yff/archive/get_roster_points.py
```python
def get_roster_points():
url = 'https://fantasysports.yahooapis.com/fantasy/v2/team/390.l.XXXXXX.t.1/roster;week=10'
response = oauth.session.get(url, params={'format': 'json'})
r = response.json()
team_info = r['fantasy_content']['team'][1]['roster']['0']['players']
num_players = team_info['count']
for i in range(num_players):
player_info = team_info[str(i)]['player']
print(player_info[0][2]['name']['full'], player_info[1]['selected_position'][1]['position'])
```
#### File: jletienne/yff/get_stats.py
```python
import pandas as pd
from yahoo_oauth import OAuth2
import logging
import json
from json import dumps
import datetime
week_num = 16
class Yahoo_Api():
def __init__(self,
consumer_key,
consumer_secret,
access_token
):
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token = access_token
self._authorization = None
def _login(self):
global oauth
oauth = OAuth2(None, None, from_file='oauth2yahoo.json')
if not oauth.token_is_valid():
oauth.refresh_access_token()
with open('oauth2yahoo.json') as json_yahoo_file:
auths = json.load(json_yahoo_file)
yahoo_consumer_key = auths['consumer_key']
yahoo_consumer_secret = auths['consumer_secret']
yahoo_access_key = auths['access_token']
json_yahoo_file.close()
yahoo_api = Yahoo_Api(yahoo_consumer_key, yahoo_consumer_secret, yahoo_access_key)
yahoo_api._login()
weekly_team_stats = []
opponent = {'0': '1', '1': '0'}
for week in range(1, week_num+1):
url = 'https://fantasysports.yahooapis.com/fantasy/v2/league/390.l.XXXXXX/scoreboard;week={}'.format(week)
response = oauth.session.get(url, params={'format': 'json'})
r = response.json()
for game in ['0', '1', '2', '3', '4']:
try:
for team in ['0','1']:
name = r['fantasy_content']['league'][1]['scoreboard']['0']['matchups'][game]['matchup']['0']['teams'][team]['team'][0][2]['name']
manager = r['fantasy_content']['league'][1]['scoreboard']['0']['matchups'][game]['matchup']['0']['teams'][team]['team'][0][-1]['managers'][0]['manager']['nickname']
points = r['fantasy_content']['league'][1]['scoreboard']['0']['matchups'][game]['matchup']['0']['teams'][team]['team'][1]['team_points']['total']
points_against = r['fantasy_content']['league'][1]['scoreboard']['0']['matchups'][game]['matchup']['0']['teams'][opponent[team]]['team'][1]['team_points']['total']
projected_points = r['fantasy_content']['league'][1]['scoreboard']['0']['matchups'][game]['matchup']['0']['teams'][team]['team'][1]['team_projected_points']['total']
stats={'week': week, 'manager': manager, 'team_name': name, 'points': points, 'points_against': points_against, 'projected_points': projected_points}
weekly_team_stats.append(stats)
except:
pass
pd.DataFrame(weekly_team_stats).to_csv("rawdata/2019_fantasy_stats.csv", index=None)
``` |
{
"source": "jlettman/adventofcode",
"score": 4
} |
#### File: 2021/02/day02.py
```python
from os.path import join, dirname, realpath
from argparse import ArgumentParser
from functools import reduce
from sys import stderr
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
CHALLENGE = """
Now, you need to figure out how to pilot this thing.
It seems like the submarine can take a series of commands like forward 1,
down 2, or up 3:
forward X increases the horizontal position by X units.
down X increases the depth by X units.
up X decreases the depth by X units.
Note that since you're on a submarine, down and up affect your depth, and so
they have the opposite result of what you might expect.
The submarine seems to already have a planned course (your puzzle input). You
should probably figure out where it's going. For example:
forward 5
down 5
forward 8
up 3
down 8
forward 2
Your horizontal position and depth both start at 0. The steps above would then
modify them as follows:
forward 5 adds 5 to your horizontal position, a total of 5.
down 5 adds 5 to your depth, resulting in a value of 5.
forward 8 adds 8 to your horizontal position, a total of 13.
up 3 decreases your depth by 3, resulting in a value of 2.
down 8 adds 8 to your depth, resulting in a value of 10.
forward 2 adds 2 to your horizontal position, a total of 15.
After following these instructions, you would have a horizontal position of 15
and a depth of 10. (Multiplying these together produces 150.)
--- Part Two ---
Based on your calculations, the planned course doesn't seem to make any sense.
You find the submarine manual and discover that the process is actually slightly
more complicated.
In addition to horizontal position and depth, you'll also need to track a third
value, aim, which also starts at 0. The commands also mean something entirely
different than you first thought:
down X increases your aim by X units.
up X decreases your aim by X units.
forward X does two things:
It increases your horizontal position by X units.
It increases your depth by your aim multiplied by X.
Again note that since you're on a submarine, down and up do the opposite of what
you might expect: "down" means aiming in the positive direction.
Now, the above example does something different:
forward 5 adds 5 to your horizontal position, a total of 5.
Because your aim is 0, your depth does not change.
down 5 adds 5 to your aim, resulting in a value of 5.
forward 8 adds 8 to your horizontal position, a total of 13.
Because your aim is 5, your depth increases by 8*5=40.
up 3 decreases your aim by 3, resulting in a value of 2.
down 8 adds 8 to your aim, resulting in a value of 10.
forward 2 adds 2 to your horizontal position, a total of 15.
Because your aim is 10, your depth increases by 2*10=20 to a total of 60.
After following these new instructions, you would have a horizontal position of
15 and a depth of 60. (Multiplying these produces 900.)
"""
MOVE_UP = "up"
MOVE_DOWN = "down"
MOVE_FORWARD = "forward"
def interpret(instruction: str) -> tuple:
"""
Split and interpret a piloting instruction and return a tuple of action and
units.
    Parameters:
        instruction (str): Instruction to interpret
    Returns:
tuple: Tuple of action and units
"""
action, units = instruction.split(" ", 1)
units = int(units)
return (action, units)
def process_simple(last: tuple, instruction: str) -> tuple:
"""
Process instructions using simple mode (non-aim) and return the new tuple of
horizontal and depth.
Parameters:
last (tuple): Last values of horizontal and depth
instruction (str): Current instruction
Returns:
tuple: New values of horizontal and depth
"""
# extract last values
horiz, depth = last
# interpret the instruction
action, units = interpret(instruction)
if action == MOVE_UP:
depth -= units
elif action == MOVE_DOWN:
depth += units
elif action == MOVE_FORWARD:
horiz += units
else:
raise ValueError(f"Unknown pilot command: {action}")
return (horiz, depth)
def process_advanced(last: tuple, instruction: str) -> tuple:
"""
Process instructions using advanced mode (aim) and return the new tuple of
horizontal, depth, and aim.
Parameters:
last (tuple): Last values of horizontal, depth, and aim
instruction (str): Current instruction
Returns:
tuple: New values of horizontal, depth, and aim
"""
# extract last values
horiz, depth, aim = last
# interpret the instruction
action, units = interpret(instruction)
if action == MOVE_UP:
aim -= units
elif action == MOVE_DOWN:
aim += units
elif action == MOVE_FORWARD:
horiz += units
depth += units * aim
else:
raise ValueError(f"Unknown pilot command: {action}")
return (horiz, depth, aim)
def pilot(instructions: iter, advanced: bool = False) -> int:
"""
Pilot a submarine using a list of instructions.
Parameters:
instructions (iter): Iterable list of piloting instructions
advanced (bool): Use advanced (aim) mode as per part 2
Returns:
int: Value of (horizontal * depth) distance measurements
"""
if advanced:
horiz, depth, _ = reduce(process_advanced, instructions, (0, 0, 0))
else:
horiz, depth = reduce(process_simple, instructions, (0, 0))
return (horiz * depth)
def main():
"""Command-line interface main function."""
parser = ArgumentParser(prog="day02", description=__doc__)
parser.add_argument("-c", "--challenge",
action="store_true", help="show the Advent of Code challenge and exit")
parser.add_argument("-i", "--instructions", metavar="FILE", default=join(
dirname(realpath(__file__)), "input.txt"), help="path to the instructions file")
parser.add_argument("-a", "--advanced", action="store_true",
help="use advanced (aim) mode")
args = parser.parse_args()
if args.challenge:
print(CHALLENGE)
return
with open(args.instructions, 'r') as lines:
instructions = map(lambda line: line.strip(), lines)
res = pilot(instructions, args.advanced)
print(res)
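# Quick sanity check against the worked example in the challenge text above
# (illustrative note, not part of the original solution):
#
#   sample = ["forward 5", "down 5", "forward 8", "up 3", "down 8", "forward 2"]
#   pilot(sample)                 # -> 150  (horizontal 15 * depth 10)
#   pilot(sample, advanced=True)  # -> 900  (horizontal 15 * depth 60)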
if __name__ == "__main__":
main()
``` |
{
"source": "jleung1/goal_modelling_rl",
"score": 2
} |
#### File: agents/gnet_agent/gnet_agent.py
```python
from agents.base.agent import Agent
from agents.gnet_agent.model import GNetGoalDQN
import torch
from tqdm import tqdm
import numpy as np
import pickle
import torch.optim as optim
from skimage.color import rgb2gray
class GNetAgent(Agent):
def __init__(
self,
env,
gnet_manager,
action_model,
goal_model,
action_memory,
goal_memory,
action_space,
goal_space,
num_goals,
idx2goalnet,
use_ga,
env_name,
device,
):
super(GNetAgent, self).__init__(
env,
gnet_manager,
action_model,
action_memory,
action_space,
goal_space,
num_goals,
idx2goalnet,
env_name,
device,
)
self.use_ga = use_ga
if self.env_name == "two_keys" or self.env_name == "four_rooms_3d":
if self.env_name == "two_keys":
self.goal_clone_interval = 10
elif self.env_name == "four_rooms_3d":
self.goal_clone_interval = 50
self.goal_replay_memory = goal_memory
self.goal_model = goal_model
self.goal_tau = 1.0
self.goal_lr = 0.001
self.eps_hi = 0.5
self.goal_opt = optim.Adam(self.goal_model.parameters(), lr=self.goal_lr)
self.clone_goal_model = (
GNetGoalDQN(num_goals, self.device, env_name).eval().to(self.device)
)
self.clone(self.clone_goal_model, self.goal_model, 1)
def select_action(self, state, goal, c_goal, actions=None):
state = torch.as_tensor(state, device=self.device).float().unsqueeze(0)
goal = torch.as_tensor(goal, device=self.device).float().unsqueeze(0)
c_goal = torch.as_tensor(c_goal, device=self.device).float().unsqueeze(0)
if actions is not None:
actions = torch.as_tensor(actions, device=self.device).float().unsqueeze(0)
q_values = self.action_model(state, goal, c_goal, actions)
action = torch.argmax(q_values)
return action.item()
def select_subgoal(self, state, goalnet_code, mask, state_extra=None):
state = torch.as_tensor(state, device=self.device).float().unsqueeze(0)
goalnet_code = torch.as_tensor(goalnet_code, device=self.device)
mask = torch.as_tensor(mask, device=self.device)
if state_extra is not None:
state_extra = torch.as_tensor(state_extra, device=self.device).float()
q_values = self.goal_model(state, goalnet_code, mask, state_extra)
subgoal = torch.argmax(q_values)
subgoal_coords = self.gnet_manager.get_goal_state(self.idx2goalnet[subgoal])
return subgoal, subgoal_coords
def update_action_model(self):
if self.env_name == "two_keys":
(
state,
action,
reward,
terminal,
next_state,
goal,
c_goal,
n_goal,
) = self.action_replay_memory.retrieve()
q = (
self.action_model(state, goal, c_goal)
.gather(1, action.view(self.minibatch_size, 1))
.squeeze(1)
)
a_max = self.action_model(next_state, goal, n_goal).max(dim=1)[1].detach()
qmax = (
self.clone_action_model(next_state, goal, n_goal)
.detach()
.gather(1, a_max.view(self.minibatch_size, 1))
.squeeze(1)
)
elif self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
(
reward,
terminal,
goal,
indexes,
) = self.action_replay_memory.retrieve_frame_stack()
(
state,
action,
c_goal,
next_state,
n_goal,
) = self.action_replay_memory.get_frame_stack(indexes)
q = (
self.action_model(state, goal, c_goal, action[:, :-1])
.squeeze(1)
.gather(1, action[:, -1].view(self.minibatch_size, 1))
.squeeze(1)
)
a_max = (
self.action_model(next_state, goal, n_goal, action[:, 1:])
.squeeze(1)
.max(dim=1)[1]
.detach()
)
qmax = (
self.clone_action_model(next_state, goal, n_goal, action[:, 1:])
.squeeze(1)
.detach()
.gather(1, a_max.view(self.minibatch_size, 1))
.squeeze(1)
)
nonterminal_target = reward + self.gamma * qmax
terminal_target = reward
target = (
terminal.float() * terminal_target
+ (~terminal).float() * nonterminal_target
)
loss = self.loss(q, target)
self.action_opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.action_model.parameters(), 1.0)
self.action_opt.step()
def update_goal_model(self):
if self.env_name == "two_keys":
(
state,
action,
reward,
terminal,
next_state,
gnet_state,
gnet_mask,
next_gnet_state,
next_gnet_mask,
steps,
_,
_,
) = self.goal_replay_memory.retrieve()
q = (
self.goal_model(state, gnet_state, gnet_mask)
.gather(1, action.view(self.minibatch_size, 1))
.squeeze(1)
)
g_max = (
self.goal_model(next_state, next_gnet_state, next_gnet_mask)
.max(dim=1)[1]
.detach()
)
qmax = (
self.clone_goal_model(next_state, next_gnet_state, next_gnet_mask)
.detach()
.gather(1, g_max.view(self.minibatch_size, 1))
.squeeze(1)
)
elif self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
(
state,
action,
reward,
terminal,
next_state,
gnet_state,
gnet_mask,
next_gnet_state,
next_gnet_mask,
steps,
state_extra,
next_state_extra,
) = self.goal_replay_memory.retrieve()
q = (
self.goal_model(state, gnet_state, gnet_mask, state_extra)
.gather(1, action.view(self.minibatch_size, 1))
.squeeze(1)
)
g_max = (
self.goal_model(
next_state, next_gnet_state, next_gnet_mask, next_state_extra
)
.max(dim=1)[1]
.detach()
)
qmax = (
self.clone_goal_model(
next_state, next_gnet_state, next_gnet_mask, next_state_extra
)
.detach()
.gather(1, g_max.view(self.minibatch_size, 1))
.squeeze(1)
)
nonterminal_target = (self.gamma ** steps) * reward + (
self.gamma ** steps
) * qmax
terminal_target = (self.gamma ** steps) * reward
target = (
terminal.float() * terminal_target
+ (~terminal).float() * nonterminal_target
)
loss = self.loss(q, target)
self.goal_opt.zero_grad()
loss.backward()
self.goal_opt.step()
def save(self, result_data):
super(GNetAgent, self).save(result_data)
if self.env_name != "ai2thor_kitchen":
torch.save(self.goal_model.state_dict(), self.save_dir + "/high_level.pt")
torch.save(
self.clone_goal_model.state_dict(),
self.save_dir + "/high_level_clone.pt",
)
torch.save(self.goal_opt.state_dict(), self.save_dir + "/high_level_opt.pt")
self.goal_replay_memory.save_to_disk()
def run(
self,
episodes,
train=False,
load=False,
eval=False,
episode=0,
start_frame=0,
save_checkpoints=False,
do_print=True,
floor_idx=-1,
seed=-1,
):
if not eval:
self.frame = start_frame
result_data = dict(episode=[], reward=[], steps=[], frames=[])
reward_history = []
steps_history = []
subgoal_progress = {}
for key in self.gnet_manager.gnet_goals.keys():
subgoal_progress[key] = []
if load:
self.load()
progress_bar = tqdm(range(episode, episodes), unit="episode", disable=eval)
goal_steps = 0
if self.env_name == "four_rooms_3d":
channels = 3
img_height = 60
img_width = 80
if self.use_ga:
state_extra_size = 11
else:
state_extra_size = 8
elif self.env_name == "ai2thor_kitchen":
channels = 4
img_height = 100
img_width = 100
if self.use_ga:
state_extra_size = 10
else:
state_extra_size = 8
for episode in progress_bar:
total_steps = 0
if self.env_name == "ai2thor_kitchen":
obs = self.env.reset(train=train, idx=floor_idx, seed=seed)
else:
obs = self.env.reset()
state = self.process_obs(obs)
if self.env_name == "two_keys":
obs = state
self.gnet_manager.reset()
state_goal_selection = state
trajectory_data = []
her_trajectory_data = []
episode_done = False
total_reward = 0
c_goal_stack = None
state_stack = None
action_stack = None
if self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
c_goal_stack = np.zeros((state_extra_size * self.frame_stack))
state_stack = np.zeros(
(channels * self.frame_stack, img_height, img_width)
)
action_stack = np.zeros((self.frame_stack - 1))
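            # These stacks hold the most recent `frame_stack` observations,
            # goal vectors and actions concatenated along the first axis;
            # they are rolled forward each step of the inner loop below.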
# Outer loop for high level
while not episode_done:
extrinsic_reward = 0
if self.env_name == "four_rooms_3d":
hi_c_goal = [
self.env.agent.pos[0],
self.env.agent.pos[2],
self.env.goal.pos[0],
self.env.goal.pos[2],
self.env.yellow_subgoal.pos[0],
self.env.yellow_subgoal.pos[2],
self.env.blue_subgoal.pos[0],
self.env.blue_subgoal.pos[2],
]
else:
hi_c_goal = None
if (
train
and self.env_name != "ai2thor_kitchen"
and np.random.uniform() < self.eps_hi
):
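                    # Epsilon-greedy exploration over subgoals: each candidate
                    # is sampled with probability proportional to
                    # (1 - success_rate + 0.1), so subgoals the agent
                    # currently fails at are practised more often.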
goalnet_code, mask = self.gnet_manager.generate_goal_mask()
choices = self.gnet_manager.generate_options_list()
probs = []
target_choices = []
for init_g, target_g in choices:
success_rate = (
self.gnet_manager.goal_successes[init_g][target_g].sum()
/ self.gnet_manager.last_num_goals
if len(self.gnet_manager.goal_successes[init_g][target_g])
> 0
else 0
)
probs.append(1.0 - success_rate + 0.1)
target_choices.append(target_g)
probs = probs / np.array(probs).sum()
chosen = np.random.choice(target_choices, p=probs)
subgoal_id = self.idx2goalnet.index(chosen)
subgoal_coord = self.gnet_manager.get_goal_state(chosen)
self.gnet_manager.gnet_state = self.gnet_manager.get_parent_goal(
chosen
)
elif self.env_name == "ai2thor_kitchen":
chosen = self.gnet_manager.generate_options_list()[0][-1]
self.gnet_manager.gnet_state = self.gnet_manager.get_parent_goal(
chosen
)
subgoal_id = self.idx2goalnet.index(chosen)
subgoal_coord = self.gnet_manager.get_goal_state(chosen)
else:
goalnet_code, mask = self.gnet_manager.generate_goal_mask()
subgoal_id, subgoal_coord = self.select_subgoal(
state, goalnet_code, mask, hi_c_goal
)
self.gnet_manager.gnet_state = self.gnet_manager.get_parent_goal(
self.idx2goalnet[subgoal_id]
)
goal_data = []
subgoal_data = []
subgoal_achieved = False
other_subgoal_achieved = False
achieved_subgoal_id = 0
# Inner loop for low level
while (
not subgoal_achieved
and not episode_done
and not other_subgoal_achieved
):
c_goal = self.gnet_manager.current_goal_state()
if self.env_name == "four_rooms_3d":
if self.use_ga:
c_goal = [
self.env.goal.pos[0],
self.env.goal.pos[2],
self.env.yellow_subgoal.pos[0],
self.env.yellow_subgoal.pos[2],
self.env.blue_subgoal.pos[0],
self.env.blue_subgoal.pos[2],
] + c_goal
else:
c_goal = [
self.env.agent.pos[0],
self.env.agent.pos[2],
self.env.goal.pos[0],
self.env.goal.pos[2],
self.env.yellow_subgoal.pos[0],
self.env.yellow_subgoal.pos[2],
self.env.blue_subgoal.pos[0],
self.env.blue_subgoal.pos[2],
]
state_stack = np.concatenate((state_stack[channels:], state))
c_goal_stack = np.concatenate(
(c_goal_stack[state_extra_size:], c_goal)
)
elif self.env_name == "ai2thor_kitchen":
if self.use_ga:
c_goal = self.env.fridge_pos + self.env.light_pos + c_goal
else:
c_goal = (
[
self.env.last_meta["agent"]["position"]["x"],
self.env.last_meta["agent"]["position"]["z"],
]
+ self.env.fridge_pos
+ self.env.light_pos
)
state_stack = np.concatenate((state_stack[channels:], state))
c_goal_stack = np.concatenate(
(c_goal_stack[state_extra_size:], c_goal)
)
if (
train
and np.random.uniform()
< self.gnet_manager.get_exploration_rate(
self.gnet_manager.gnet_state, self.idx2goalnet[subgoal_id]
)
):
action = np.random.choice(self.num_actions)
elif (
eval
and self.env_name == "four_rooms_3d"
and np.random.uniform() < self.eps_eval
):
action = np.random.choice(self.num_actions)
else:
if self.env_name == "two_keys":
action = self.select_action(state, subgoal_coord, c_goal)
else:
action = self.select_action(
state_stack, subgoal_coord, c_goal_stack, action_stack
)
if (
self.env_name == "four_rooms_3d"
or self.env_name == "ai2thor_kitchen"
):
action_stack = np.append(action_stack[1:], action)
new_obs, reward, episode_done, info = self.env.step(action)
new_state = self.process_obs(new_obs)
n_goal = self.gnet_manager.current_goal_state()
if self.env_name == "two_keys":
new_obs = new_state
if self.env_name == "four_rooms_3d":
if self.use_ga:
n_goal = [
self.env.goal.pos[0],
self.env.goal.pos[2],
self.env.yellow_subgoal.pos[0],
self.env.yellow_subgoal.pos[2],
self.env.blue_subgoal.pos[0],
self.env.blue_subgoal.pos[2],
] + n_goal
else:
n_goal = [
self.env.agent.pos[0],
self.env.agent.pos[2],
self.env.goal.pos[0],
self.env.goal.pos[2],
self.env.yellow_subgoal.pos[0],
self.env.yellow_subgoal.pos[2],
self.env.blue_subgoal.pos[0],
self.env.blue_subgoal.pos[2],
]
elif self.env_name == "ai2thor_kitchen":
if self.use_ga:
n_goal = self.env.fridge_pos + self.env.light_pos + n_goal
else:
n_goal = (
[
self.env.last_meta["agent"]["position"]["x"],
self.env.last_meta["agent"]["position"]["z"],
]
+ self.env.fridge_pos
+ self.env.light_pos
)
total_steps += 1
if self.gnet_manager.check_goal_satisfied(
self.idx2goalnet[subgoal_id]
):
subgoal_achieved = True
subgoal_reward = 1
else:
for path in self.gnet_manager.goal_paths:
for gnet_state in self.gnet_manager.gnet_goals[
path.current_gnet_goal
]["goal_selection_options"]:
if (
self.gnet_manager.check_goal_satisfied(gnet_state)
and gnet_state != "end"
):
other_subgoal_achieved = True
achieved_parent_goal = path.current_gnet_goal
achieved_subgoal_id = self.idx2goalnet.index(
gnet_state
)
subgoal_reward = 0
if train:
if episode_done or subgoal_achieved or other_subgoal_achieved:
done = True
else:
done = False
subgoal_data.append(
(
obs,
action,
done,
subgoal_reward,
new_obs,
self.gnet_manager.target_goal_state,
c_goal,
n_goal,
)
)
goal_data.append((obs, episode_done))
if self.env_name == "two_keys":
self.action_replay_memory.save(
obs,
action,
done,
subgoal_reward,
new_obs,
self.gnet_manager.target_goal_state,
c_goal,
n_goal,
)
else:
trajectory_data.append(
(
obs,
action,
done,
subgoal_reward,
new_obs,
self.gnet_manager.target_goal_state,
c_goal,
n_goal,
)
)
state = new_state
obs = new_obs
total_reward += reward
extrinsic_reward += reward
if not eval:
self.frame += 1
if (
train
and self.frame > self.min_buffer
and self.frame > self.minibatch_size
):
if self.frame % self.update_interval == 0:
self.update_action_model()
if self.frame % self.clone_interval == 0:
self.clone(
self.clone_action_model, self.action_model, self.tau
)
if self.frame % 100 == 0:
progress_bar.set_description("frame = {}".format(self.frame))
if subgoal_achieved:
r_gnet_goal = self.gnet_manager.gnet_state
r_subgoal_id = subgoal_id
elif (
self.env_name == "two_keys"
and self.env.agent_pos.tolist() == self.env.goal_pos.tolist()
):
r_gnet_goal = self.gnet_manager.gnet_state
r_subgoal_id = self.idx2goalnet.index("end")
elif self.env_name == "four_rooms_3d" and self.env.near(self.env.goal):
r_gnet_goal = self.gnet_manager.gnet_state
r_subgoal_id = self.idx2goalnet.index("end")
elif other_subgoal_achieved:
r_gnet_goal = achieved_parent_goal
r_subgoal_id = achieved_subgoal_id
if train:
add_experience = False
new_goal = self.gnet_manager.current_goal_state()
first = True
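                # Hindsight relabelling (HER): the transitions of this
                # subgoal attempt are re-saved with the goal state that was
                # actually reached, and the final transition gets reward 1.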
if self.env_name == "two_keys":
for (
r_state,
r_action,
r_done,
_,
r_new_state,
_,
c_goal,
n_goal,
) in subgoal_data[::-1]:
if first:
new_reward = 1
first = False
else:
new_reward = 0
self.action_replay_memory.save(
r_state,
r_action,
r_done,
new_reward,
r_new_state,
new_goal,
c_goal,
n_goal,
)
else:
(
old_state,
old_action,
old_done,
old_reward,
old_new_state,
old_goal,
old_c_goal,
old_n_goal,
) = subgoal_data[-1]
subgoal_data[-1] = (
old_state,
old_action,
old_done,
1,
old_new_state,
old_goal,
old_c_goal,
old_n_goal,
)
for (
r_state,
r_action,
r_done,
r_reward,
r_new_state,
_,
r_c_goal,
r_n_goal,
) in subgoal_data:
her_trajectory_data.append(
(
r_state,
r_action,
r_done,
r_reward,
r_new_state,
new_goal,
r_c_goal,
r_n_goal,
)
)
self.gnet_manager.update_success_rate(
self.idx2goalnet[subgoal_id], subgoal_achieved
)
goal_steps += 1
if subgoal_achieved:
subgoal_progress[self.idx2goalnet[subgoal_id]].append(1)
add_experience = True
# Add a relabelled transition for the goal selection model
elif (
self.env_name == "two_keys"
and self.env.agent_pos.tolist() == self.env.goal_pos.tolist()
):
add_experience = True
elif self.env_name == "four_rooms_3d" and self.env.near(
self.env.goal
):
add_experience = True
elif other_subgoal_achieved:
subgoal_progress[self.idx2goalnet[subgoal_id]].append(0)
add_experience = True
else:
subgoal_progress[self.idx2goalnet[subgoal_id]].append(0)
if add_experience:
steps = 1
next_gnet_state = self.idx2goalnet[r_subgoal_id]
self.gnet_manager.set_state(r_gnet_goal, next_gnet_state)
if self.env_name != "ai2thor_kitchen":
(
next_goalnet_code,
next_mask,
) = self.gnet_manager.generate_goal_mask()
if self.env_name == "four_rooms_3d":
hi_n_goal = [
self.env.agent.pos[0],
self.env.agent.pos[2],
self.env.goal.pos[0],
self.env.goal.pos[2],
self.env.yellow_subgoal.pos[0],
self.env.yellow_subgoal.pos[2],
self.env.blue_subgoal.pos[0],
self.env.blue_subgoal.pos[2],
]
for r_state, r_done in goal_data[::-1]:
if self.env_name == "two_keys":
self.goal_replay_memory.save(
r_state,
r_subgoal_id,
episode_done,
extrinsic_reward,
new_state,
goalnet_code,
self.gnet_manager.gnet_goals[next_gnet_state][
"code"
],
mask,
next_mask,
steps,
)
                            elif self.env_name == "four_rooms_3d":
self.goal_replay_memory.save(
r_state,
r_subgoal_id,
episode_done,
extrinsic_reward,
new_obs,
goalnet_code,
                                    self.gnet_manager.gnet_goals[next_gnet_state]["code"],
mask,
next_mask,
steps,
hi_c_goal,
hi_n_goal,
)
steps += 1
if self.env_name != "ai2thor_kitchen":
if self.goal_replay_memory.current_size > self.minibatch_size:
self.update_goal_model()
if goal_steps % self.goal_clone_interval == 0:
self.clone(
self.clone_goal_model, self.goal_model, self.goal_tau
)
elif (subgoal_achieved or other_subgoal_achieved) and not episode_done:
self.gnet_manager.set_state(
r_gnet_goal, self.idx2goalnet[r_subgoal_id]
)
state_goal_selection = new_state
if self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
for (
r_state,
r_action,
r_done,
r_reward,
r_new_state,
r_target_goal,
r_c_goal,
r_n_goal,
) in trajectory_data:
self.action_replay_memory.save(
r_state,
r_action,
r_done,
r_reward,
r_new_state,
r_target_goal,
r_c_goal,
r_n_goal,
)
self.action_replay_memory.update_trajectory_id()
for (
r_state,
r_action,
r_done,
r_reward,
r_new_state,
r_target_goal,
r_c_goal,
r_n_goal,
) in her_trajectory_data:
self.action_replay_memory.save(
r_state,
r_action,
r_done,
r_reward,
r_new_state,
r_target_goal,
r_c_goal,
r_n_goal,
)
self.action_replay_memory.update_trajectory_id()
reward_history.append(total_reward)
steps_history.append(total_steps)
if not eval and episode % self.eval_every == 0 and episode > 0:
if self.env_name != "ai2thor_kitchen":
self.goal_model.eval()
self.action_model.eval()
with torch.no_grad():
eval_rewards = []
eval_steps = []
if self.env_name == "ai2thor_kitchen":
for i in range(1, 31):
results = self.run(
1, train=False, eval=True, floor_idx=i, seed=1
)
eval_rewards.append(results["reward"][0])
eval_steps.append(results["steps"][0])
else:
for i in range(self.eval_episodes):
results = self.run(1, train=False, eval=True)
eval_rewards.append(results["reward"][0])
eval_steps.append(results["steps"][0])
result_data["episode"].append(episode)
result_data["reward"].append(eval_rewards)
result_data["steps"].append(eval_steps)
result_data["frames"].append(self.frame)
if self.env_name != "ai2thor_kitchen":
self.goal_model.train()
self.action_model.train()
if do_print:
self.print_stats(
reward_history, steps_history, result_data, subgoal_progress
)
elif eval:
result_data["episode"].append(episode)
result_data["reward"].append(total_reward)
result_data["steps"].append(total_steps)
if save_checkpoints and (
episode % self.save_every == 0 and episode > 0 and not eval
):
self.save(result_data)
return result_data
```
#### File: agents/gnet_agent/replay_memory.py
```python
from agents.base.replay_memory import ReplayMemory
import numpy as np
import random
import torch
class GNetActionReplayMemory(ReplayMemory):
def __init__(
self,
size,
goal_space_size,
device,
minibatch_size,
env_name,
use_ga,
load=False,
):
super(GNetActionReplayMemory, self).__init__(
size, device, minibatch_size, env_name, load, "./save/low_replay"
)
self.goals = np.empty(self.size, dtype=(np.int32, goal_space_size))
self.use_ga = use_ga
self.gnet_model = True
if self.env_name == "two_keys":
self.cur_goal_s = np.empty(self.size, dtype=(np.int32, goal_space_size))
self.next_goal_s = np.empty(self.size, dtype=(np.int32, goal_space_size))
elif self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
            # Use a separate name for the goal-vector dimensionality so the
            # replay buffer capacity argument `size` is not shadowed
            if use_ga:
                goal_dim = goal_space_size + 6
            else:
                goal_dim = 8
            self.cur_goal_s = np.empty(self.size, dtype=(np.float32, goal_dim))
            self.next_goal_s = np.empty(self.size, dtype=(np.float32, goal_dim))
if load:
self.load_from_disk()
def save(self, state, action, done, reward, next_state, goal, c_goal, n_goal):
super(GNetActionReplayMemory, self).save(
state, action, done, reward, next_state
)
self.goals[self.current_index] = goal
self.cur_goal_s[self.current_index] = c_goal
self.next_goal_s[self.current_index] = n_goal
self.current_size = max(self.current_size, self.current_index + 1)
self.current_index = (self.current_index + 1) % self.size
def load_from_disk(self):
super(GNetActionReplayMemory, self).load_from_disk()
infile = open(self.save_dir + "/goals.npy", "rb")
self.goals = np.load(infile)
infile.close()
infile = open(self.save_dir + "/cur_goal_s.npy", "rb")
self.cur_goal_s = np.load(infile)
infile.close()
infile = open(self.save_dir + "/next_goal_s.npy", "rb")
self.next_goal_s = np.load(infile)
infile.close()
def save_to_disk(self):
super(GNetActionReplayMemory, self).save_to_disk()
outfile = open(self.save_dir + "/goals.npy", "wb")
np.save(outfile, self.goals)
outfile.close()
outfile = open(self.save_dir + "/cur_goal_s.npy", "wb")
np.save(outfile, self.cur_goal_s)
outfile.close()
outfile = open(self.save_dir + "/next_goal_s.npy", "wb")
np.save(outfile, self.next_goal_s)
outfile.close()
def retrieve(self):
indexes = [
random.randint(0, self.current_size - 1) for i in range(self.minibatch_size)
]
return (
torch.as_tensor(self.states[indexes], device=self.device).float(),
torch.as_tensor(self.actions[indexes], device=self.device).long(),
torch.as_tensor(self.rewards[indexes], device=self.device),
torch.as_tensor(self.done_flags[indexes], device=self.device),
torch.as_tensor(self.next_states[indexes], device=self.device).float(),
torch.as_tensor(self.goals[indexes], device=self.device).long(),
torch.as_tensor(self.cur_goal_s[indexes], device=self.device).long(),
torch.as_tensor(self.next_goal_s[indexes], device=self.device).long(),
)
def get_frame_stack(self, ref_indexes):
if self.use_ga:
if self.env_name == "four_rooms_3d":
size = 11
elif self.env_name == "ai2thor_kitchen":
size = 10
else:
size = 8
return super(GNetActionReplayMemory, self).get_frame_stack(ref_indexes, size)
# Retrieve function for environments that use frame stacking
def retrieve_frame_stack(self):
indexes = [
np.random.randint(self.current_size) for i in range(self.minibatch_size)
]
return (
torch.as_tensor(self.rewards[indexes], device=self.device),
torch.as_tensor(self.done_flags[indexes], device=self.device),
torch.as_tensor(self.goals[indexes], device=self.device).long(),
indexes,
)
class GNetGoalReplayMemory(ReplayMemory):
def __init__(self, size, num_goals, device, minibatch_size, env_name, load=False):
super(GNetGoalReplayMemory, self).__init__(
size, device, minibatch_size, env_name, load, "./save/high_replay"
)
self.gnet_states = np.empty((self.size, num_goals), dtype=np.int32)
self.gnet_masks = np.empty((self.size, num_goals), dtype=np.float32)
self.next_gnet_states = np.empty((self.size, num_goals), dtype=np.int32)
self.next_gnet_masks = np.empty((self.size, num_goals), dtype=np.float32)
self.steps = np.empty(self.size, dtype=np.int32)
if self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
self.state_extra = np.empty(self.size, dtype=(np.float32, 8))
self.next_state_extra = np.empty(self.size, dtype=(np.float32, 8))
if load:
self.load_from_disk()
def save(
self,
state,
action,
done,
reward,
next_state,
gnet_state,
next_gnet_state,
gnet_mask,
next_gnet_mask,
steps,
state_extra=None,
next_state_extra=None,
):
super(GNetGoalReplayMemory, self).save(state, action, done, reward, next_state)
self.gnet_states[self.current_index] = gnet_state
self.gnet_masks[self.current_index] = gnet_mask
self.next_gnet_states[self.current_index] = next_gnet_state
self.next_gnet_masks[self.current_index] = next_gnet_mask
self.steps[self.current_index] = steps
if self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
self.state_extra[self.current_index] = state_extra
self.next_state_extra[self.current_index] = next_state_extra
self.current_size = max(self.current_size, self.current_index + 1)
self.current_index = (self.current_index + 1) % self.size
def load_from_disk(self):
super(GNetGoalReplayMemory, self).load_from_disk()
infile = open(self.save_dir + "/gnet_states.npy", "rb")
self.gnet_states = np.load(infile)
infile.close()
infile = open(self.save_dir + "/gnet_masks.npy", "rb")
self.gnet_masks = np.load(infile)
infile.close()
infile = open(self.save_dir + "/next_gnet_states.npy", "rb")
self.next_gnet_states = np.load(infile)
infile.close()
infile = open(self.save_dir + "/next_gnet_masks.npy", "rb")
self.next_gnet_masks = np.load(infile)
infile.close()
infile = open(self.save_dir + "/steps.npy", "rb")
self.steps = np.load(infile)
infile.close()
infile = open(self.save_dir + "/state_extra.npy", "rb")
self.state_extra = np.load(infile)
infile.close()
infile = open(self.save_dir + "/next_state_extra.npy", "rb")
self.next_state_extra = np.load(infile)
infile.close()
def save_to_disk(self):
super(GNetGoalReplayMemory, self).save_to_disk()
outfile = open(self.save_dir + "/gnet_states.npy", "wb")
np.save(outfile, self.gnet_states)
outfile.close()
outfile = open(self.save_dir + "/gnet_masks.npy", "wb")
np.save(outfile, self.gnet_masks)
outfile.close()
outfile = open(self.save_dir + "/next_gnet_states.npy", "wb")
np.save(outfile, self.next_gnet_states)
outfile.close()
outfile = open(self.save_dir + "/next_gnet_masks.npy", "wb")
np.save(outfile, self.next_gnet_masks)
outfile.close()
outfile = open(self.save_dir + "/steps.npy", "wb")
np.save(outfile, self.steps)
outfile.close()
outfile = open(self.save_dir + "/state_extra.npy", "wb")
np.save(outfile, self.state_extra)
outfile.close()
outfile = open(self.save_dir + "/next_state_extra.npy", "wb")
np.save(outfile, self.next_state_extra)
outfile.close()
def retrieve(self):
indexes = [
np.random.randint(self.current_size) for i in range(self.minibatch_size)
]
if self.env_name == "four_rooms_3d" or self.env_name == "ai2thor_kitchen":
state_extra = torch.as_tensor(
self.state_extra[indexes], device=self.device
).float()
next_state_extra = torch.as_tensor(
self.next_state_extra[indexes], device=self.device
).float()
else:
state_extra = None
next_state_extra = None
return (
torch.as_tensor(self.states[indexes], device=self.device).float(),
torch.as_tensor(self.actions[indexes], device=self.device).long(),
torch.as_tensor(self.rewards[indexes], device=self.device),
torch.as_tensor(self.done_flags[indexes], device=self.device),
torch.as_tensor(self.next_states[indexes], device=self.device).float(),
torch.as_tensor(self.gnet_states[indexes], device=self.device).long(),
torch.as_tensor(self.gnet_masks[indexes], device=self.device).float(),
torch.as_tensor(self.next_gnet_states[indexes], device=self.device).long(),
torch.as_tensor(self.next_gnet_masks[indexes], device=self.device).float(),
torch.as_tensor(self.steps[indexes], device=self.device).long(),
state_extra,
next_state_extra,
)
```
#### File: goal_modelling_rl/env/four_rooms_subgoals_3d.py
```python
from gym_miniworld.params import DEFAULT_PARAMS
from gym_miniworld.entity import Box
from gym_miniworld.miniworld import MiniWorldEnv
from gym import spaces
import numpy as np
class FourRoomsSubgoals3D(MiniWorldEnv):
def __init__(
self,
max_episode_steps=300,
preset=False,
forward_step=0.6,
turn_step=30,
**kwargs
):
self.preset = preset
params = DEFAULT_PARAMS.no_random()
params.set("forward_step", forward_step, forward_step - 0.1, forward_step + 0.1)
params.set("turn_step", turn_step, turn_step - 10, turn_step + 10)
super().__init__(max_episode_steps=max_episode_steps, params=params, **kwargs)
self.action_space = spaces.Discrete(self.actions.move_back + 1)
def _gen_world(self):
# Taken from the gym miniworld fourrooms env
# Top-left room
room0 = self.add_rect_room(min_x=-7, max_x=-1, min_z=1, max_z=7)
# Top-right room
room1 = self.add_rect_room(min_x=1, max_x=7, min_z=1, max_z=7)
# Bottom-right room
room2 = self.add_rect_room(min_x=1, max_x=7, min_z=-7, max_z=-1)
# Bottom-left room
room3 = self.add_rect_room(min_x=-7, max_x=-1, min_z=-7, max_z=-1)
# Add openings to connect the rooms together
self.connect_rooms(room0, room1, min_z=3, max_z=5, max_y=2.2)
self.connect_rooms(room1, room2, min_x=3, max_x=5, max_y=2.2)
self.connect_rooms(room2, room3, min_z=-5, max_z=-3, max_y=2.2)
self.connect_rooms(room3, room0, min_x=-5, max_x=-3, max_y=2.2)
# Custom part
self.reached_yellow_subgoal = False
self.reached_blue_subgoal = False
if self.preset:
self.blue_subgoal = self.place_entity(Box(color="blue"), room=self.rooms[1])
self.yellow_subgoal = self.place_entity(
Box(color="yellow"), room=self.rooms[2]
)
self.goal = self.place_entity(Box(color="green"), room=self.rooms[3])
self.place_agent(dir=0, room=self.rooms[0])
else:
goal_agent_rooms = np.random.choice([0, 1, 2, 3], 2, replace=False)
subgoal_rooms = np.random.choice([0, 1, 2, 3], 2)
self.goal = self.place_entity(
Box(color="green"), room=self.rooms[goal_agent_rooms[0]]
)
self.place_agent(room=self.rooms[goal_agent_rooms[1]])
self.blue_subgoal = self.place_entity(
Box(color="blue"), room=self.rooms[subgoal_rooms[0]]
)
while self.near(self.blue_subgoal, self.goal, 2.5) or self.near(
self.blue_subgoal, self.agent, 2.5
):
self.entities.remove(self.blue_subgoal)
self.blue_subgoal = self.place_entity(
Box(color="blue"), room=self.rooms[subgoal_rooms[0]]
)
self.yellow_subgoal = self.place_entity(
Box(color="yellow"), room=self.rooms[subgoal_rooms[1]]
)
while (
self.near(self.yellow_subgoal, self.goal, 2.5)
or self.near(self.yellow_subgoal, self.agent, 2.5)
or self.near(self.yellow_subgoal, self.blue_subgoal, 2.5)
):
self.entities.remove(self.yellow_subgoal)
self.yellow_subgoal = self.place_entity(
Box(color="yellow"), room=self.rooms[subgoal_rooms[1]]
)
def step(self, action):
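        # Subgoal boxes disappear the first time the agent reaches them; the
        # episode ends at the green goal box with a time-scaled reward that
        # is zeroed out unless both subgoals were visited first.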
obs, reward, done, info = super().step(action)
if self.near(self.yellow_subgoal) and not self.reached_yellow_subgoal:
self.reached_yellow_subgoal = True
self.entities.remove(self.yellow_subgoal)
elif self.near(self.blue_subgoal) and not self.reached_blue_subgoal:
self.reached_blue_subgoal = True
self.entities.remove(self.blue_subgoal)
elif self.near(self.goal):
done = True
reward = 1 - 0.9 * (self.step_count / self.max_episode_steps)
if done and not (self.reached_blue_subgoal and self.reached_yellow_subgoal):
reward = 0
return obs, reward, done, info
# Replace the near function
def near(self, ent0, ent1=None, limit=1.5):
        if ent1 is None:
ent1 = self.agent
dist = np.linalg.norm(ent0.pos - ent1.pos)
return dist < limit
``` |
{
"source": "JLeung46/Customer-Segmentation",
"score": 4
} |
#### File: JLeung46/Customer-Segmentation/kmeans_model.py
```python
from pyspark.ml.clustering import KMeans
from pyspark.ml.feature import VectorAssembler
class KMeansModel:
"""
Class to train a KMeans model and return the predictions
"""
def __init__(self):
self.assembled_df = None
def assemble_features(self, df, feature_names, output_col_name):
vec_assembler = VectorAssembler(inputCols=feature_names, outputCol=output_col_name)
assembler_df = vec_assembler.transform(df)
self.assembled_df = assembler_df
    def train(self, k=5, seed=3):
        # Fit on the DataFrame produced by assemble_features; the assembled
        # vector column is expected to be named "features"
        kmeans = KMeans().setK(k).setSeed(seed)
        model = kmeans.fit(self.assembled_df.select("features"))
        transformed = model.transform(self.assembled_df)
        preds = transformed.select("prediction")
return preds
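
# Minimal usage sketch (assumed column names and a hypothetical `spark_df`):
#
#   model = KMeansModel()
#   model.assemble_features(spark_df, ["recency", "frequency", "monetary"],
#                           "features")
#   predictions = model.train(k=5, seed=3)
#
# train() expects the assembled vector column to be named "features", so the
# same name should be passed as `output_col_name` to assemble_features.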
```
#### File: JLeung46/Customer-Segmentation/split_files.py
```python
import os
import sqlite3
import csv
def split_file(db_name, table_name, output_path, num_samples=100000):
    """
    Splits a SQL database table into multiple csv files,
    writing up to `num_samples` rows per file.
    The default of 100000 rows per file is an assumed value; the calls in
    the __main__ block below do not pass this argument.
    """
    output_name_template = 'output_%s.csv'
current_piece = 1
# Create a connection and get a cursor
connection = sqlite3.connect(db_name)
cursor = connection.cursor()
# Execute the query
    # Table names cannot be bound as SQL parameters, so the query string is
    # formatted directly (table_name is assumed to be trusted input)
    cursor.execute('select * from {}'.format(table_name))
# Get data in batches
    while True:
        rows = cursor.fetchmany(num_samples)
        if len(rows) == 0:
            break
        current_out_path = os.path.join(
            output_path,
            output_name_template % current_piece
        )
        # Write this batch to its own csv file; the with-block closes and
        # flushes each piece before moving on to the next one
        with open(current_out_path, 'w', encoding="utf-8", newline='') as f:
            outcsv = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)
            outcsv.writerows(rows)
        current_piece += 1
    # Clean up
cursor.close()
connection.close()
if __name__ == '__main__':
split_file('database.sqlite', 'SearchInfo', 'search_info_files')
split_file('database.sqlite', 'trainSearchStream', 'train_search_stream_files')
``` |
{
"source": "jleung51-coursework/phaser",
"score": 4
} |
#### File: build/scripts-auth/authfields.py
```python
import json
# Return sort key for AuthTable entries
def key(a):
return (a['Partition'], a['Row'])
# Return a list of non-required fields in an AuthTable entry
def other_fields(e):
result = []
for fn in e:
if fn not in {'Partition', 'Row', 'DataPartition', 'DataRow', 'Password'}:
result.append((fn,e[fn]))
return result
# Return an AuthTable entry as a tuple with its fields in a specified order
def entry_to_list(e):
return [e['Row'], e['Password'], e['DataPartition'], e['DataRow']] + other_fields(e)
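
# Example with a hypothetical entry: {'Partition': 'Userid', 'Row': 'alice',
# 'Password': 'pw', 'DataPartition': 'DataTable', 'DataRow': 'alice',
# 'Friends': '[]'} is printed as
# ['alice', 'pw', 'DataTable', 'alice', ('Friends', '[]')].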
# Main routine
array = json.loads(input())
array.sort(key=key)
for obj in array:
    print(entry_to_list(obj))
``` |
{
"source": "jleung51/scripts",
"score": 3
} |
#### File: scripts/battery_notifier/battery_notifier.py
```python
import configparser
import os
import subprocess
import sys
import time
# Custom modules:
from logger import Logger
from slack_messenger import SlackMessenger
# Change the values in this array to modify at what percentages the
# notification should be sent.
alert_percentages = [20, 50]
def report_battery_level(slack_config, battery_level):
if slack_config.getboolean("reporting") is True:
slack_messenger = SlackMessenger(
slack_config["report_slack_token"],
slack_config["report_channel"],
slack_config["report_slackbot_name"]
)
slack_messenger.message(
"Current laptop battery level: " + str(battery_level) + "%."
)
def alert_battery_level(slack_config, alert_level):
slack_messenger = SlackMessenger(
slack_config["alert_slack_token"],
slack_config["alert_channel"],
slack_config["alert_slackbot_name"]
)
slack_messenger.notify(
slack_config["alert_list"],
"Laptop battery is below " + str(alert_level) + "%."
)
def alert_error(slack_config):
slack_messenger = SlackMessenger(
slack_config["alert_slack_token"],
slack_config["alert_channel"],
slack_config["alert_slackbot_name"]
)
slack_messenger.notify(
slack_config["alert_list"],
"Internal error for Battery Notifier, please check the logs."
)
def run_cmd(args):
'''Executes a set of arguments in the command line.
Arguments:
args -- Arguments to execute.
Returns:
string -- Output (stdout) from the execution.
'''
return subprocess.run(args, stdout=subprocess.PIPE).stdout.decode("utf-8")
def find_line_with(lines, str):
'''Returns the first line with the given search term.
Arguments:
lines (string) -- Set of lines, separated by newlines.
str (string) -- The given search term.
Returns:
        string -- The first occurrence of a line containing the search term.
'''
for line in lines.split("\n"):
if str in line:
return line
def get_battery_percentage():
'''Returns the current battery level in percent.
Returns:
int -- The current battery level in percent.
'''
# Parse power data from OS
power_files = run_cmd(["upower", "-e"])
power_file = find_line_with(power_files, "BAT")
power_data = run_cmd(["upower", "-i", power_file])
percentage_line = find_line_with(power_data, "percentage")
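    # The matched line typically looks like "    percentage:          85%";
    # collecting only the digit characters below recovers the integer value.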
# Parse percentage from string with various characters into number
current_percent = ""
for char in percentage_line:
if char.isdigit():
current_percent += char
return int(current_percent)
def main(config):
slack_config = config["Slack"]
current_percent = get_battery_percentage()
Logger.debug("Current battery: " + str(current_percent) + "%")
report_battery_level(slack_config, current_percent)
battery_level_filename = "/tmp/last_battery_level"
# Open file for reading and writing, or create one if it does not exist
try:
# Read and write existing file
file = open(battery_level_filename, mode='r+')
except IOError:
Logger.debug("Battery state file does not exist; creating new file.")
# Read and write new file
file = open(battery_level_filename, mode='x+')
file.write(str(current_percent))
file.flush()
file.seek(0) # Rewind to beginning of file
file_contents = file.read()
try:
last_percent = int(file_contents)
except ValueError:
Logger.error("Last battery level could not be parsed. Contents: ")
        Logger.error(file_contents)
Logger.error("Recreating battery state file.")
last_percent = current_percent
# # Tester
# current_percent = 0
# last_percent = 100
if current_percent < last_percent:
alert_percentages.sort() # Only alert for the lowest percentage
for i in alert_percentages:
if current_percent <= i and i < last_percent:
Logger.info("Alert: Battery is below " + str(i) + "%.")
alert_battery_level(slack_config, i)
                break
# Replace previous percentage with new percentage
file.seek(0)
file.write(str(current_percent))
file.truncate()
file.close()
if __name__ == "__main__":
location = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
config_filename = os.path.join(location, "battery_notifier.cfg")
config = configparser.ConfigParser()
config.read(config_filename)
try:
main(config)
except Exception as e:
alert_error(config["Slack"])
raise
```
#### File: mp3_formatter/mp3_formatter/url_scrape_div.py
```python
import lxml.html
import requests
import sys
def validate_url(url):
"""Ensure the URL is non-empty and uses the HTTP protocol.
"""
if not url:
raise SystemError("validate_url() was given an empty URL")
protocol = "http://"
protocol_error_message = ValueError("A URL beginning with " \
"'http://' is required")
if len(url) < len(protocol):
raise protocol_error_message
if url[:len(protocol)] != protocol:
raise protocol_error_message
def scrape_div(url, div_id):
"""Return the content of the div at the given URL.
"""
div_id_lookup_string = '//div[contains(@id, "' + div_id + '")]'
try:
html_page = requests.get(url)
    except requests.RequestException as e:
        raise ValueError("Request could not be completed ({}). Perhaps the " \
            "URL provided was invalid?".format(e))
html_page.raise_for_status()
html_tree = lxml.html.fromstring(html_page.content)
content = html_tree.xpath(div_id_lookup_string)
if len(content) < 1:
raise LookupError("The requested div could not be found")
elif len(content) > 1:
raise LookupError("More than one of the requested divs were found")
return str(content[0].text_content())
def extract_tracklist_begin_num(content):
"""Return list of track names extracted from messy web content.
The name of a track is defined as a line which begins with a number
(excluding whitespace).
"""
tracklist = []
for line in content.splitlines():
# Empty line
if not line:
continue
        # Strip leading and trailing whitespace (string methods return new
        # strings, so the result must be reassigned)
        line = line.strip()
        # Skip lines that contained only whitespace
        if line and line[0].isdigit():
tracklist.append(line)
return tracklist
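
# As an example (hypothetical content), a line such as
# "03. Some Track Title" starts with a digit and is kept as a track name,
# while blank and purely textual lines are skipped.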
def strip_leading_number(tracklist):
"""Remove the leading numbers for each track.
"""
for track in tracklist:
i = 0
while(track[i].isdigit()):
i += 1
tracklist[tracklist.index(track)] = track[i:]
if len(sys.argv) < 2:
raise RuntimeError("Please provide the URL to the page with "\
"the target tracklist")
url = sys.argv[1] # sys.argv[0] is the name of this script
validate_url(url)
div_id = "stcpDiv"
content = scrape_div(url, div_id)
tracklist = extract_tracklist_begin_num(content)
strip_leading_number(tracklist)
for track in tracklist:
print(track.strip())
```
#### File: scripts/traffic_monitor/traffic_monitor.py
```python
import sys
# Custom modules
from logger import Logger
from bing_api import BingApi
from google_api import GmailApi
# Configuration for traffic incident data:
# Bing Maps authentication key for map data requests.
# Details about the key parameter for the HTTP request:
# https://msdn.microsoft.com/en-ca/library/ff701720.aspx
#
# Go to the Bing Maps Portal to retrieve your key:
# https://www.bingmapsportal.com/
# Sign in, go to "My Account" -> "My Keys", create a new key, and fill out
# the form. Paste the key into the variable below.
bing_maps_auth_key = ""
# Coordinates of the bounding box where traffic incidents are to be monitored.
# Details about the bounding box:
# https://msdn.microsoft.com/en-us/library/ff701726.aspx
#
# To find a coordinate, go to Google Maps (yes, I'm aware of the irony):
# https://maps.google.com/
# Right-click on any point and select "What's here?". A small box will appear
# with the coordinate at that location
coordinate_southwest = "45.219, -122.325"
coordinate_northeast = "46.610, -122.107"
# Severity and type of traffic incident.
# Details about severity and type:
# https://msdn.microsoft.com/en-ca/library/hh441726.aspx
# See the lists below for the interpretation of each level.
#
# Keep only the security levels and types which you want to be notified of,
# and remove the rest.
severity = "1, 2, 3, 4"
incident_type = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11"
# Configuration for email notifications:
# Application name from which the email is sent
mail_source_application_name = "Traffic Monitor"
# Gmail account from which your notification emails will be sent.
#
# This should include the "@gmail.com".
mail_source_email = "<EMAIL>"
# Destination email account to which your notification emails will be sent.
#
# This should include the "@email.com".
mail_target_email = "<EMAIL>"
# Configuration for a report to a Slack channel:
# Change this to True and fill in the following fields if you would like
# to send a report; otherwise, ignore the following fields
report = False
# The API token of the Slackbot (see README:Setup:Reports to a Slack Channel)
# E.g. "<KEY>"
report_slack_token = ""
# The name of the channel (without a "#")
# E.g. "random"
report_channel = ""
# The name of the Slackbot user which will send the message
# E.g. "Traffic Monitor Slackbot"
report_slackbot_name = ""
# The usernames of Slack users who should be alerted upon a failure
# Each username must begin with a "@"
# E.g. "@jleung51 | @jleung52 | @jleung53"
report_alert_list = ""
# Conditional imports
# Do not modify if you are setting up this script!
if report:
from slack_messenger import SlackMessenger
def slack_report_message(operation_status, message_text):
if report:
s = SlackMessenger(
report_slack_token, report_channel, report_slackbot_name
)
s.operation_report(operation_status, message_text)
Logger.debug("Slack report sent.")
def slack_notify_users(alert_users, message_text):
if report:
s = SlackMessenger(
report_slack_token, report_channel, report_slackbot_name
)
s.notify(alert_users, message_text)
Logger.debug("Slack alert sent.")
def string_list_from(original_list):
string_list = []
for i in original_list:
string_list.append(str(i))
return string_list
def send_email_with_incidents(incidents):
message_text = ""
for i in incidents:
description = i.get("description")
description = description[0].lower() + description[1:]
message_text += \
results.get("severity") + " traffic disruption. " + \
results.get("type") + " " + \
description + " Coordinates: (" + \
", ".join(string_list_from(i.get("coordinates"))) + ")" + \
".\n\n"
message_text += \
"\nSincerely,\n\n" + \
"- Your friendly neighborhood Traffic Monitor"
email_sender = GmailApi(
mail_source_email, mail_source_application_name
)
email_sender.send_email(
mail_target_email, "Traffic Incident Alert", message_text
)
slack_report_message(
"*SUCCESS*",
"Traffic incident alert sent to " + mail_target_email + "."
)
def main():
b = BingApi(bing_maps_auth_key)
incidents = b.get_traffic_data_readable(
coordinate_southwest, coordinate_northeast,
severity, incident_type)
if len(incidents) > 0:
send_email_with_incidents(incidents)
log_message = "Traffic check completed. " + str(len(incidents)) + \
" incidents reported."
else:
log_message = "Traffic check completed. No incidents reported."
Logger.success(log_message)
slack_report_message("*SUCCESS*", log_message)
if __name__ == "__main__":
try:
main()
except Exception as e:
slack_report_message(
report_alert_list, "*ERROR*: Please check the logs."
)
raise
``` |
{
"source": "jleuschn/adler",
"score": 2
} |
#### File: adler/tensorflow/training.py
```python
import demandimport
with demandimport.enabled():
import tensorflow as tf
import numpy as np
__all__ = ('cosine_decay', 'ema_wrapper', 'EMAHelper')
def cosine_decay(learning_rate, global_step, maximum_steps,
name=None):
"""
"""
from tensorflow.python.ops import math_ops
from tensorflow.python.framework import ops
if global_step is None:
raise ValueError("global_step is required for cosine_decay.")
with ops.name_scope(name, "CosineDecay",
[learning_rate, global_step, maximum_steps]) as name:
learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
dtype = learning_rate.dtype
global_step = math_ops.cast(global_step, dtype)
maximum_steps = math_ops.cast(maximum_steps, dtype)
p = tf.mod(global_step / maximum_steps, 1)
return learning_rate * (0.5 + 0.5 * math_ops.cos(p * np.pi))
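
# The schedule restarts every `maximum_steps` steps: the progress
# p = (global_step / maximum_steps) mod 1 runs from 0 to 1, and the returned
# rate follows learning_rate * (0.5 + 0.5 * cos(pi * p)), decaying from the
# full learning rate to 0 along a half cosine before the next restart.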
class EMAHelper(object):
def __init__(self, decay=0.99, session=None):
if session is None:
self.session = tf.get_default_session()
else:
self.session = session
self.all_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self.ema = tf.train.ExponentialMovingAverage(decay=decay)
self.apply = self.ema.apply(self.all_vars)
self.averages = [self.ema.average(var) for var in self.all_vars]
def average_dict(self):
ema_averages_results = self.session.run(self.averages)
return {var: value for var, value in
zip(self.all_vars, ema_averages_results)}
def variables_to_restore(self):
return self.ema.variables_to_restore(tf.moving_average_variables())
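
# Typical TF1-style usage (a sketch, not taken from the original sources):
# run `helper.apply` after each optimiser step so the shadow variables track
# the trainable weights, and build an evaluation saver with
# `tf.train.Saver(helper.variables_to_restore())` to load the averaged
# weights at test time.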
def ema_wrapper(is_training, decay=0.99, scope='ema_wrapper', reuse=False):
"""Use Exponential Moving Average of weights during testing.
Parameters
----------
is_training : bool or `tf.Tensor` of type bool
Indicates if the EMA should be applied or not
    decay : float, optional
        Decay rate of the exponential moving average.
Examples
--------
During training, the current value of a is used. During testing, the
exponential moving average is applied instead.
>>> @ema_wrapper(is_training)
... def function(x):
    ...     a = tf.get_variable('a', [], tf.float32)
... return a * x
"""
def function(fun):
def fun_wrapper(*args, **kwargs):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
# Regular call
with tf.variable_scope('function_call') as sc:
result_train = fun(*args, **kwargs)
# Set up exponential moving average
ema = tf.train.ExponentialMovingAverage(decay=decay)
var_class = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
sc.name)
ema_op = ema.apply(var_class)
# Add to collection so they are updated
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ema_op)
# Getter for the variables with EMA applied
def ema_getter(getter, name, *args, **kwargs):
var = getter(name, *args, **kwargs)
ema_var = ema.average(var)
return ema_var if ema_var else var
# Call with EMA applied
with tf.variable_scope('function_call',
reuse=True,
custom_getter=ema_getter):
result_test = fun(*args, **kwargs)
# Return the correct version depending on if we're training or
# not
return tf.cond(is_training,
lambda: result_train, lambda: result_test)
return fun_wrapper
return function
```
#### File: adler/tensorflow/util.py
```python
import os
import shutil
from os.path import join, expanduser, exists
import demandimport
with demandimport.enabled():
import tensorflow as tf
__all__ = ('get_base_dir',
'default_checkpoint_path', 'default_tensorboard_dir',
'summary_writers')
def get_base_dir():
"""Get the data directory."""
base_odl_dir = os.environ.get('ADLER_HOME',
expanduser(join('~', '.adler')))
data_home = join(base_odl_dir, 'tensorflow')
if not exists(data_home):
os.makedirs(data_home)
return data_home
def default_checkpoint_path(name):
checkpoint_dir = join(get_base_dir(), 'checkpoints')
if not exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
checkpoint_path = join(checkpoint_dir,
'{}.ckpt'.format(name))
return checkpoint_path
def default_tensorboard_dir(name):
tensorboard_dir = join(get_base_dir(), 'tensorboard', name)
if not exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
return tensorboard_dir
def summary_writers(name, cleanup=False, session=None, write_graph=True):
if session is None:
session = tf.get_default_session()
dname = default_tensorboard_dir(name)
if cleanup and os.path.exists(dname):
shutil.rmtree(dname, ignore_errors=True)
if write_graph:
graph = session.graph
else:
graph = None
test_summary_writer = tf.summary.FileWriter(dname + '/test', graph)
train_summary_writer = tf.summary.FileWriter(dname + '/train')
return test_summary_writer, train_summary_writer
def run_with_profile(ops, feed_dict, name='profile.json', session=None):
from tensorflow.python.client import timeline
if session is None:
session = tf.get_default_session()
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
result = session.run(ops, feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open(name, 'w') as f:
f.write(chrome_trace)
return result
if __name__ == '__main__':
print('base dir: {}'.format(get_base_dir()))
``` |
{
"source": "jleuschn/dival",
"score": 2
} |
#### File: dival/datasets/lodopab_dataset.py
```python
import os
from warnings import warn
from math import ceil
import numpy as np
import h5py
from zipfile import ZipFile
from tqdm import tqdm
from odl import uniform_discr
import odl.tomo
from dival.datasets.dataset import Dataset
from dival.config import CONFIG, set_config
from dival.util.constants import MU_MAX
from dival.util.zenodo_download import download_zenodo_record
from dival.util.input import input_yes_no
try:
DATA_PATH = CONFIG['lodopab_dataset']['data_path']
except Exception:
raise RuntimeError(
'Could not retrieve config value `lodopab_dataset/data_path`, '
'maybe the configuration (e.g. in ~/.dival/config.json) is corrupt.')
NUM_SAMPLES_PER_FILE = 128
PHOTONS_PER_PIXEL = 4096
ORIG_MIN_PHOTON_COUNT = 0.1
MIN_PT = [-0.13, -0.13]
MAX_PT = [0.13, 0.13]
LEN = {
'train': 35820,
'validation': 3522,
'test': 3553}
NUM_PATIENTS = {
'train': 632,
'validation': 60,
'test': 60}
PATIENT_ID_OFFSETS = {
'train': 0,
'validation': NUM_PATIENTS['train'],
'test': NUM_PATIENTS['train'] + NUM_PATIENTS['validation']}
def download_lodopab():
global DATA_PATH
print('Before downloading, please make sure to have enough free disk '
'space (~150GB). After unpacking, 114.7GB will be used.')
print("path to store LoDoPaB-CT dataset (default '{}'):".format(DATA_PATH))
inp = input()
if inp:
DATA_PATH = inp
set_config('lodopab_dataset/data_path', DATA_PATH)
os.makedirs(DATA_PATH, exist_ok=True)
ZENODO_RECORD_ID = '3384092'
success = download_zenodo_record(ZENODO_RECORD_ID, DATA_PATH)
print('download of LoDoPaB-CT dataset {}'.format('successful' if success
else 'failed'))
if not success:
return False
file_list = ['observation_train.zip', 'ground_truth_train.zip',
'observation_validation.zip', 'ground_truth_validation.zip',
'observation_test.zip', 'ground_truth_test.zip']
print('unzipping zip files, this can take several minutes', flush=True)
for file in tqdm(file_list, desc='unzip'):
filename = os.path.join(DATA_PATH, file)
with ZipFile(filename, 'r') as f:
f.extractall(DATA_PATH)
os.remove(filename)
return True
class LoDoPaBDataset(Dataset):
"""
The LoDoPaB-CT dataset, which is documented in the Data Descriptor article
`<https://www.nature.com/articles/s41597-021-00893-z>`_ and hosted on
`<https://zenodo.org/record/3384092>`_.
It is a simulated low dose CT dataset based on real reconstructions from
the `LIDC-IDRI
<https://wiki.cancerimagingarchive.net/display/Public/LIDC-IDRI>`_ dataset.
The dataset contains 42895 pairs of images and projection data.
For simulation, a ray transform with parallel beam geometry using 1000
angles and 513 detector pixels is used. Poisson noise corresponding to 4096
incident photons per pixel before attenuation is applied to the projection
data. The images have a size of 362x362 px.
An ODL ray transform that corresponds to the noiseless forward operator can
be obtained via the `get_ray_trafo` method of this dataset.
Additionally, the :attr:`ray_trafo` attribute holds a ray transform
instance, which is created during :meth:`__init__`.
*Note:* By default, the ``'astra_cuda'`` implementation backend is used,
which requires both astra and a CUDA-enabled GPU being available.
You can choose a different backend by passing ``impl='skimage'`` or
``impl='astra_cpu'``.
Further functionalities:
* converting the stored post-log observations to pre-log observations
on the fly (cf. `observation_model` parameter of :meth:`__init__`)
* sorting by patient ids (cf. ``sorted_by_patient`` parameter of
:meth:`__init__`)
* changing the zero photon count replacement value of ``0.1`` used for
pre-log observations (cf. ``min_photon_count`` parameter of
:meth:`__init__`)
Attributes
----------
space
``(space[0], space[1])``, where
``space[0]``
``odl.uniform_discr([0., -0.1838], [3.1416, 0.1838],
(1000, 513), dtype='float32')``
``space[1]``
``odl.uniform_discr(min_pt, max_pt, (362, 362),
dtype='float32'))``, with `min_pt` and `max_pt` parameters
passed to :meth:`__init__`
shape
``(362, 362)``
train_len
``35820``
validation_len
``3522``
test_len
``3553``
random_access
``True``
num_elements_per_sample
``2``
ray_trafo : :class:`odl.tomo.RayTransform`
Ray transform corresponding to the noiseless forward operator.
sorted_by_patient : bool
Whether the samples are sorted by patient id.
Default: ``False``.
rel_patient_ids : (dict of array) or `None`
Relative patient ids of the samples in the original non-sorted order
for each part, as returned by :meth:`LoDoPaBDataset.get_patient_ids`.
`None`, if the csv files are not found.
"""
def __init__(self, min_pt=None, max_pt=None, observation_model='post-log',
min_photon_count=None, sorted_by_patient=False,
impl='astra_cuda'):
"""
Parameters
----------
min_pt : [float, float], optional
Minimum values of the lp space. Default: ``[-0.13, -0.13]``.
max_pt : [float, float], optional
Maximum values of the lp space. Default: ``[0.13, 0.13]``.
observation_model : {'post-log', 'pre-log'}, optional
The observation model to use.
The default is ``'post-log'``.
``'post-log'``
Observations are linearly related to the normalized ground
truth via the ray transform, ``obs = ray_trafo(gt) + noise``.
Note that the scaling of the observations matches the
normalized ground truth, i.e., they are divided by the linear
attenuation of 3071 HU.
``'pre-log'``
Observations are non-linearly related to the ground truth, as
given by the Beer-Lambert law.
The model is
``obs = exp(-ray_trafo(gt * MU(3071 HU))) + noise``,
where `MU(3071 HU)` is the factor, by which the ground truth
was normalized.
min_photon_count : float, optional
Replacement value for a simulated photon count of zero.
If ``observation_model == 'post-log'``, a value greater than zero
is required in order to avoid undefined values. The default is 0.1,
both for ``'post-log'`` and ``'pre-log'`` model.
sorted_by_patient : bool, optional
Whether to sort the samples by patient id.
Useful to resplit the dataset.
See also :meth:`get_indices_for_patient`.
Note that the slices of each patient are ordered randomly wrt.
the z-location in any case.
Default: ``False``.
impl : {``'skimage'``, ``'astra_cpu'``, ``'astra_cuda'``},\
optional
Implementation passed to :class:`odl.tomo.RayTransform` to
construct :attr:`ray_trafo`.
"""
global DATA_PATH
NUM_ANGLES = 1000
NUM_DET_PIXELS = 513
self.shape = ((NUM_ANGLES, NUM_DET_PIXELS), (362, 362))
self.num_elements_per_sample = 2
if min_pt is None:
min_pt = MIN_PT
if max_pt is None:
max_pt = MAX_PT
domain = uniform_discr(min_pt, max_pt, self.shape[1], dtype=np.float32)
if observation_model == 'post-log':
self.post_log = True
elif observation_model == 'pre-log':
self.post_log = False
else:
raise ValueError("`observation_model` must be 'post-log' or "
"'pre-log', not '{}'".format(observation_model))
if min_photon_count is None or min_photon_count <= 1.:
self.min_photon_count = min_photon_count
else:
self.min_photon_count = 1.
warn('`min_photon_count` changed from {} to 1.'.format(
min_photon_count))
self.sorted_by_patient = sorted_by_patient
self.train_len = LEN['train']
self.validation_len = LEN['validation']
self.test_len = LEN['test']
self.random_access = True
while not LoDoPaBDataset.check_for_lodopab():
print('The LoDoPaB-CT dataset could not be found under the '
"configured path '{}'.".format(
CONFIG['lodopab_dataset']['data_path']))
print('Do you want to download it now? (y: download, n: input '
'other path)')
download = input_yes_no()
if download:
success = download_lodopab()
if not success:
raise RuntimeError('lodopab dataset not available, '
'download failed')
else:
print('Path to LoDoPaB dataset:')
DATA_PATH = input()
set_config('lodopab_dataset/data_path', DATA_PATH)
self.rel_patient_ids = None
try:
self.rel_patient_ids = LoDoPaBDataset.get_patient_ids()
except OSError as e:
if self.sorted_by_patient:
raise RuntimeError(
'Can not load patient ids, required for sorting. '
'OSError: {}'.format(e))
warn(
'Can not load patient ids (OSError: {}). '
'Therefore sorting is not possible, so please keep the '
'attribute `sorted_by_patient = False` for the LoDoPaBDataset.'
.format(e))
if self.rel_patient_ids is not None:
self._idx_sorted_by_patient = (
LoDoPaBDataset.get_idx_sorted_by_patient(
self.rel_patient_ids))
self.geometry = odl.tomo.parallel_beam_geometry(
domain, num_angles=NUM_ANGLES, det_shape=(NUM_DET_PIXELS,))
range_ = uniform_discr(self.geometry.partition.min_pt,
self.geometry.partition.max_pt,
self.shape[0], dtype=np.float32)
super().__init__(space=(range_, domain))
self.ray_trafo = self.get_ray_trafo(impl=impl)
def __get_observation_trafo(self, num_samples=1):
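        # The HDF5 files store post-log observations in which zero photon
        # counts were replaced by ORIG_MIN_PHOTON_COUNT = 0.1. Depending on
        # the requested observation model, the closure returned below either
        # leaves the values unchanged, converts them to pre-log intensities
        # via exp(-MU_MAX * obs), or additionally re-applies a different
        # `min_photon_count` replacement using the threshold `thres0`.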
if (self.min_photon_count is None or
self.min_photon_count == ORIG_MIN_PHOTON_COUNT):
if self.post_log:
def observation_trafo(out):
pass
else:
def observation_trafo(obs):
obs *= MU_MAX
np.exp(-obs, out=obs)
else:
shape = (self.shape[0] if num_samples == 1 else
(num_samples,) + self.shape[0])
            mask = np.empty(shape, dtype=bool)  # np.bool is removed in recent NumPy
thres0 = 0.5 * (
-np.log(ORIG_MIN_PHOTON_COUNT/PHOTONS_PER_PIXEL)
- np.log(1/PHOTONS_PER_PIXEL)) / MU_MAX
if self.post_log:
def observation_trafo(obs):
np.greater_equal(obs, thres0, out=mask)
obs[mask] = -np.log(self.min_photon_count
/ PHOTONS_PER_PIXEL) / MU_MAX
else:
def observation_trafo(obs):
np.greater_equal(obs, thres0, out=mask)
obs *= MU_MAX
np.exp(-obs, out=obs)
obs[mask] = self.min_photon_count/PHOTONS_PER_PIXEL
return observation_trafo
def generator(self, part='train'):
"""Yield pairs of low dose observations and (virtual) ground truth.
Parameters
----------
part : {``'train'``, ``'validation'``, ``'test'``}, optional
The data part. Default is ``'train'``.
Yields
------
(observation, ground_truth)
`observation` : odl element with shape ``(1000, 513)``
The values depend on the
`observation_model` and `min_photon_count` parameters that were
passed to :meth:`__init__`.
`ground_truth` : odl element with shape ``(362, 362)``
The values lie in the range ``[0., 1.]``.
"""
if self.sorted_by_patient:
# fall back to default implementation
yield from super().generator(part=part)
return
num_files = ceil(self.get_len(part) / NUM_SAMPLES_PER_FILE)
observation_trafo = self.__get_observation_trafo()
for i in range(num_files):
with h5py.File(
os.path.join(DATA_PATH, 'ground_truth_{}_{:03d}.hdf5'
.format(part, i)), 'r') as file:
ground_truth_data = file['data'][:]
with h5py.File(
os.path.join(DATA_PATH, 'observation_{}_{:03d}.hdf5'
.format(part, i)), 'r') as file:
observation_data = file['data'][:]
for gt_arr, obs_arr in zip(ground_truth_data, observation_data):
ground_truth = self.space[1].element(gt_arr)
observation = self.space[0].element(obs_arr)
observation_trafo(observation)
yield (observation, ground_truth)
def get_ray_trafo(self, **kwargs):
"""
Return the ray transform that is a noiseless version of the forward
operator.
Parameters
----------
impl : {``'skimage'``, ``'astra_cpu'``, ``'astra_cuda'``}, optional
The backend implementation passed to
:class:`odl.tomo.RayTransform`.
Returns
-------
ray_trafo : odl operator
The ray transform that corresponds to the noiseless map from
362 x 362 images to the ``-log`` of their projections (sinograms).
"""
return odl.tomo.RayTransform(self.space[1], self.geometry, **kwargs)
def get_sample(self, index, part='train', out=None):
"""
Get single sample of the dataset.
Returns a pair of (virtual) ground truth and its low dose observation,
of which either part can be left out by option.
Parameters
----------
index : int
The index into the dataset part.
part : {``'train'``, ``'validation'``, ``'test'``}, optional
The data part. Default is ``'train'``.
out : tuple of array-likes or bools, optional
``out==(out_observation, out_ground_truth)``
out_observation : array-like or bool
Shape ``(1000, 513)``.
If an odl element or array is passed, the observation is
written to it.
If ``True``, a new odl element holding the observation is
created (the default).
If ``False``, no observation is returned.
out_ground_truth : array-like or bool
Shape ``(362, 362)``.
If an odl element or array is passed, the ground truth is
written to it.
If ``True``, a new odl element holding the ground truth is
created (the default).
If ``False``, no ground truth is returned.
Returns
-------
``(observation, ground_truth)``
observation : odl element or :class:`np.ndarray` or `None`
Depending on the value of ``out_observation`` (see parameter
`out`), a newly created odl element, ``out_observation`` or
`None` is returned.
The observation values depend on the `observation_model` and
`min_photon_count` parameters that were given to the
constructor.
ground_truth : odl element or :class:`np.ndarray` or `None`
Depending on the value of ``out_ground_truth`` (see parameter
`out`), a newly created odl element, ``out_ground_truth`` or
`None` is returned.
The values lie in the range ``[0., 1.]``.
"""
len_part = self.get_len(part)
if index >= len_part or index < -len_part:
raise IndexError("index {} out of bounds for part '{}' ({:d})"
.format(index, part, len_part))
if index < 0:
index += len_part
if out is None:
out = (True, True)
(out_observation, out_ground_truth) = out
if self.sorted_by_patient:
index = self._idx_sorted_by_patient[part][index]
file_index = index // NUM_SAMPLES_PER_FILE
index_in_file = index % NUM_SAMPLES_PER_FILE
if isinstance(out_observation, bool):
obs = self.space[0].zero() if out_observation else None
else:
obs = out_observation
if isinstance(out_ground_truth, bool):
gt = self.space[1].zero() if out_ground_truth else None
else:
gt = out_ground_truth
if obs is not None:
with h5py.File(
os.path.join(DATA_PATH,
'observation_{}_{:03d}.hdf5'
.format(part, file_index)), 'r') as file:
file['data'].read_direct(np.asarray(obs)[np.newaxis],
np.s_[index_in_file:index_in_file+1],
np.s_[0:1])
observation_trafo = self.__get_observation_trafo()
observation_trafo(obs)
if gt is not None:
with h5py.File(
os.path.join(DATA_PATH,
'ground_truth_{}_{:03d}.hdf5'
.format(part, file_index)), 'r') as file:
file['data'].read_direct(np.asarray(gt)[np.newaxis],
np.s_[index_in_file:index_in_file+1],
np.s_[0:1])
return (obs, gt)
def get_samples(self, key, part='train', out=None):
"""
        Get a slice of the dataset.
        Returns a pair of (virtual) ground truth data and its low dose
        observation data, either of which can be omitted via the `out` option.
Parameters
----------
key : slice or range
The indices into the dataset part.
part : {``'train'``, ``'validation'``, ``'test'``}, optional
The data part. Default is ``'train'``.
out : tuple of arrays or bools, optional
``out==(out_observation, out_ground_truth)``
out_observation : :class:`np.ndarray` or bool
If an array is passed, the observation data is written to it.
If ``True``, a new array holding the observation data is
created (the default).
If ``False``, no observation data is returned.
out_ground_truth : :class:`np.ndarray` or bool
If an array is passed, the ground truth data is written to it.
If ``True``, a new array holding the ground truth data is
created (the default).
If ``False``, no ground truth data is returned.
Returns
-------
``(observation, ground_truth)``
observation : :class:`np.ndarray` or `None`
Shape ``(samples, 1000, 513)``.
Depending on the value of ``out_observation`` (see parameter
`out`), a newly created array, ``out_observation`` or `None`
is returned.
The observation values depend on the `observation_model` and
`min_photon_count` parameters that were given to the
constructor.
ground_truth : :class:`np.ndarray` or `None`
Shape ``(samples, 362, 362)``.
Depending on the value of ``out_ground_truth`` (see parameter
`out`), a newly created array, ``out_ground_truth`` or `None`
is returned.
The values lie in the range ``[0., 1.]``.
"""
if self.sorted_by_patient:
# fall back to default implementation
return super().get_samples(key, part=part, out=out)
len_part = self.get_len(part)
if isinstance(key, slice):
key_start = (0 if key.start is None else
(key.start if key.start >= 0 else
max(0, len_part+key.start)))
key_stop = (len_part if key.stop is None else
(key.stop if key.stop >= 0 else
max(0, len_part+key.stop)))
range_ = range(key_start, key_stop, key.step or 1)
elif isinstance(key, range):
range_ = key
else:
raise TypeError('`key` expected to have type `slice` or `range`')
if range_.step < 0:
raise ValueError('key {} invalid, negative steps are not '
'implemented yet'.format(key))
if range_[-1] >= len_part:
raise IndexError("key {} out of bounds for part '{}' ({:d})"
.format(key, part, len_part))
range_files = range(range_[0] // NUM_SAMPLES_PER_FILE,
range_[-1] // NUM_SAMPLES_PER_FILE + 1)
if out is None:
out = (True, True)
(out_observation, out_ground_truth) = out
# compute slice objects
slices_files = []
slices_data = []
data_count = 0
for i in range_files:
if i == range_files.start:
start = range_.start % NUM_SAMPLES_PER_FILE
else:
start = (range_.start - i*NUM_SAMPLES_PER_FILE) % range_.step
if i == range_files[-1]:
stop = range_[-1] % NUM_SAMPLES_PER_FILE + 1
else:
__next_start = ((range_.start - (i+1)*NUM_SAMPLES_PER_FILE)
% range_.step)
stop = (__next_start - range_.step) % NUM_SAMPLES_PER_FILE + 1
s = slice(start, stop, range_.step)
slices_files.append(s)
len_slice = ceil((s.stop-s.start) / s.step)
slices_data.append(slice(data_count, data_count+len_slice))
data_count += len_slice
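        # Worked example (assuming, hypothetically, NUM_SAMPLES_PER_FILE == 128):
        # for key = slice(100, 300, 3) the requested indices are 100, 103, ..., 298,
        # which span files 0..2. The loop above then produces
        #   file 0: slice(100, 128, 3) -> 10 samples (data slice  0:10)
        #   file 1: slice(2, 126, 3)   -> 42 samples (data slice 10:52)
        #   file 2: slice(0, 43, 3)    -> 15 samples (data slice 52:67)
        # covering all 67 requested samples in order.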
# read data
if isinstance(out_observation, bool):
obs_arr = (np.empty((len(range_),) + self.shape[0],
dtype=np.float32) if out_observation else None)
else:
obs_arr = out_observation
if isinstance(out_ground_truth, bool):
gt_arr = (np.empty((len(range_),) + self.shape[1],
dtype=np.float32) if out_ground_truth else None)
else:
gt_arr = out_ground_truth
if obs_arr is not None:
for i, slc_f, slc_d in zip(range_files, slices_files, slices_data):
with h5py.File(
os.path.join(DATA_PATH,
'observation_{}_{:03d}.hdf5'
.format(part, i)), 'r') as file:
file['data'].read_direct(obs_arr, slc_f, slc_d)
observation_trafo = self.__get_observation_trafo(
num_samples=len(obs_arr))
observation_trafo(obs_arr)
if gt_arr is not None:
for i, slc_f, slc_d in zip(range_files, slices_files, slices_data):
with h5py.File(
os.path.join(DATA_PATH,
'ground_truth_{}_{:03d}.hdf5'
.format(part, i)), 'r') as file:
file['data'].read_direct(gt_arr, slc_f, slc_d)
return (obs_arr, gt_arr)
def get_indices_for_patient(self, rel_patient_id, part='train'):
"""
Return the indices of the samples from one patient.
If ``self.sorted_by_patient`` is ``True``, the indices will be
subsequent.
Parameters
----------
rel_patient_id : int
Patient id, relative to the part.
part : {``'train'``, ``'validation'``, ``'test'``}, optional
            The data part. Default is ``'train'``.
Returns
-------
indices : array
The indices of the samples from the patient.
"""
if self.sorted_by_patient:
num_samples_by_patient = np.bincount(self.rel_patient_ids[part])
first_sample = np.sum(num_samples_by_patient[:rel_patient_id])
indices = np.array(range(
first_sample,
first_sample + num_samples_by_patient[rel_patient_id]))
else:
indices = np.nonzero(
self.rel_patient_ids[part] == rel_patient_id)[0]
return indices
@staticmethod
def check_for_lodopab():
"""Fast check whether first and last file of each dataset part exist
under the configured data path.
Returns
-------
exists : bool
Whether LoDoPaB seems to exist.
"""
for part in ['train', 'validation', 'test']:
first_file = os.path.join(
DATA_PATH, 'observation_{}_000.hdf5'.format(part))
last_file = os.path.join(
DATA_PATH, 'observation_{}_{:03d}.hdf5'.format(
part, ceil(LEN[part] / NUM_SAMPLES_PER_FILE) - 1))
if not (os.path.exists(first_file) and os.path.exists(last_file)):
return False
return True
@staticmethod
def get_num_patients(part='train'):
"""
Return the number of patients in a dataset part.
Parameters
----------
part : {``'train'``, ``'validation'``, ``'test'``}, optional
Whether to return the number of train, validation or test patients.
Default is ``'train'``.
"""
return NUM_PATIENTS[part]
@staticmethod
def _abs_to_rel_patient_id(abs_patient_id, part):
return abs_patient_id - PATIENT_ID_OFFSETS[part]
@staticmethod
def _rel_to_abs_patient_id(rel_patient_id, part):
return rel_patient_id + PATIENT_ID_OFFSETS[part]
@staticmethod
def get_patient_ids(relative=True):
"""
Return the (relative) patient id for all samples of all dataset parts.
Parameters
----------
relative : bool, optional
Whether to use ids relative to the dataset part.
The csv files store absolute indices, where
"train_ids < validation_ids < test_ids".
If ``False``, these absolute indices are returned.
If ``True``, the smallest absolute id of the part is subtracted,
giving zero-based (relative) patient ids.
Default: ``True``
Returns
-------
ids : dict of array
For each part: an array with the (relative) patient ids for all
samples (length: number of samples in the corresponding part).
Raises
------
OSError
An `OSError` is raised if one of the csv files containing the
patient ids is missing in the configured data path.
"""
ids = {}
for part in ['train', 'validation', 'test']:
ids[part] = np.loadtxt(
os.path.join(DATA_PATH,
'patient_ids_rand_{}.csv'.format(part)),
                dtype=int)
if relative:
ids[part] = LoDoPaBDataset._abs_to_rel_patient_id(ids[part],
part)
return ids
@staticmethod
def get_idx_sorted_by_patient(ids=None):
"""
Return indices that allow access to each dataset part in patient id
order.
*Note:* in most cases this method should not be called directly. Rather
specify ``sorted_by_patient=True`` to the constructor if applicable.
A plausible use case of this method, however, is to access existing
cache files that were created with ``sorted_by_patient=False``.
In this case, the dataset should be constructed with
``sorted_by_patient=False``, wrapped by a :class:`CachedDataset`
and then reordered with :class:`ReorderedDataset` using the indices
returned by this method.
Parameters
----------
ids : dict of array-like, optional
Patient ids as returned by :meth:`get_patient_ids`. It is not
relevant to this function whether they are relative.
Returns
-------
idx : dict of array
Indices that allow access to each dataset part in patient id order.
Each array value is an index into the samples in original order
(as stored in the HDF5 files).
I.e.: By iterating the samples with index ``idx[part][i]`` for
``i = 0, 1, 2, ...`` one first obtains all samples from one
patient, then continues with the samples of the second patient, and
so on.
Raises
------
OSError
An `OSError` is raised if ``ids is None`` and one of the csv files
containing the patient ids is missing in the configured data path.
"""
if ids is None:
ids = LoDoPaBDataset.get_patient_ids()
idx = {}
for part in ['train', 'validation', 'test']:
idx[part] = np.argsort(ids[part], kind='stable')
return idx
```
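A brief usage sketch for the dataset class above (not part of the original file): the import path and the ``ReorderedDataset`` wrapper mentioned in ``get_idx_sorted_by_patient`` are assumptions, so treat this as illustrative only.
```python
# Hypothetical usage sketch -- import path and wrapper classes are assumed, not taken
# from the file above.
from dival.datasets.lodopab_dataset import LoDoPaBDataset

dataset = LoDoPaBDataset(sorted_by_patient=False)

# single sample and a small batch, as documented in get_sample / get_samples
obs, gt = dataset.get_sample(0, part='train')
obs_arr, gt_arr = dataset.get_samples(range(0, 16), part='train')

# patient-ordered access over an existing unsorted dataset, as described in
# get_idx_sorted_by_patient (ReorderedDataset is an assumed wrapper class)
idx = LoDoPaBDataset.get_idx_sorted_by_patient()
# reordered = ReorderedDataset(dataset, idx)
```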
#### File: dival/reconstructors/learnedgd_reconstructor.py
```python
from copy import deepcopy
import odl
import torch
from odl.contrib.torch import OperatorModule
from odl.tomo import fbp_op
from odl.operator.operator import OperatorRightScalarMult
from dival.reconstructors.standard_learned_reconstructor import (
StandardLearnedReconstructor)
from dival.reconstructors.networks.iterative import IterativeNet
class LearnedGDReconstructor(StandardLearnedReconstructor):
"""
CT reconstructor applying a learned gradient descent iterative scheme.
    Note that the weights are not shared across the blocks, as presented in
the original paper [1]_.
This implementation rather follows
https://github.com/adler-j/learned_primal_dual/blob/master/ellipses/learned_primal.py.
References
----------
    .. [1] J. Adler & O. Öktem (2017). Solving ill-posed inverse problems
using iterative deep neural networks. Inverse Problems, 33(12), 124007.
"""
HYPER_PARAMS = deepcopy(StandardLearnedReconstructor.HYPER_PARAMS)
HYPER_PARAMS.update({
'epochs': {
'default': 20,
'retrain': True
},
'batch_size': {
'default': 32,
'retrain': True
},
'lr': {
'default': 0.01,
'retrain': True
},
'normalize_by_opnorm': {
'default': True,
'retrain': True
},
'niter': {
'default': 5,
'retrain': True
},
'init_fbp': {
'default': True,
'retrain': True
},
'init_filter_type': {
'default': 'Hann',
'retrain': True
},
'init_frequency_scaling': {
'default': 0.4,
'retrain': True
},
'use_sigmoid': {
'default': False,
'retrain': True
},
'nlayer': {
'default': 3,
'retrain': True
},
'internal_ch': {
'default': 32,
'retrain': True
},
'kernel_size': {
'default': 3,
'retrain': True
},
'batch_norm': {
'default': False,
'retrain': True
},
'prelu': {
'default': False,
'retrain': True
},
'lrelu_coeff': {
'default': 0.2,
'retrain': True
},
'lr_time_decay_rate': {
'default': 3.2,
'retrain': True
},
'init_weight_xavier_normal': {
'default': False,
'retrain': True
},
'init_weight_gain': {
'default': 1.0,
'retrain': True
}
})
def __init__(self, ray_trafo, **kwargs):
"""
Parameters
----------
ray_trafo : :class:`odl.tomo.RayTransform`
Ray transform (the forward operator).
Further keyword arguments are passed to ``super().__init__()``.
"""
super().__init__(ray_trafo, **kwargs)
def init_model(self):
self.op_mod = OperatorModule(self.op)
self.op_adj_mod = OperatorModule(self.op.adjoint)
partial0 = odl.PartialDerivative(self.op.domain, axis=0)
partial1 = odl.PartialDerivative(self.op.domain, axis=1)
self.reg_mod = OperatorModule(partial0.adjoint * partial0 +
partial1.adjoint * partial1)
if self.hyper_params['init_fbp']:
fbp = fbp_op(
self.non_normed_op,
filter_type=self.hyper_params['init_filter_type'],
frequency_scaling=self.hyper_params['init_frequency_scaling'])
if self.normalize_by_opnorm:
fbp = OperatorRightScalarMult(fbp, self.opnorm)
self.init_mod = OperatorModule(fbp)
else:
self.init_mod = None
self.model = IterativeNet(
n_iter=self.niter,
n_memory=5,
op=self.op_mod,
op_adj=self.op_adj_mod,
op_init=self.init_mod,
op_reg=self.reg_mod,
use_sigmoid=self.hyper_params['use_sigmoid'],
n_layer=self.hyper_params['nlayer'],
internal_ch=self.hyper_params['internal_ch'],
kernel_size=self.hyper_params['kernel_size'],
batch_norm=self.hyper_params['batch_norm'],
prelu=self.hyper_params['prelu'],
lrelu_coeff=self.hyper_params['lrelu_coeff'])
def weights_init(m):
if isinstance(m, torch.nn.Conv2d):
m.bias.data.fill_(0.0)
if self.hyper_params['init_weight_xavier_normal']:
torch.nn.init.xavier_normal_(
m.weight, gain=self.hyper_params['init_weight_gain'])
self.model.apply(weights_init)
if self.use_cuda:
# WARNING: using data-parallel here doesn't work, probably
# astra_cuda is not thread-safe
self.model = self.model.to(self.device)
# def init_optimizer(self, dataset_train):
# self.optimizer = torch.optim.RMSprop(self.model.parameters(),
# lr=self.lr, alpha=0.9)
# def init_scheduler(self, dataset_train):
# self.scheduler = None
``` |
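As a rough sketch of how this reconstructor connects to the LoDoPaB dataset shown earlier: ``get_ray_trafo`` and the ``HYPER_PARAMS`` keys come from the code above, while the ``hyper_params`` keyword and the ``train``/``reconstruct`` calls are assumed from the base-class naming and may not match the actual API.
```python
# Hypothetical wiring sketch -- hyper_params kwarg and train()/reconstruct() are
# assumed base-class features, not defined in the file above.
ray_trafo = dataset.get_ray_trafo(impl='astra_cpu')   # noiseless forward operator
reconstructor = LearnedGDReconstructor(
    ray_trafo,
    hyper_params={'epochs': 5, 'niter': 5, 'batch_size': 8})  # keys from HYPER_PARAMS

# reconstructor.train(dataset)                      # assumed training entry point
# reconstruction = reconstructor.reconstruct(obs)   # assumed inference entry point
```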
{
"source": "jleutgeb/privilege",
"score": 3
} |
#### File: oTree/privilege/tests.py
```python
from otree.api import Currency as c, currency_range, expect, Bot
from . import *
import random
class PlayerBot(Bot):
def play_round(self):
yield Instructions
yield Decision1, dict(
choice=random.choice(range(0, Constants.number_of_choices)),
)
yield Info, dict(
beliefs_high_ability=random.random(),
beliefs_privileged=random.random(),
beliefs_partner_high_ability=random.random(),
beliefs_partner_privileged=random.random(),
)
if self.player.id_in_group == 1:
yield FirstMover, dict(leads=random.choice([True, False]))
if self.player.id_in_group == 2 and self.player.session.config["follower_choice"] and \
self.player.get_others_in_group()[0].leads:
yield SecondMover, dict(follows=random.choice([True, False]))
if self.player.makes_leadership_choice:
yield Decision2, dict(leadership_choice=random.choice([True, False]))
yield Feedback
```
#### File: oTree/subject_email/__init__.py
```python
from otree.api import *
c = Currency
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'subject_email'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
subject_email = models.StringField(
label="Please enter your Email address"
)
# PAGES
class MyPage(Page):
form_model = 'player'
form_fields = ['subject_email']
@staticmethod
def before_next_page(player: Player, timeout_happened):
player.participant.label = player.subject_email
page_sequence = [MyPage]
```
#### File: oTree/subject_email/tests.py
```python
from otree.api import Currency as c, currency_range, expect, Bot
from . import *
import random
class PlayerBot(Bot):
def play_round(self):
yield MyPage, dict(
subject_email="<EMAIL>",
)
```
#### File: oTree/survey/__init__.py
```python
from otree.api import *
c = Currency
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'survey'
players_per_group = None
num_rounds = 1
survey_payoff = c(5)
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
gender = models.StringField(
choices=[
['m', 'male'],
['f', 'female'],
['o', 'other'],
],
label="Gender"
)
age = models.IntegerField(min=0, max=100, label="How old are you?")
field = models.LongStringField(label="Which field of study?")
semesters = models.IntegerField(min=0, max=100, label="How many semesters have you been studying?")
strategy = models.LongStringField(blank=True,
label="Please describe your thought process or your strategy in this experiment.")
comments = models.LongStringField(blank=True,
label="Do you have any other comments or questions about this experiment?")
# PAGES
class Survey(Page):
form_model = "player"
form_fields = ['gender', 'age', 'field', 'semesters', 'strategy', 'comments']
@staticmethod
def before_next_page(player: Player, timeout_happened):
player.payoff = Constants.survey_payoff
page_sequence = [Survey]
```
#### File: oTree/survey/tests.py
```python
from otree.api import Currency as c, currency_range, expect, Bot
from . import *
import random
class PlayerBot(Bot):
def play_round(self):
yield Survey, dict(
gender=random.choice(['m', 'f', 'o']),
age=random.randint(0, 100),
field="I'm a bot",
semesters=random.randint(0, 100),
strategy="I'm a bot",
comments="I'm a bot",
)
``` |
{
"source": "jleven/httpx",
"score": 3
} |
#### File: httpx/httpx/_exceptions.py
```python
import contextlib
import typing
if typing.TYPE_CHECKING:
from ._models import Request, Response # pragma: nocover
class HTTPError(Exception):
"""
Base class for `RequestError` and `HTTPStatusError`.
Useful for `try...except` blocks when issuing a request,
and then calling `.raise_for_status()`.
For example:
```
try:
response = httpx.get("https://www.example.com")
response.raise_for_status()
except httpx.HTTPError as exc:
print(f"HTTP Exception for {exc.request.url} - {exc}")
```
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class RequestError(HTTPError):
"""
Base class for all exceptions that may occur when issuing a `.request()`.
"""
def __init__(self, message: str, *, request: "Request" = None) -> None:
super().__init__(message)
# At the point an exception is raised we won't typically have a request
# instance to associate it with.
#
# The 'request_context' context manager is used within the Client and
# Response methods in order to ensure that any raised exceptions
# have a `.request` property set on them.
self._request = request
@property
def request(self) -> "Request":
if self._request is None:
raise RuntimeError("The .request property has not been set.")
return self._request
@request.setter
def request(self, request: "Request") -> None:
self._request = request
class TransportError(RequestError):
"""
Base class for all exceptions that occur at the level of the Transport API.
"""
# Timeout exceptions...
class TimeoutException(TransportError):
"""
The base class for timeout errors.
An operation has timed out.
"""
class ConnectTimeout(TimeoutException):
"""
Timed out while connecting to the host.
"""
class ReadTimeout(TimeoutException):
"""
Timed out while receiving data from the host.
"""
class WriteTimeout(TimeoutException):
"""
Timed out while sending data to the host.
"""
class PoolTimeout(TimeoutException):
"""
Timed out waiting to acquire a connection from the pool.
"""
# Core networking exceptions...
class NetworkError(TransportError):
"""
The base class for network-related errors.
An error occurred while interacting with the network.
"""
class ReadError(NetworkError):
"""
Failed to receive data from the network.
"""
class WriteError(NetworkError):
"""
Failed to send data through the network.
"""
class ConnectError(NetworkError):
"""
Failed to establish a connection.
"""
class CloseError(NetworkError):
"""
Failed to close a connection.
"""
# Other transport exceptions...
class ProxyError(TransportError):
"""
An error occurred while establishing a proxy connection.
"""
class UnsupportedProtocol(TransportError):
"""
Attempted to make a request to an unsupported protocol.
For example issuing a request to `ftp://www.example.com`.
"""
class ProtocolError(TransportError):
"""
The protocol was violated.
"""
class LocalProtocolError(ProtocolError):
"""
A protocol was violated by the client.
For example if the user instantiated a `Request` instance explicitly,
failed to include the mandatory `Host:` header, and then issued it directly
using `client.send()`.
"""
class RemoteProtocolError(ProtocolError):
"""
The protocol was violated by the server.
For example, returning malformed HTTP.
"""
# Other request exceptions...
class DecodingError(RequestError):
"""
Decoding of the response failed, due to a malformed encoding.
"""
class TooManyRedirects(RequestError):
"""
Too many redirects.
"""
# Client errors
class HTTPStatusError(HTTPError):
"""
The response had an error HTTP status of 4xx or 5xx.
May be raised when calling `response.raise_for_status()`
"""
def __init__(
self, message: str, *, request: "Request", response: "Response"
) -> None:
super().__init__(message)
self.request = request
self.response = response
class InvalidURL(Exception):
"""
URL is improperly formed or cannot be parsed.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class CookieConflict(Exception):
"""
Attempted to lookup a cookie by name, but multiple cookies existed.
Can occur when calling `response.cookies.get(...)`.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
# Stream exceptions...
# These may occur as the result of a programming error, by accessing
# the request/response stream in an invalid manner.
class StreamError(RuntimeError):
"""
The base class for stream exceptions.
The developer made an error in accessing the request stream in
an invalid way.
"""
def __init__(self, message: str) -> None:
super().__init__(message)
class StreamConsumed(StreamError):
"""
Attempted to read or stream content, but the content has already
been streamed.
"""
def __init__(self) -> None:
        message = (
            "Attempted to read or stream some content, but the content has "
            "already been streamed. For requests, this could be due to passing "
            "a generator as request content, and then receiving a redirect "
            "response or a secondary request as part of an authentication flow. "
"For responses, this could be due to attempting to stream the response "
"content more than once."
)
super().__init__(message)
class StreamClosed(StreamError):
"""
Attempted to read or stream response content, but the request has been
closed.
"""
def __init__(self) -> None:
message = (
"Attempted to read or stream content, but the stream has " "been closed."
)
super().__init__(message)
class ResponseNotRead(StreamError):
"""
Attempted to access streaming response content, without having called `read()`.
"""
def __init__(self) -> None:
message = "Attempted to access streaming response content, without having called `read()`."
super().__init__(message)
class RequestNotRead(StreamError):
"""
Attempted to access streaming request content, without having called `read()`.
"""
def __init__(self) -> None:
message = "Attempted to access streaming request content, without having called `read()`."
super().__init__(message)
@contextlib.contextmanager
def request_context(request: "Request" = None) -> typing.Iterator[None]:
"""
A context manager that can be used to attach the given request context
to any `RequestError` exceptions that are raised within the block.
"""
try:
yield
except RequestError as exc:
if request is not None:
exc.request = request
raise exc
``` |
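The hierarchy above lets callers catch at whichever level of granularity they need; a short sketch of the intended pattern, using the public `httpx` API rather than anything defined in this file:
```python
import httpx

try:
    response = httpx.get("https://www.example.com")
    response.raise_for_status()
except httpx.TimeoutException as exc:
    # any of ConnectTimeout / ReadTimeout / WriteTimeout / PoolTimeout
    print(f"Timed out while requesting {exc.request.url!r}.")
except httpx.RequestError as exc:
    # transport-level failures (network, protocol, proxy, ...)
    print(f"An error occurred while requesting {exc.request.url!r}.")
except httpx.HTTPStatusError as exc:
    # 4xx / 5xx responses surfaced by raise_for_status()
    print(f"Error response {exc.response.status_code} for {exc.request.url!r}.")
```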
{
"source": "jlevente/django-allauth",
"score": 2
} |
#### File: providers/strava/provider.py
```python
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class StravaAccount(ProviderAccount):
def to_str(self):
dflt = super(StravaAccount, self).to_str()
return self.account.extra_data.get('username', dflt)
class StravaProvider(OAuth2Provider):
id = 'strava'
name = 'Strava'
account_class = StravaAccount
def get_auth_params(self, request, action):
data = super(StravaProvider, self).get_auth_params(request, action)
data['scope'] = "view_private"
return data
def extract_uid(self, data):
return str(data['id'])
def extract_common_fields(self, data):
return dict(
email=data.get('email'),
username=data.get('username'),
first_name=data.get('firstname'),
last_name=data.get('lastname'),
name="%s %s" % (data.get('firstname'), data.get('lastname')),
)
provider_classes = [StravaProvider]
``` |
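To make the field mapping concrete, here is a fabricated athlete payload (not real Strava data) and the values the helpers above would derive from it:
```python
# Fabricated example payload, shaped like the fields the provider reads above.
athlete = {
    "id": 227615,
    "username": "marianne_t",
    "firstname": "Marianne",
    "lastname": "Teller",
    "email": "marianne@example.com",
}
# extract_uid(athlete)           -> "227615"
# extract_common_fields(athlete) -> {"email": "marianne@example.com",
#                                    "username": "marianne_t",
#                                    "first_name": "Marianne",
#                                    "last_name": "Teller",
#                                    "name": "Marianne Teller"}
```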
{
"source": "jlevente/social",
"score": 2
} |
#### File: socialcollector/socialcollector/collector.py
```python
import params
import psycopg2, psycopg2.extras
import requests
import json
import oauth2
from datetime import datetime
from dateutil import parser, tz
from xml.etree import ElementTree as ET
import sys
import math
utc_zone = tz.gettz('UTC')
INSTA_LIMIT = 20
TWEET_LIMIT = 200
FOURSQUARE_LIMIT = 250
FLICKR_LIMIT = 400
OSM_LIMIT = 100
MAPILLARY_LIMIT = 1000
STRAVA_LIMIT = 100
INAT_LIMIT = 200
MEETUP_LIMIT = 200
FB_LIMIT = 100
GOOGLE_POINT_BATCH = 20000
GOOGLE_LINE_BATCH = 1000
class DBHandler():
def __init__(self):
        self.data_db = psycopg2.connect(host=params.environ['DJANGO_SOCIAL_DATA_DB_HOST'], port=params.environ['DJANGO_SOCIAL_DATA_DB_PORT'], user=params.environ['DJANGO_SOCIAL_DATA_DB_USER'], password=params.environ['DJANGO_SOCIAL_DATA_DB_PASS'], dbname=params.environ['DJANGO_SOCIAL_DATA_DB_NAME'])
        self.django_db = psycopg2.connect(host=params.environ['DJANGO_SOCIAL_DEFAULT_DB_HOST'], port=params.environ['DJANGO_SOCIAL_DEFAULT_DB_PORT'], user=params.environ['DJANGO_SOCIAL_DEFAULT_DB_USER'], password=params.environ['DJANGO_SOCIAL_DEFAULT_DB_PASS'], dbname=params.environ['DJANGO_SOCIAL_DEFAULT_DB_NAME'])
def getAllParams(self):
sql = '''
select provider.id as acc_id, provider.provider, provider.user_id, provider.uid, token.token, token.token_secret, provider.client_id, provider.secret, provider.extra_data::json
from (select distinct on (provider, user_id) acc.id, acc.provider, acc.user_id, acc.uid, app.id app_id, app.client_id, app.secret, acc.extra_data
from socialaccount_socialaccount acc, socialaccount_socialapp app where
acc.provider = app.provider order by provider, user_id, id asc) provider,
socialaccount_socialtoken token
where provider.id = token.account_id
'''
cur = self.django_db.cursor()
data = []
try:
cur.execute(sql)
user = cur.fetchall()
for u in user:
params = {
"platform": u[1],
"user_django": u[2],
"user_platform": u[3],
"access_token": u[4],
"token_secret": u[5],
"client_id": u[6],
"client_secret": u[7],
}
#print params
try:
login = u[8]['login']
print login
params['login'] = login
data.append(params)
except:
data.append(params)
except Exception, e:
print(Exception, e)
return data
def getUserParams(self, user_id, platform):
sql = '''
select provider.id as acc_id, provider.provider, provider.user_id, provider.uid, token.token, token.token_secret, provider.client_id, provider.secret, provider.extra_data::json
from
(
select distinct on (provider, user_id) acc.id, acc.provider, acc.user_id, acc.uid, app.id app_id, app.client_id, app.secret, acc.extra_data from
socialaccount_socialaccount acc,
(select * from socialaccount_socialapp where provider = %s) app
where
acc.provider = app.provider and
acc.user_id = %s order by provider, user_id, id asc
) provider,
socialaccount_socialtoken token
where provider.id = token.account_id
'''
cur = self.django_db.cursor()
try:
cur.execute(sql, (platform, user_id))
user = cur.fetchone()
params = {
"platform": user[1],
"user_django": user[2],
"user_platform": user[3],
"access_token": user[4],
"token_secret": user[5],
"client_id": user[6],
"client_secret": user[7],
}
try:
login = json.loads(user[8])['login']
params['login'] = login
except:
pass
except Exception, e:
print(Exception, e)
return params
def downloadData(self, params, collector):
        print 'User id: %s, platform: %s' % (params['user_django'], params['platform'])
if params['platform'] == 'instagram':
collector.getInstaMedia(params, self.data_db)
elif params['platform'] == 'twitter':
collector.getTweets(params, self.data_db)
elif params['platform'] == 'foursquare':
collector.getFoursquareCheckins(params, self.data_db)
elif params['platform'] == 'flickr':
collector.getFlickrPhotos(params, self.data_db)
elif params['platform'] == 'openstreetmap':
collector.getOSMChangesets(params, self.data_db)
elif params['platform'] == 'mapillary':
collector.getMapillarySequences(params, self.data_db)
elif params['platform'] == 'strava':
collector.getStravaActivities(params, self.data_db)
elif params['platform'] == 'inaturalist':
collector.getInatObservations(params, self.data_db)
elif params['platform'] == 'meetup':
collector.getMeetups(params, self.data_db)
def getNewUserParams(self):
data_cur = self.data_db.cursor()
params = self.getAllParams()
fb_users = data_cur.execute('select array_agg(distinct user_id) from facebook_places')
return params
def setupTables(self):
cursor = self.data_db.cursor()
        # Columns inferred from the INSERT statement used in getInstaMedia below;
        # the exact original schema is unavailable, so this is a best-effort guess.
        insta_table_sql = '''
            CREATE TABLE insta_media (
                pid serial primary key,
                user_id integer,
                created_at timestamp,
                location_name text,
                geom geometry(Point, 4326),
                raw json
            )
        '''
        cursor.execute(insta_table_sql)
class DataCollector():
def getInstaMedia(self, user_params, db):
url = 'https://api.instagram.com/v1/users/self/media/recent/?count=20&access_token=' + user_params['access_token']
cursor = db.cursor()
insert_sql = '''
INSERT INTO insta_media (user_id, created_at, location_name, geom, raw) VALUES (%s, %s, %s, st_setsrid(st_makepoint(%s, %s), 4326), %s::json)
'''
insert_sql_noloc = '''
INSERT INTO insta_media (user_id, created_at, raw) VALUES (%s, %s, %s::json)
'''
curr_url = url
more = True
while more:
resp = requests.get(curr_url)
if resp.status_code == 200:
data = resp.json()
for media in data['data']:
id = media['id']
if media['location'] and 'latitude' in media['location'].keys():
loc_name = media['location']['name']
lat = media['location']['latitude']
lng = media['location']['longitude']
cursor.execute(insert_sql, (user_params['user_django'], datetime.utcfromtimestamp(int(media['created_time'])), loc_name, lng, lat, json.dumps(media)))
else:
cursor.execute(insert_sql_noloc, (user_params['user_django'], datetime.utcfromtimestamp(int(media['created_time'])), json.dumps(media)))
db.commit()
if len(data['data']) == INSTA_LIMIT:
more = True
curr_url = url + "&max_id=" + id
db.commit()
else:
more = False
else:
more = False
def getTweets(self, user_params, db):
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=' + str(TWEET_LIMIT)
consumer = oauth2.Consumer(key=user_params['client_id'], secret=user_params['client_secret'])
token = oauth2.Token(key=user_params['access_token'], secret=user_params['token_secret'])
client = oauth2.Client(consumer, token)
cursor = db.cursor()
insert_sql = '''
INSERT INTO tweets (user_id, created_at, coordinates, place_name, place_bbox, raw) VALUES (%s, %s, st_setsrid(st_geomfromgeojson(%s), 4326), %s, st_makevalid(st_setsrid(st_geomfromgeojson(%s), 4326)), %s::json)
'''
more = True
curr_url = url
while more:
resp, content = client.request(curr_url, method='GET', headers=None)
if resp.status == 200:
content = json.loads(content)
for tweet in content:
created_at = parser.parse(tweet['created_at'])
id = tweet['id']
if tweet['coordinates']:
coordinates = json.dumps(tweet['coordinates'])
else:
coordinates = None
if tweet['place']:
place_name = tweet['place']['full_name']
place_bbox = json.dumps(tweet['place']['bounding_box'])
else:
place_name = None
place_bbox = None
cursor.execute(insert_sql, (user_params['user_django'], created_at, coordinates, place_name, place_bbox, json.dumps(tweet)))
if len(content) == TWEET_LIMIT:
more = True
curr_url = url + '&max_id=' + str(id)
db.commit()
else:
more = False
else:
print resp
db.commit()
def getFoursquareCheckins(self, user_params, db):
url ='https://api.foursquare.com/v2/users/self/checkins?v=20180401&limit=' + str(FOURSQUARE_LIMIT) + '&oauth_token=' + user_params['access_token']
cursor = db.cursor()
insert_sql = '''
INSERT INTO foursquare_checkins (user_id, created_at, venue_name, geom, raw) VALUES (%s, %s, %s, st_setsrid(st_makepoint(%s, %s), 4326), %s::json)
'''
insert_sql_private = '''
INSERT INTO foursquare_checkins (user_id, created_at, venue_name, raw) VALUES (%s, %s, %s, %s::json)
'''
curr_url = url
more = True
while more:
checkins = requests.get(curr_url)
if checkins.status_code == 200:
checkins = checkins.json()
for checkin in checkins['response']['checkins']['items']:
created_at = checkin['createdAt']
if 'venue' in checkin.keys():
venue_name = checkin['venue']['name']
if 'private' in checkin['venue'].keys():
lat = None
lng = None
else:
lat = checkin['venue']['location']['lat']
                            lng = checkin['venue']['location']['lng']
                    else:
                        # checkin without venue info: don't reuse stale values from
                        # the previous iteration
                        venue_name = None
                        lat = None
                        lng = None
                    if lat is None:
cursor.execute(insert_sql_private, (user_params['user_django'], datetime.utcfromtimestamp(created_at), venue_name, json.dumps(checkin)))
else:
cursor.execute(insert_sql, (user_params['user_django'], datetime.utcfromtimestamp(created_at), venue_name, lng, lat, json.dumps(checkin)))
if len(checkins['response']['checkins']['items']) == FOURSQUARE_LIMIT:
more = True
curr_url = url + '&beforeTimestamp=' + str(created_at)
db.commit()
else:
more = False
else:
print checkins.text
db.commit()
def getFlickrPhotos(self, user_params, db):
        url = 'https://api.flickr.com/services/rest/?oauth_consumer_key=' + user_params['client_id'] + '&method=flickr.people.getPhotos&user_id=me&extras=date_upload,date_taken,geo,url_m&format=json&nojsoncallback=1&oauth_token=' + user_params['access_token'] + '&oauth_signature=' + user_params['token_secret'] + '&per_page=' + str(FLICKR_LIMIT)
cursor = db.cursor()
insert_sql = '''
insert into flickr_photos (user_id, created_at, geom, raw) values (%s, %s, st_setsrid(st_makepoint(%s, %s), 4326), %s::json)
'''
insert_sql_noloc = '''
insert into flickr_photos (user_id, created_at, raw) values (%s, %s, %s::json)
'''
more = True
curr_url = url
while more:
resp = requests.get(curr_url)
if resp.status_code == 200:
photos = resp.json()
for photo in photos['photos']['photo']:
created_at = datetime.utcfromtimestamp(int(photo['dateupload']))
lat = photo['latitude']
lng = photo['longitude']
if lat == 0 and lng == 0:
cursor.execute(insert_sql_noloc, (user_params['user_django'], created_at, json.dumps(photo)))
else:
cursor.execute(insert_sql, (user_params['user_django'], created_at, lng, lat, json.dumps(photo)))
if photos['photos']['page'] < photos['photos']['pages']:
more = True
curr_url = url + '&page=' + str(photos['photos']['page'] + 1)
db.commit()
if created_at < datetime(2014, 1, 1):
more = False
else:
more = False
else:
print resp.text
db.commit()
def getOSMChangesets(self, user_params, db):
url = 'https://api.openstreetmap.org/api/0.6/changesets?user=' + user_params['user_platform'] + '&time=2014-01-01,'
cursor = db.cursor()
insert_sql = '''
insert into osm_changesets (user_id, created_at, geom) values (%s, %s, st_setsrid(st_makeenvelope(%s, %s, %s, %s), 4326))
'''
more = True
curr_url = url + str(datetime.now().date())
while more:
resp = requests.get(curr_url)
if resp.status_code == 200:
root = ET.fromstring(resp.content)
changesets = list(root.iter('changeset'))
for changeset in changesets:
attr = changeset.attrib
try:
min_lon = attr['min_lon']
min_lat = attr['min_lat']
max_lon = attr['max_lon']
max_lat = attr['max_lat']
except KeyError:
continue
created_at = parser.parse(attr['created_at'])
cursor.execute(insert_sql, (user_params['user_django'], created_at, min_lon, min_lat, max_lon, max_lat))
if len(changesets) == OSM_LIMIT:
more = True
curr_url = url + str(created_at)
db.commit()
else:
more = False
if changesets and created_at.replace(tzinfo=None) < datetime(2015, 1, 1):
more = False
db.commit()
def getMapillarySequences(self, user_params, db):
url = 'https://a.mapillary.com/v3/sequences?userkeys=' + user_params['user_platform'] + '&client_id=' + user_params['client_id'] + '&per_page=' + str(MAPILLARY_LIMIT)
cursor = db.cursor()
insert_sql = '''
insert into mapillary_sequences (user_id, created_at, geom, raw) values (%s, %s, st_setsrid(st_geomfromgeojson(%s), 4326), %s::json)
'''
insert_sql_fast = '''
insert into mapillary_sequences (user_id, created_at, geom, raw) values %s
'''
more = True
curr_url = url
data_insert = []
while more:
resp = requests.get(curr_url)
if resp.status_code == 200:
headers = resp.headers
resp = resp.json()
for sequence in resp['features']:
created_at = parser.parse(sequence['properties']['created_at'])
geom = sequence['geometry']
data_insert.append((user_params['user_django'], created_at, json.dumps(geom), json.dumps(sequence)))
#cursor.execute(insert_sql, (user_params['user_django'], created_at, json.dumps(geom), json.dumps(sequence)))
next_link = findNextLink(headers)
if next_link:
more = True
curr_url = next_link
#db.commit()
else:
more = False
psycopg2.extras.execute_values(cursor, insert_sql_fast, data_insert, template='(%s, %s, st_setsrid(st_geomfromgeojson(%s), 4326), %s::json)')
db.commit()
def getStravaActivities(self, user_params, db):
import polyline
auth = {"Authorization": "Bearer " + user_params['access_token']}
url = 'https://www.strava.com/api/v3/athlete/activities?per_page=' + str(STRAVA_LIMIT)
cursor = db.cursor()
insert_sql = '''
insert into strava_activities (user_id, created_at, geom, raw) values (%s, %s, st_setsrid(st_geomfromgeojson(%s), 4326), %s::json)
'''
more = True
curr_url = url
while more:
resp = requests.get(curr_url, headers=auth)
if resp.status_code == 200:
resp = resp.json()
for activity in resp:
created_at = parser.parse(activity['start_date'])
if 'map' in activity.keys():
if 'polyline' in activity['map'].keys():
line = polyline.decode(activity['map']['polyline'])
elif 'summary_polyline' in activity['map'].keys():
line = polyline.decode(activity['map']['summary_polyline'])
# Watch order in geojson. reverse latlng with [::-1]
geom = {
"type": "LineString",
"coordinates": [list(coords)[::-1] for coords in line]
}
else:
continue
cursor.execute(insert_sql, (user_params['user_django'], created_at, json.dumps(geom), json.dumps(activity)))
if len(resp) == STRAVA_LIMIT:
more = True
curr_url = url + '&before=' + str(int((created_at.replace(tzinfo=None) - datetime(1970,1,1)).total_seconds()))
db.commit()
else:
more = False
db.commit()
def getInatObservations(self, user_params, db):
auth = {"Authorization": "Bearer " + user_params['access_token']}
url = 'https://www.inaturalist.org/observations/' + user_params['login'] + '.json?per_page=' + str(INAT_LIMIT) + "&has[]=geo"
cursor = db.cursor()
insert_sql = '''
insert into inat_observations (user_id, created_at, geom, raw) values (%s, %s, st_setsrid(st_makepoint(%s, %s), 4326), %s::json)
'''
more = True
curr_url = url
while more:
resp = requests.get(curr_url, headers=auth)
if resp.status_code == 200:
headers = resp.headers
resp = resp.json()
for obs in resp:
created_at = parser.parse(obs['created_at']).astimezone(utc_zone).replace(tzinfo=None)
lat = float(obs['latitude'])
lng = float(obs['longitude'])
cursor.execute(insert_sql, (user_params['user_django'], created_at, lng, lat, json.dumps(obs)))
cont = findNextPage(headers)
if cont:
more = True
curr_url = url + '&page=' + str(cont)
db.commit()
else:
more = False
db.commit()
def getMeetups(self, user_params, db):
auth = {"Authorization": "Bearer " + user_params['access_token']}
url = 'https://api.meetup.com/self/events?desc=true&page=' + str(MEETUP_LIMIT)
cursor = db.cursor()
insert_sql = '''
insert into meetups (user_id, created_at, venue_name, geom, raw) values (%s, %s, %s, st_setsrid(st_makepoint(%s, %s), 4326), %s::json)
'''
curr_url = url
more = True
while more:
resp = requests.get(curr_url, headers=auth)
if resp.status_code == 200:
headers = resp.headers
resp = resp.json()
for meetup in resp:
if 'created' in meetup.keys():
created_at = datetime.fromtimestamp(meetup['created']/1000.0)
else:
try:
created_at = datetime.fromtimestamp(meetup['time']/1000.0)
except KeyError:
created_at = None
if 'venue' in meetup.keys():
venue_name = meetup['venue']['name']
lat = meetup['venue']['lat']
lng = meetup['venue']['lon']
else:
try:
venue_name = meetup['group']['name']
lat = meetup['group']['lat']
                            lng = meetup['group']['lon']
except KeyError:
venue_name = None
lat = None
lng = None
cursor.execute(insert_sql, (user_params['user_django'], created_at, venue_name, lng, lat, json.dumps(meetup)))
next_link = findNextLink(headers)
if next_link:
more = True
curr_url = next_link
db.commit()
else:
more = False
db.commit()
def getFacebookPlaces(self, user_params, db):
url = "https://graph.facebook.com/v2.12/me/tagged_places?access_token=" + user_params['access_token'] + "&limit=" + str(FB_LIMIT)
cursor = db.cursor()
insert_sql = '''
insert into facebook_places (user_id, created_at, name, geom, raw) values (%s, %s, %s, st_setsrid(st_makepoint(%s, %s), 4326), %s::json)
'''
insert_sql_nogeom = '''
insert into facebook_places (user_id, created_at, name, raw) values (%s, %s, %s, %s::json)
'''
curr_url = url
more = True
while more:
resp = requests.get(curr_url)
if resp.status_code == 200:
resp = resp.json()
                for place in resp['data']:
created_at = parser.parse(place['created_time'])
name = place['place']['name']
try:
lat = place['place']['location']['latitude']
lng = place['place']['location']['longitude']
except KeyError:
lat = None
if not lat:
cursor.execute(insert_sql_nogeom, (user_params['user_django'], created_at, name, json.dumps(place)))
else:
cursor.execute(insert_sql, (user_params['user_django'], created_at, name, lng, lat, json.dumps(place)))
if 'next' in resp['paging'].keys():
more = True
curr_url = resp['paging']['next']
else:
more = False
db.commit()
def findNextLink(headers):
try:
links = headers['link']
except KeyError:
try:
links = headers['Link']
except KeyError:
return False
    next_link = False
    for x in links.split(','):
        if 'next' in x:
            next_link = x.split('<')[1].split('>')[0]
            break
    return next_link
def findNextPage(headers):
tot = int(headers['X-Total-Entries'])
perpage = int(headers['X-Per-Page'])
curr_page = int(headers['X-Page'])
if curr_page * perpage < tot:
return curr_page + 1
else:
return False
def get_args():
import argparse
p = argparse.ArgumentParser(description="Control from the command line")
p.add_argument('-a', '--all', help='Get params from all users', action='store_true')
p.add_argument('-i', '--index', help='run collector for this index (comma separated) in params list')
return p.parse_args()
def deg2rad(deg):
return deg * (math.pi/180)
def getDistanceFromLatLonInKm(lat1,lon1,lat2,lon2):
R = 6371 # Radius of the earth in km
dlat = deg2rad(lat2-lat1)
dlon = deg2rad(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + \
math.cos(deg2rad(lat1)) * math.cos(deg2rad(lat2)) * \
math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c # Distance in km
return d
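# Rough sanity check for the haversine helper above (coordinates and distance approximate):
# getDistanceFromLatLonInKm(47.4979, 19.0402, 48.2082, 16.3738)  # Budapest -> Vienna, ~214 km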
def parseGoogleLocationHistory(user_id, file, db_handler, types=['point', 'line']):
import io
db = db_handler.data_db
with io.open(file, encoding='utf-8') as f:
data = json.load(f)
geom = {
"type": "LineString",
"coordinates": [
# [102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]
]
}
last = None
point_cur = db.cursor()
line_cur = db.cursor()
insert_point_sql = '''
INSERT INTO google_loc_points (user_id, created_at, accuracy, geom) values %s
'''
insert_line_sql = '''
INSERT INTO google_loc_lines (user_id, start_time, end_time, geom) values %s
'''
points_insert = []
lines_insert = []
point_counter = 0
line_counter = 0
for loc in data['locations']:
time = datetime.utcfromtimestamp(int(loc['timestampMs'])/1000.0)
point = [loc['longitudeE7']/10000000.0, loc['latitudeE7']/10000000.0]
if 'accuracy' in loc.keys():
acc = loc['accuracy']
else:
            acc = None
if 'point' in types:
points_insert.append((user_id, time, acc) + tuple(point))
point_counter += 1
if point_counter % GOOGLE_POINT_BATCH == 0:
# write something here
print 'Writing %s points to table (total: %s)' % (str(GOOGLE_POINT_BATCH), str(point_counter))
psycopg2.extras.execute_values(point_cur, insert_point_sql, points_insert, template='(%s, %s, %s, st_setsrid(st_makepoint(%s, %s),4326))')
points_insert = []
db.commit()
if 'line' in types:
if last:
prev_point = [last['longitudeE7']/10000000.0, last['latitudeE7']/10000000.0]
timedelta = (int(loc['timestampMs']) - int(last['timestampMs'])) / 1000.0 / 60.0
distancedelta = getDistanceFromLatLonInKm(point[1], point[0], prev_point[1], prev_point[0])
if timedelta < -20 or distancedelta > 10:
# build line here from last
segment_start = datetime.utcfromtimestamp(int(last['timestampMs'])/1000.0)
#print 'timedelta: %s, distancedelta: %s' % (str(timedelta), str(distancedelta))
#print 'Segment started: %s, ended: %s' % (str(segment_start), str(segment_end))
#print 'Segment points: %s' % str(len(geom['coordinates']))
lines_insert.append((user_id, segment_start, segment_end, json.dumps(geom)))
line_counter += 1
# Insert into postgres
if line_counter % GOOGLE_LINE_BATCH == 0:
print 'Writing %s lines to table (total: %s)' % (str(GOOGLE_LINE_BATCH), str(line_counter))
psycopg2.extras.execute_values(line_cur, insert_line_sql, lines_insert, template='(%s, %s, %s,st_setsrid(st_geomfromgeojson(%s),4326))')
db.commit()
lines_insert = []
geom['coordinates'] = []
if len(geom['coordinates']) == 0:
segment_end = time
geom['coordinates'].append(point)
last = loc
# insert last
if 'point' in types:
psycopg2.extras.execute_values(point_cur, insert_point_sql, points_insert, template='(%s, %s, %s, st_setsrid(st_makepoint(%s, %s),4326))')
if 'line' in types:
psycopg2.extras.execute_values(line_cur, insert_line_sql, lines_insert, template='(%s, %s, %s,st_setsrid(st_geomfromgeojson(%s),4326))')
db.commit()
def main():
args = vars(get_args())
all = args['all']
accounts = args['index']
db = DBHandler()
coll = DataCollector()
if all:
userparams = db.getAllParams()
else:
sys.exit(0)
if accounts:
accounts = [int(i) for i in accounts.split(',')]
for acc in accounts:
db.downloadData(userparams[acc], coll)
else:
sys.exit(0)
if __name__ == "__main__":
main()
``` |
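For reference, the collector is driven from the command line via ``main()``; the equivalent programmatic use (mirroring the calls ``main()`` makes, with an illustrative index list) looks roughly like this:
```python
# Programmatic equivalent of "python collector.py --all --index 0,2" (indices illustrative).
db = DBHandler()
coll = DataCollector()

userparams = db.getAllParams()   # one entry per connected social account
for acc in (0, 2):               # positions in the userparams list, as with --index
    db.downloadData(userparams[acc], coll)
```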
{
"source": "jleveque/sonic-mgmt",
"score": 2
} |
#### File: tests/bgp/conftest.py
```python
import os
import contextlib
import ipaddress
import json
import logging
import netaddr
import pytest
import random
from jinja2 import Template
from tests.common.helpers.assertions import pytest_assert as pt_assert
from tests.common.helpers.generators import generate_ips
from tests.common.helpers.parallel import parallel_run
from tests.common.helpers.parallel import reset_ansible_local_tmp
from tests.common.utilities import wait_until
from tests.common.utilities import wait_tcp_connection
from tests.common import config_reload
from bgp_helpers import define_config
from bgp_helpers import apply_default_bgp_config
from bgp_helpers import DUT_TMP_DIR
from bgp_helpers import TEMPLATE_DIR
from bgp_helpers import BGP_PLAIN_TEMPLATE
from bgp_helpers import BGP_NO_EXPORT_TEMPLATE
from bgp_helpers import DUMP_FILE, CUSTOM_DUMP_SCRIPT, CUSTOM_DUMP_SCRIPT_DEST, BGPMON_TEMPLATE_FILE, BGPMON_CONFIG_FILE, BGP_MONITOR_NAME, BGP_MONITOR_PORT
from tests.common.helpers.constants import DEFAULT_NAMESPACE
from tests.common.dualtor.dual_tor_utils import mux_cable_server_ip
logger = logging.getLogger(__name__)
@pytest.fixture(scope='module')
def setup_keepalive_and_hold_timer(duthosts, rand_one_dut_hostname, nbrhosts):
duthost = duthosts[rand_one_dut_hostname]
    # increase the keepalive and hold timer on the DUT for every BGP neighbor
    config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
    dut_asn = config_facts['DEVICE_METADATA']['localhost']['bgp_asn']
    for bgp_nbr_ip in config_facts.get('BGP_NEIGHBOR', {}):
        duthost.command("vtysh -c \"configure terminal\" \
                               -c \"router bgp {}\" \
                               -c \"neighbor {} timers 60 180\"".format(dut_asn, bgp_nbr_ip))
    for k, nbr in nbrhosts.items():
        nbr['host'].eos_config(lines=["timers 60 180"], parents=["router bgp {}".format(nbr['conf']['bgp']['asn'])])
yield
def check_results(results):
"""Helper function for checking results of parallel run.
Args:
results (Proxy to shared dict): Results of parallel run, indexed by node name.
"""
failed_results = {}
for node_name, node_results in results.items():
failed_node_results = [res for res in node_results if res['failed']]
if len(failed_node_results) > 0:
failed_results[node_name] = failed_node_results
if failed_results:
logger.error('failed_results => {}'.format(json.dumps(failed_results, indent=2)))
pt_assert(False, 'Some processes for updating nbr hosts configuration returned failed results')
@pytest.fixture(scope='module')
def setup_bgp_graceful_restart(duthosts, rand_one_dut_hostname, nbrhosts):
duthost = duthosts[rand_one_dut_hostname]
config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
bgp_neighbors = config_facts.get('BGP_NEIGHBOR', {})
@reset_ansible_local_tmp
def configure_nbr_gr(node=None, results=None):
"""Target function will be used by multiprocessing for configuring VM hosts.
Args:
node (object, optional): A value item of the dict type fixture 'nbrhosts'. Defaults to None.
results (Proxy to shared dict, optional): An instance of multiprocessing.Manager().dict(). Proxy to a dict
shared by all processes for returning execution results. Defaults to None.
"""
if node is None or results is None:
logger.error('Missing kwarg "node" or "results"')
return
node_results = []
logger.info('enable graceful restart on neighbor host {}'.format(node['host'].hostname))
logger.info('bgp asn {}'.format(node['conf']['bgp']['asn']))
node_results.append(node['host'].eos_config(
lines=['graceful-restart restart-time 300'], \
parents=['router bgp {}'.format(node['conf']['bgp']['asn'])], \
module_ignore_errors=True)
)
node_results.append(node['host'].eos_config(
lines=['graceful-restart'], \
parents=['router bgp {}'.format(node['conf']['bgp']['asn']), 'address-family ipv4'], \
module_ignore_errors=True)
)
node_results.append(node['host'].eos_config(
lines=['graceful-restart'], \
parents=['router bgp {}'.format(node['conf']['bgp']['asn']), 'address-family ipv6'], \
module_ignore_errors=True)
)
results[node['host'].hostname] = node_results
@reset_ansible_local_tmp
def restore_nbr_gr(node=None, results=None):
"""Target function will be used by multiprocessing for restoring configuration for the VM hosts.
Args:
node (object, optional): A value item of the dict type fixture 'nbrhosts'. Defaults to None.
results (Proxy to shared dict, optional): An instance of multiprocessing.Manager().dict(). Proxy to a dict
shared by all processes for returning execution results. Defaults to None.
"""
if node is None or results is None:
logger.error('Missing kwarg "node" or "results"')
return
# start bgpd if not started
node_results = []
node['host'].start_bgpd()
logger.info('disable graceful restart on neighbor {}'.format(node))
node_results.append(node['host'].eos_config(
lines=['no graceful-restart'], \
parents=['router bgp {}'.format(node['conf']['bgp']['asn']), 'address-family ipv4'], \
module_ignore_errors=True)
)
node_results.append(node['host'].eos_config(
lines=['no graceful-restart'], \
parents=['router bgp {}'.format(node['conf']['bgp']['asn']), 'address-family ipv6'], \
module_ignore_errors=True)
)
results[node['host'].hostname] = node_results
results = parallel_run(configure_nbr_gr, (), {}, nbrhosts.values(), timeout=120)
check_results(results)
logger.info("bgp neighbors: {}".format(bgp_neighbors.keys()))
res = True
err_msg = ""
if not wait_until(300, 10, duthost.check_bgp_session_state, bgp_neighbors.keys()):
res = False
err_msg = "not all bgp sessions are up after enable graceful restart"
if res and not wait_until(100, 5, duthost.check_default_route):
res = False
err_msg = "ipv4 or ipv6 default route not available"
if not res:
# Disable graceful restart in case of failure
parallel_run(restore_nbr_gr, (), {}, nbrhosts.values(), timeout=120)
pytest.fail(err_msg)
yield
results = parallel_run(restore_nbr_gr, (), {}, nbrhosts.values(), timeout=120)
check_results(results)
if not wait_until(300, 10, duthost.check_bgp_session_state, bgp_neighbors.keys()):
pytest.fail("not all bgp sessions are up after disable graceful restart")
@pytest.fixture(scope="module")
def setup_interfaces(duthosts, rand_one_dut_hostname, ptfhost, request, tbinfo):
"""Setup interfaces for the new BGP peers on PTF."""
def _is_ipv4_address(ip_addr):
return ipaddress.ip_address(ip_addr).version == 4
def _duthost_cleanup_ip(duthost, namespace, ip):
"""
Search if "ip" is configured on any DUT interface. If yes, remove it.
"""
for line in duthost.shell("ip addr show | grep 'inet '")['stdout_lines']:
# Example line: ''' inet 10.0.0.2/31 scope global Ethernet104'''
fields = line.split()
intf_ip = fields[1].split("/")[0]
if intf_ip == ip:
intf_name = fields[-1]
duthost.shell("config interface %s ip remove %s %s" % (namespace, intf_name, ip))
ip_intfs = duthost.show_and_parse('show ip {} interface'.format(namespace))
# For interface that has two IP configured, the output looks like:
# admin@vlab-03:~$ show ip int
# Interface Master IPv4 address/mask Admin/Oper BGP Neighbor Neighbor IP
# --------------- -------- ------------------- ------------ -------------- -------------
# Ethernet100 10.0.0.50/31 up/up ARISTA10T0 10.0.0.51
# Ethernet104 10.0.0.2/31 up/up N/A N/A
# 10.0.0.52/31 ARISTA11T0 10.0.0.53
# Ethernet108 10.0.0.54/31 up/up ARISTA12T0 10.0.0.55
# Ethernet112 10.0.0.56/31 up/up ARISTA13T0 10.0.0.57
#
# For interface Ethernet104, it has two entries in the output list:
# [{
# "ipv4 address/mask": "10.0.0.2/31",
# "neighbor ip": "N/A",
# "master": "",
# "admin/oper": "up/up",
# "interface": "Ethernet104",
# "bgp neighbor": "N/A"
# },
# {
# "ipv4 address/mask": "10.0.0.52/31",
# "neighbor ip": "10.0.0.53",
# "master": "",
# "admin/oper": "",
# "interface": "",
# "bgp neighbor": "ARISTA11T0"
# },]
# The second item has empty value for key "interface". Below code is to fill "Ethernet104" for the second item.
last_interface = ""
for ip_intf in ip_intfs:
if ip_intf["interface"] == "":
ip_intf["interface"] = last_interface
else:
last_interface = ip_intf["interface"]
# Remove the specified IP from interfaces
for ip_intf in ip_intfs:
if ip_intf["ipv4 address/mask"].split("/")[0] == ip:
duthost.shell("config interface %s ip remove %s %s" % (namespace, ip_intf["interface"], ip))
def _find_vlan_intferface(mg_facts):
for vlan_intf in mg_facts["minigraph_vlan_interfaces"]:
if _is_ipv4_address(vlan_intf["addr"]):
return vlan_intf
raise ValueError("No Vlan interface defined in T0.")
def _find_loopback_interface(mg_facts):
loopback_intf_name = "Loopback0"
for loopback in mg_facts["minigraph_lo_interfaces"]:
if loopback["name"] == loopback_intf_name:
return loopback
raise ValueError("No loopback interface %s defined." % loopback_intf_name)
@contextlib.contextmanager
def _setup_interfaces_dualtor(mg_facts, peer_count):
try:
connections = []
vlan_intf = _find_vlan_intferface(mg_facts)
loopback_intf = _find_loopback_interface(mg_facts)
vlan_intf_addr = vlan_intf["addr"]
vlan_intf_prefixlen = vlan_intf["prefixlen"]
loopback_intf_addr = loopback_intf["addr"]
loopback_intf_prefixlen = loopback_intf["prefixlen"]
mux_configs = mux_cable_server_ip(duthost)
local_interfaces = random.sample(mux_configs.keys(), peer_count)
for local_interface in local_interfaces:
connections.append(
{
"local_intf": loopback_intf["name"],
"local_addr": "%s/%s" % (loopback_intf_addr, loopback_intf_prefixlen),
"neighbor_intf": "eth%s" % mg_facts["minigraph_port_indices"][local_interface],
"neighbor_addr": "%s/%s" % (mux_configs[local_interface]["server_ipv4"].split("/")[0], vlan_intf_prefixlen)
}
)
ptfhost.remove_ip_addresses()
for conn in connections:
ptfhost.shell("ifconfig %s %s" % (conn["neighbor_intf"],
conn["neighbor_addr"]))
ptfhost.shell("ip route add %s via %s" % (loopback_intf_addr, vlan_intf_addr))
yield connections
finally:
ptfhost.shell("ip route delete %s" % loopback_intf_addr)
for conn in connections:
ptfhost.shell("ifconfig %s 0.0.0.0" % conn["neighbor_intf"])
@contextlib.contextmanager
def _setup_interfaces_t0(mg_facts, peer_count):
try:
connections = []
vlan_intf = _find_vlan_intferface(mg_facts)
vlan_intf_name = vlan_intf["attachto"]
vlan_intf_addr = "%s/%s" % (vlan_intf["addr"], vlan_intf["prefixlen"])
vlan_members = mg_facts["minigraph_vlans"][vlan_intf_name]["members"]
local_interfaces = random.sample(vlan_members, peer_count)
neighbor_addresses = generate_ips(
peer_count,
vlan_intf["subnet"],
[netaddr.IPAddress(vlan_intf["addr"])]
)
loopback_ip = None
for intf in mg_facts["minigraph_lo_interfaces"]:
if netaddr.IPAddress(intf["addr"]).version == 4:
loopback_ip = intf["addr"]
break
if not loopback_ip:
pytest.fail("ipv4 lo interface not found")
for local_intf, neighbor_addr in zip(local_interfaces, neighbor_addresses):
conn = {}
conn["local_intf"] = vlan_intf_name
conn["local_addr"] = vlan_intf_addr
conn["neighbor_addr"] = neighbor_addr
conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][local_intf]
conn["loopback_ip"] = loopback_ip
connections.append(conn)
ptfhost.remove_ip_addresses() # In case other case did not cleanup IP address configured on PTF interface
for conn in connections:
ptfhost.shell("ifconfig %s %s" % (conn["neighbor_intf"],
conn["neighbor_addr"]))
yield connections
finally:
for conn in connections:
ptfhost.shell("ifconfig %s 0.0.0.0" % conn["neighbor_intf"])
@contextlib.contextmanager
def _setup_interfaces_t1(mg_facts, peer_count):
try:
connections = []
ipv4_interfaces = []
used_subnets = set()
if mg_facts["minigraph_interfaces"]:
for intf in mg_facts["minigraph_interfaces"]:
if _is_ipv4_address(intf["addr"]):
ipv4_interfaces.append(intf["attachto"])
used_subnets.add(ipaddress.ip_network(intf["subnet"]))
ipv4_lag_interfaces = []
if mg_facts["minigraph_portchannel_interfaces"]:
for pt in mg_facts["minigraph_portchannel_interfaces"]:
if _is_ipv4_address(pt["addr"]):
pt_members = mg_facts["minigraph_portchannels"][pt["attachto"]]["members"]
                        # Only use LAGs with a single member for the bgpmon session to the PTF,
                        # because exabgp on the PTF is bound to a single interface.
if len(pt_members) == 1:
ipv4_lag_interfaces.append(pt["attachto"])
used_subnets.add(ipaddress.ip_network(pt["subnet"]))
subnet_prefixlen = list(used_subnets)[0].prefixlen
_subnets = ipaddress.ip_network(u"10.0.0.0/24").subnets(new_prefix=subnet_prefixlen)
subnets = (_ for _ in _subnets if _ not in used_subnets)
loopback_ip = None
for intf in mg_facts["minigraph_lo_interfaces"]:
if netaddr.IPAddress(intf["addr"]).version == 4:
loopback_ip = intf["addr"]
break
if not loopback_ip:
pytest.fail("ipv4 lo interface not found")
for intf, subnet in zip(random.sample(ipv4_interfaces + ipv4_lag_interfaces, peer_count), subnets):
conn = {}
local_addr, neighbor_addr = [_ for _ in subnet][:2]
conn["local_intf"] = "%s" % intf
conn["local_addr"] = "%s/%s" % (local_addr, subnet_prefixlen)
conn["neighbor_addr"] = "%s/%s" % (neighbor_addr, subnet_prefixlen)
conn["loopback_ip"] = loopback_ip
conn["namespace"] = DEFAULT_NAMESPACE
if intf.startswith("PortChannel"):
member_intf = mg_facts["minigraph_portchannels"][intf]["members"][0]
conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][member_intf]
conn["namespace"] = mg_facts["minigraph_portchannels"][intf]["namespace"]
else:
conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][intf]
connections.append(conn)
            ptfhost.remove_ip_addresses()  # In case another test case did not clean up IP addresses configured on the PTF interfaces
for conn in connections:
namespace = '-n {}'.format(conn["namespace"]) if conn["namespace"] else ''
# Find out if any other interface has the same IP configured. If yes, remove it
# Otherwise, there may be conflicts and test would fail.
_duthost_cleanup_ip(duthost, namespace, conn["local_addr"])
# bind the ip to the interface and notify bgpcfgd
duthost.shell("config interface %s ip add %s %s" % (namespace, conn["local_intf"], conn["local_addr"]))
ptfhost.shell("ifconfig %s %s" % (conn["neighbor_intf"], conn["neighbor_addr"]))
yield connections
finally:
for conn in connections:
namespace = '-n {}'.format(conn["namespace"]) if conn["namespace"] else ''
duthost.shell("config interface %s ip remove %s %s" % (namespace, conn["local_intf"], conn["local_addr"]))
ptfhost.shell("ifconfig %s 0.0.0.0" % conn["neighbor_intf"])
peer_count = getattr(request.module, "PEER_COUNT", 1)
if "dualtor" in tbinfo["topo"]["name"]:
setup_func = _setup_interfaces_dualtor
elif tbinfo["topo"]["type"] == "t0":
setup_func = _setup_interfaces_t0
elif tbinfo["topo"]["type"] == "t1":
setup_func = _setup_interfaces_t1
else:
raise TypeError("Unsupported topology: %s" % tbinfo["topo"]["type"])
duthost = duthosts[rand_one_dut_hostname]
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
with setup_func(mg_facts, peer_count) as connections:
yield connections
duthost.shell("sonic-clear arp")
@pytest.fixture(scope="module")
def deploy_plain_bgp_config(duthost):
"""
Deploy bgp plain config on the DUT
Args:
duthost: DUT host object
Returns:
Pathname of the bgp plain config on the DUT
"""
bgp_plain_template_src_path = os.path.join(TEMPLATE_DIR, BGP_PLAIN_TEMPLATE)
bgp_plain_template_path = os.path.join(DUT_TMP_DIR, BGP_PLAIN_TEMPLATE)
define_config(duthost, bgp_plain_template_src_path, bgp_plain_template_path)
return bgp_plain_template_path
@pytest.fixture(scope="module")
def deploy_no_export_bgp_config(duthost):
"""
Deploy bgp no export config on the DUT
Args:
duthost: DUT host object
Returns:
Pathname of the bgp no export config on the DUT
"""
bgp_no_export_template_src_path = os.path.join(TEMPLATE_DIR, BGP_NO_EXPORT_TEMPLATE)
bgp_no_export_template_path = os.path.join(DUT_TMP_DIR, BGP_NO_EXPORT_TEMPLATE)
define_config(duthost, bgp_no_export_template_src_path, bgp_no_export_template_path)
return bgp_no_export_template_path
@pytest.fixture(scope="module")
def backup_bgp_config(duthost):
"""
Copy default bgp configuration to the DUT and apply default configuration on the bgp
docker after test
Args:
duthost: DUT host object
"""
apply_default_bgp_config(duthost, copy=True)
yield
try:
apply_default_bgp_config(duthost)
except Exception:
config_reload(duthost)
apply_default_bgp_config(duthost)
@pytest.fixture(scope="module")
def bgpmon_setup_teardown(ptfhost, duthost, localhost, setup_interfaces):
connection = setup_interfaces[0]
dut_lo_addr = connection["loopback_ip"].split("/")[0]
peer_addr = connection['neighbor_addr'].split("/")[0]
mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
asn = mg_facts['minigraph_bgp_asn']
# TODO: Add a common method to load BGPMON config for test_bgpmon and test_traffic_shift
logger.info("Configuring bgp monitor session on DUT")
bgpmon_args = {
'db_table_name': 'BGP_MONITORS',
'peer_addr': peer_addr,
'asn': asn,
'local_addr': dut_lo_addr,
'peer_name': BGP_MONITOR_NAME
}
bgpmon_template = Template(open(BGPMON_TEMPLATE_FILE).read())
duthost.copy(content=bgpmon_template.render(**bgpmon_args),
dest=BGPMON_CONFIG_FILE)
# Start bgpmon on DUT
logger.info("Starting bgpmon on DUT")
duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
logger.info("Starting bgp monitor session on PTF")
# Clean up in case previous run failed to clean up.
ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent")
ptfhost.file(path=CUSTOM_DUMP_SCRIPT_DEST, state="absent")
# Start bgp monitor session on PTF
ptfhost.file(path=DUMP_FILE, state="absent")
ptfhost.copy(src=CUSTOM_DUMP_SCRIPT, dest=CUSTOM_DUMP_SCRIPT_DEST)
ptfhost.exabgp(name=BGP_MONITOR_NAME,
state="started",
local_ip=peer_addr,
router_id=peer_addr,
peer_ip=dut_lo_addr,
local_asn=asn,
peer_asn=asn,
port=BGP_MONITOR_PORT,
dump_script=CUSTOM_DUMP_SCRIPT_DEST)
# Flush neighbor and route in advance to avoid possible "RTNETLINK answers: File exists"
ptfhost.shell("ip neigh flush to %s nud permanent" % dut_lo_addr)
ptfhost.shell("ip route del %s" % dut_lo_addr + "/32", module_ignore_errors=True)
# Add the route to DUT loopback IP and the interface router mac
ptfhost.shell("ip neigh add %s lladdr %s dev %s" % (dut_lo_addr, duthost.facts["router_mac"], connection["neighbor_intf"]))
ptfhost.shell("ip route add %s dev %s" % (dut_lo_addr + "/32", connection["neighbor_intf"]))
pt_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT),
"Failed to start bgp monitor session on PTF")
pt_assert(wait_until(20, 5, duthost.check_bgp_session_state, [peer_addr]), 'BGP session {} on duthost is not established'.format(BGP_MONITOR_NAME))
yield
# Cleanup bgp monitor
duthost.shell("redis-cli -n 4 -c DEL 'BGP_MONITORS|{}'".format(peer_addr))
ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent")
ptfhost.file(path=CUSTOM_DUMP_SCRIPT_DEST, state="absent")
ptfhost.file(path=DUMP_FILE, state="absent")
# Remove the route to DUT loopback IP and the interface router mac
ptfhost.shell("ip route del %s" % dut_lo_addr + "/32")
ptfhost.shell("ip neigh flush to %s nud permanent" % dut_lo_addr)
``` |
{
"source": "jleverenz/dupi",
"score": 2
} |
#### File: dupi/dupi/commands.py
```python
import os
from dupi import conf, core
_command_dict = dict()
def dispatch(index, command, **kwargs):
_command_dict[command](index, **kwargs)
def _dupi_command(fn):
_command_dict[fn.__name__] = fn
return fn
@_dupi_command
def update(index, **kwargs):
    if 'dirs' in kwargs:
core.update_index(index, kwargs['dirs'])
else:
core.purge_removed_files(index)
@_dupi_command
def purge(index, **kwargs):
index.purge()
@_dupi_command
def list(index, **kwargs):
for i in core.list_duplicates(index):
print(i)
@_dupi_command
def report(index, **kwargs):
for orig, *dupes in core.list_duplicates_with_originals(index):
print("o {}".format(orig))
for i in dupes:
print("d {}".format(i))
@_dupi_command
def stats(index, **kwargs):
all = index.all()
print("{} file records".format(len(all)))
print("{} unique file sizes".format(len(set([i['size'] for i in all]))))
print("\nFiles in the following directories:")
for i in sorted(set([os.path.dirname(i['fullpath']) for i in all])):
print(" {}".format(i))
```
#### File: dupi/tests/test_commands.py
```python
from tests.common import * # dupi/tests/common
import io
import re
import sys
from pyfakefs import fake_filesystem_unittest
from dupi import conf, core
from dupi.commands import dispatch
from dupi.index import Index
class TestCommands(fake_filesystem_unittest.TestCase):
# Override run() to wrap each test in a context redirecting stderr
def run(self, result=None):
err_out = io.StringIO()
with redirect_stderr(err_out):
super().run(result)
def setUp(self):
self.setUpPyfakefs()
# Touch the default index file location on fake filesystem,
# to be sure parent dir structure exists.
self.fs.CreateFile(conf.index_file, create_missing_dirs=True)
def test_update_command(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='abc')
index = Index(conf.index_file)
params = {'dirs': ['/test']}
dispatch(index, 'update', **params)
self.fs.RemoveObject('/test/file2')
dispatch(index, 'update')
self.assertEqual(1, len(index.all()))
def test_report_command(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='abc')
self.fs.CreateFile('/test/file3', contents='defg')
self.fs.CreateFile('/test/file4', contents='hijk')
self.fs.CreateFile('/test/afile', contents='abc')
_old_stdout = sys.stdout
stdout_cap = io.StringIO()
sys.stdout = stdout_cap
index = Index(conf.index_file)
core.update_index(index, ['/test'])
dispatch(index, 'report')
sys.stdout = _old_stdout
# Just check that three lines got written..
self.assertEqual(3, len(stdout_cap.getvalue().strip().split("\n")))
def test_report_stats(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='abc')
self.fs.CreateFile('/test/file3', contents='defg')
self.fs.CreateFile('/test/file4', contents='hijk')
self.fs.CreateFile('/test/afile', contents='abc')
_old_stdout = sys.stdout
stdout_cap = io.StringIO()
sys.stdout = stdout_cap
index = Index(conf.index_file)
core.update_index(index, ['/test'])
dispatch(index, 'stats')
sys.stdout = _old_stdout
self.assertRegex(stdout_cap.getvalue(),
re.compile('file records', re.M))
```
#### File: dupi/tests/test_core.py
```python
from tests.common import * # dupi/tests/common
import io
from pyfakefs import fake_filesystem, fake_filesystem_unittest
from dupi import conf, core
from dupi.index import Index
from unittest.mock import patch
class TestCore(fake_filesystem_unittest.TestCase):
# Override run() to wrap each test in a context redirecting stderr
def run(self, result=None):
err_out = io.StringIO()
with redirect_stderr(err_out):
super().run(result)
def setUp(self):
self.setUpPyfakefs()
# Touch the default index file location on fake filesystem,
# to be sure parent dir structure exists.
self.fs.CreateFile(conf.index_file, create_missing_dirs=True)
# Setup index
self.index = Index(conf.index_file)
def test_update_index_with_single_file(self):
self.fs.CreateFile('/test/file1', contents='abc')
core.update_index(self.index, ['/test'])
self.assertEqual(len(self.index.all()), 1)
def test_update_index_without_change(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='def')
core.update_index(self.index, ['/test'])
orig = dict()
for i in self.index.all():
orig[i['fullpath']] = i
core.update_index(self.index, ['/test'])
for i in self.index.all():
self.assertEqual(i, orig[i['fullpath']])
def test_update_index_on_content_change(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='def')
core.update_index(self.index, ['/test'])
shas = dict()
for i in self.index.all():
shas[i['fullpath']] = i['sha256']
with open('/test/file2', "w") as f:
f.write('ghi')
core.update_index(self.index, ['/test'])
file1_sha = self.index.get('/test/file1')['sha256']
file2_sha = self.index.get('/test/file2')['sha256']
self.assertEqual(shas['/test/file1'], file1_sha)
self.assertNotEqual(shas['/test/file2'], file2_sha)
def test_update_index_deletes_and_updates_files(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='def')
core.update_index(self.index, ['/test'])
sha = self.index.get('/test/file1')['sha256']
with open('/test/file1', "w") as f:
f.write('ghi')
self.fs.RemoveObject('/test/file2')
core.update_index(self.index)
self.assertEqual(1, len(self.index.all()))
self.assertNotEqual(sha, self.index.get('/test/file1')['sha256'])
def _delete_file_before_stat(self, f):
# This function is used in tests below to simulate a file being deleted
# while dupi is processing. Since dupi will build file lists first, and
        # then process them, there's a window where a file may be removed and is
# no longer processable. This simulates that behavior by destroying
# files immediately before os.stat is called.
fake_os = fake_filesystem.FakeOsModule(self.fs)
# delete the file right before calling
self.fs.RemoveObject(f)
return fake_os.stat(f)
def test_update_with_dirs_handles_disappearing_files(self):
self.fs.CreateFile('/test/blahfile', contents='abc')
with patch('dupi.index.os.path',
side_effect=self._delete_file_before_stat):
core.update_index(self.index, ['/test'])
# Nothing added .. the file disappeared
self.assertEqual(len(self.index.all()), 0)
def test_update_empty_handles_disappearing_files(self):
self.fs.CreateFile('/test/blahfile', contents='abc')
core.update_index(self.index, ['/test'])
self.assertEqual(len(self.index.all()), 1)
with patch('dupi.index.os.stat',
side_effect=self._delete_file_before_stat):
core.update_index(self.index, [])
self.assertEqual(len(self.index.all()), 0)
def test_update_index_deletes_removed_files(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='def')
core.update_index(self.index, ['/test'])
self.fs.RemoveObject('/test/file2')
core.update_index(self.index, ['/test'])
self.assertEqual(1, len(self.index.all()))
```
#### File: dupi/tests/test_reporting.py
```python
from tests.common import * # dupi/tests/common
import io
import sys
from pyfakefs import fake_filesystem_unittest
from dupi import conf, core
from dupi.index import Index
# TODO order is not deterministic while based on os.walk
class TestReporting(fake_filesystem_unittest.TestCase):
# Override run() to wrap each test in a context redirecting stderr
def run(self, result=None):
err_out = io.StringIO()
with redirect_stderr(err_out):
super().run(result)
def setUp(self):
self.setUpPyfakefs()
# Touch the default index file location on fake filesystem,
# to be sure parent dir structure exists.
self.fs.CreateFile(conf.index_file, create_missing_dirs=True)
# Setup index
self.index = Index(conf.index_file)
def test_list_empty(self):
core.update_index(self.index, ['/test'])
results = core.list_duplicates(self.index)
self.assertEqual(0, len(list(results)))
def test_list_no_duplicates(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file3', contents='defg')
self.fs.CreateFile('/test/file4', contents='hijk')
core.update_index(self.index, ['/test'])
results = core.list_duplicates(self.index)
self.assertEqual(0, len(list(results)))
def test_list(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='abc')
self.fs.CreateFile('/test/file3', contents='defg')
self.fs.CreateFile('/test/file4', contents='hijk')
self.fs.CreateFile('/test/afile', contents='abc')
core.update_index(self.index, ['/test'])
results = core.list_duplicates(self.index)
self.assertSetEqual(set(results), {'/test/file1', '/test/file2'})
def test_list_with_originals(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='abc')
self.fs.CreateFile('/test/file3', contents='defg')
self.fs.CreateFile('/test/file4', contents='hijk')
self.fs.CreateFile('/test/afile', contents='abc')
core.update_index(self.index, ['/test'])
results = core.list_duplicates_with_originals(self.index)
self.assertEqual(1, len(results))
orig, *dupes = results[0]
self.assertEqual('/test/afile', orig)
self.assertSetEqual(set(dupes),
{'/test/file1', '/test/file2'})
def test_list_empty_with_originals(self):
core.update_index(self.index, ['/test'])
results = core.list_duplicates_with_originals(self.index)
self.assertEqual(0, len(results))
def test_list_no_duplicates_with_originals(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file3', contents='defg')
self.fs.CreateFile('/test/file4', contents='hijk')
core.update_index(self.index, ['/test'])
results = core.list_duplicates_with_originals(self.index)
self.assertEqual(0, len(results))
def test_list_duplicates_with_originals_pairs(self):
self.fs.CreateFile('/test/file1', contents='abc')
self.fs.CreateFile('/test/file2', contents='abc')
core.update_index(self.index, ['/test'])
results = core.list_duplicates_with_originals(self.index)
self.assertEqual(1, len(results))
self.assertSetEqual(set(results[0]), {'/test/file1', '/test/file2'})
``` |
{
"source": "jleverenz/finddup",
"score": 3
} |
#### File: finddup/tests/test_finddup.py
```python
from test_helper import *
from collections import namedtuple
from pyfakefs import fake_filesystem_unittest
class TestCompareFiles(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
def test_group_by_size(self):
self.fs.CreateFile("/test/file1", contents='abcdefg')
self.fs.CreateFile("/test/file2", contents='1234567')
filelist = ["/test/file1", "/test/file2"]
size_hash = group_by_size(filelist)
self.assertEqual(list(size_hash.keys()), [7])
self.assertEqual(len(size_hash[7]), 2)
def test_file_comparer(self):
self.fs.CreateFile("/test/file1", contents='abcdefg')
self.fs.CreateFile("/test/file2", contents='abcdefg')
filelist = ["/test/file1", "/test/file2"]
compare_results = compare_files(filelist)
self.assertEqual(compare_results, {"/test/file1": ["/test/file2"]})
``` |
{
"source": "jleverenz/hurl",
"score": 2
} |
#### File: integration/tests_failed/assert_header_not_found.py
```python
from app import app
@app.route("/error-assert-header-not-found")
def error_assert_header_not_found():
return "Hello World!"
```
#### File: integration/tests_failed/assert_query_cookie.py
```python
from flask import request, make_response
from app import app
@app.route("/error-assert-query-cookie")
def error_assert_query_cookie():
resp = make_response()
resp.set_cookie("cookie1", "value1")
resp.set_cookie("cookie2", "value2", secure=True)
return resp
```
#### File: integration/tests_failed/timeout.py
```python
from app import app
import time
@app.route("/timeout")
def timeout():
time.sleep(2)
return ""
```
#### File: integration/tests_ok/cookie_storage.py
```python
from flask import request, make_response
from app import app
@app.route("/cookie-storage/assert-that-cookie1-is-valueA")
def cookiestorage_assert_that_cookie1_is_valuea():
assert request.cookies["cookie1"] == "valueA"
return ""
@app.route("/cookie-storage/assert-that-cookie1-is-not-in-session")
def cookiestorage_assert_that_cookie1_is_not_in_session():
assert "cookie1" not in request.cookies
return ""
```
#### File: integration/tests_ok/delete.py
```python
from app import app
@app.route("/delete", methods=["DELETE"])
def delete():
return ""
```
#### File: integration/tests_ok/hello.py
```python
from app import app
from flask import request
@app.route("/hello")
def hello():
assert "Content-Type" not in request.headers
assert "Content-Length" not in request.headers
assert len(request.data) == 0
return "Hello World!"
```
#### File: integration/tests_ok/output.py
```python
from app import app
from flask import request
@app.route("/output/endpoint1", methods=["POST"])
def output_endpoint1():
assert request.headers["Content-Type"] == "application/json"
s = request.data.decode("utf-8")
assert s == '{ "user": "bob" }'
return app.response_class(
headers={"date": "DATE1"}, response="Response endpoint1\n"
)
@app.route("/output/endpoint2")
def output_endpoint2():
return app.response_class(
headers={"date": "DATE2"}, response="Response endpoint2\n"
)
```
#### File: integration/tests_ok/post_multilines.py
```python
from flask import request
from app import app
@app.route("/post-multilines", methods=["POST"])
def post_multilines():
s = request.data.decode("utf-8")
assert s == "name,age\nbob,10\nbill,22\n"
return ""
@app.route("/get-bob-age", methods=["GET"])
def get_bob_age():
return "10"
```
#### File: integration/tests_ok/predicates_string.py
```python
from flask import request
from app import app
@app.route("/predicates-string")
def predicates_string():
return "Hello World!"
@app.route("/predicates-string-empty")
def predicates_string_empty():
return ""
@app.route("/predicates-string-unicode")
def predicates_string_unicode():
return "\u2708"
```
#### File: integration/tests_ok/user_agent.py
```python
from app import app
from flask import request
@app.route("/user-agent/a")
def useragent_a():
assert "Mozilla/5.0 A" == request.headers["User-Agent"]
return ""
@app.route("/user-agent/b")
def useragent_b():
assert "Mozilla/5.0 B" == request.headers["User-Agent"]
return ""
``` |
{
"source": "jlevman/empyricalRMT",
"score": 3
} |
#### File: empyricalRMT/empyricalRMT/brody.py
```python
import numpy as np
from numpy import ndarray
from scipy.optimize import minimize_scalar
from scipy.special import gamma
from scipy.stats.mstats import gmean
from statsmodels.distributions.empirical_distribution import ECDF
def brody_dist(s: ndarray, beta: float) -> ndarray:
"""See Eq. 8 of
<NAME>., <NAME>., & <NAME>. (2017).
Spectral statistics of random geometric graphs.
EPL (Europhysics Letters), 118(1), 18003.
"""
b1 = beta + 1
alpha = gamma((beta + 2) / b1) ** b1
return b1 * alpha * s ** beta * np.exp(-alpha * s ** b1)
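# Reference form of the density implemented above (added annotation):
#   P_beta(s) = (beta + 1) * alpha * s**beta * exp(-alpha * s**(beta + 1)),
#   where alpha = Gamma((beta + 2) / (beta + 1))**(beta + 1).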
def brody_cdf(s: ndarray, beta: float) -> ndarray:
"""Return the cumulative distribution function of the Brody distribution for beta."""
b1 = beta + 1
alpha = gamma((beta + 2) / b1) ** b1
return 1 - np.exp(-alpha * s ** b1)
def log_brody(s: ndarray, beta: float) -> ndarray:
"""Just a helper re-written to prevent overflows and filter negative spacings"""
b1 = beta + 1.0
alpha = gamma((beta + 2.0) / b1) ** b1
s = s[s > 0.0]
    # the terms below are kept separate for easier logging of underflow issues
    t1 = np.log(b1 * alpha)
    t2 = beta * np.log(s)
    t3 = alpha * s ** b1
    # log(b1 * alpha * s**beta * exp(-alpha * s**b1)) = t1 + t2 - t3, elementwise
    return t1 + t2 - t3
def fit_brody(s: ndarray, method: str = "spacing") -> float:
"""Get an estimate for the beta parameter of the Brody distribution
    Parameters
    ----------
    s: ndarray
        The array of spacings.
    Returns
    -------
    beta: float
        The estimate for beta (MLE or maximum spacing, depending on `method`).
"""
method = method.lower()
if method == "spacing" or method == "spacings":
return fit_brody_max_spacing(s)
if method == "mle":
return fit_brody_mle(s)
raise ValueError("`method` must be one of 'spacing' or 'mle'.")
def fit_brody_mle(s: ndarray) -> float:
"""Return the maximum likelihood estimate for beta.
    Parameters
----------
s: ndarray
The array of spacings.
Returns
-------
beta: float
The MLE estimate for beta.
Notes
-----
Try using https://en.wikipedia.org/wiki/Maximum_spacing_estimation
instead
"""
# use negative log-likelihood because we want to minimize
# log_like = lambda beta: -np.sum(log_brody(s, beta))
log_like = lambda beta: -np.sum(brody_dist(s, beta))
opt_result = minimize_scalar(
log_like, bounds=(1e-5, 1.0 - 1e-5), method="Bounded", tol=1e-10
)
if not opt_result.success:
raise RuntimeError("Optimizer failed to find optimal Brody fit.")
return float(opt_result.x)
def fit_brody_max_spacing(s: ndarray) -> float:
"""Return the maximum likelihood estimate for beta.
Paramaters
----------
s: ndarray
The array of spacings.
Returns
-------
beta: float
The maximum spacings estimate for beta.
Notes
-----
Try using https://en.wikipedia.org/wiki/Maximum_spacing_estimation
instead
"""
n = len(s) - 1
def alpha(beta: float) -> np.float64:
return gamma((beta + 2) / (beta + 1)) ** (beta + 1)
def _positive_diffs(s: ndarray, beta: float) -> np.float64:
s = np.sort(s)
brody_cdf = 1.0 - np.exp(-alpha(beta) * (s ** (beta + 1)))
diffs = np.diff(brody_cdf)
diffs = diffs[diffs > 0] # necessary to prevent over/underflows
return diffs
# use negative log-likelihood because we want to minimize
# log_like = lambda beta: -np.sum(log_brody(s, beta))
# s = np.sort(s)
# brody_cdf = lambda beta: 1.0 - np.exp(-alpha(beta) * (s ** (beta + 1)))
# diffs = lambda beta: np.diff(brody_cdf(beta))
log_spacings = lambda beta: np.log(_positive_diffs(s, beta))
S_n = lambda beta: -np.sum(log_spacings(beta)) / (n + 1)
opt_result = minimize_scalar(
S_n, bounds=(1e-5, 1.0 - 1e-5), method="Bounded", tol=1e-10
)
if not opt_result.success:
raise RuntimeError("Optimizer failed to find optimal Brody fit.")
return float(opt_result.x)
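# Added annotation: the objective S_n above is the mean negative log-spacing (Moran)
# statistic used in maximum spacing estimation,
#   S_n(beta) = -(1 / (n + 1)) * sum_i log[F_beta(s_(i+1)) - F_beta(s_(i))],
# where F_beta is the Brody CDF and s_(i) are the sorted spacings.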
def brody_fit_evaluate(s: ndarray, method: str = "spacing") -> dict:
beta = fit_brody(s, method)
ecdf = ECDF(s)
ecdf_x = ecdf.x[1:] # ECDF always makes first x value -inf if `side`=="left"
ecdf_y = ecdf.y[1:]
bcdf = brody_cdf(ecdf_x, beta)
mad = np.mean(np.abs(ecdf_y - bcdf))
msqd = np.mean((ecdf_y - bcdf) ** 2)
return {
"beta": beta,
"mad": mad,
"msqd": msqd,
"spacings": ecdf_x,
"ecdf": ecdf_y,
"brody_cdf": bcdf,
}
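# --- Illustrative usage sketch (added; not part of the original module) ---
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    # Rayleigh spacings with mean ~1 stand in for real unfolded nearest-neighbour spacings
    s = rng.rayleigh(scale=np.sqrt(2.0 / np.pi), size=5000)
    fit = brody_fit_evaluate(s, method="spacing")
    print("beta: {:.3f}  MAD: {:.4f}".format(fit["beta"], fit["mad"]))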
```
#### File: empyricalRMT/empyricalRMT/compare.py
```python
import numpy as np
import pandas as pd
from numba import jit
from numpy import ndarray
from pandas import DataFrame
from typing import Any, List, Tuple
from typing_extensions import Literal
from empyricalRMT._validate import make_1d_array
Metric = Literal["mad", "msqd", "corr"]
class Compare:
"""A helper class for implementing various curve comparison methods."""
def __init__(
self,
curves: List[ndarray],
labels: List[str],
base_curve: ndarray = None,
base_label: str = None,
):
"""Construct a Compare object for accessing various comparison methods.
Parameters
----------
curves: List[ndarray]
A list of unidimensional numpy arrays of values to compare. For most
comparison methods besides some piecewise / quantile comparison methods, the
curves must have identical lengths.
labels: List[str]
A list of strings identifying each curve. Must be the same length as
curves, and labels[i] must be the label for curves[i], for all valid
values of i.
base_curve: ndarray
The base curve against which each curve of `curves` will be compared, if the
desire is to compare multiple curves only to one single curve.
base_label: str
The label for identifying the base_curve.
"""
self.curves = [make_1d_array(curve) for curve in curves]
self.labels = labels.copy()
self.base_curve = make_1d_array(base_curve) if base_curve is not None else None
self.base_label = base_label # don't need to copy strings in Python
self.__validate_curve_lengths()
self.dict = dict(zip(self.labels, self.curves))
def correlate(self) -> DataFrame:
"""Return the grid of correlations across curves. """
self.__validate_curve_lengths(
message="Comparing via correlation requires all curves have identical lengths",
check_all_equal=True,
)
if self.base_curve is not None:
            # index with [0, 1:], since [0, :] gives the first row of correlations, and since
# [0, 0] is just the correlation of the base_curve with itself
data = np.corrcoef(self.base_curve, self.curves)[0, 1:]
return pd.DataFrame(data=data, index=self.labels, columns=[self.base_label])
data = np.corrcoef(self.curves)
return pd.DataFrame(data=data, index=self.labels, columns=self.labels)
def mean_sq_difference(self) -> DataFrame:
"""Return the grid of mean square differences across curves."""
self.__validate_curve_lengths(
message="Comparing via mean squared differences requires all curves have identical lengths",
check_all_equal=True,
)
curves = np.array(self.curves)
if self.base_curve is not None:
diffs = np.empty(curves.shape[0])
for i in range(len(diffs)):
diffs[i] = np.mean((self.base_curve - curves[i]) ** 2)
return pd.DataFrame(
data=diffs, index=self.labels, columns=[self.base_label]
)
data = self.__fast_msqd(curves)
return pd.DataFrame(data=data, index=self.labels, columns=self.labels)
def mean_abs_difference(self) -> DataFrame:
"""Return the grid of mean absolute differences across curves."""
self.__validate_curve_lengths(
message="Comparing via mean absolute differences requires all curves have identical lengths",
check_all_equal=True,
)
curves = np.array(self.curves)
if self.base_curve is not None:
diffs = np.empty(curves.shape[0])
for i in range(len(diffs)):
diffs[i] = np.mean(np.abs(self.base_curve - curves[i]))
return pd.DataFrame(
data=diffs, index=self.labels, columns=[self.base_label]
)
data = self.__fast_mad(curves)
return pd.DataFrame(data=data, index=self.labels, columns=self.labels)
def _test_validate(self, **kwargs: Any) -> None:
self.__validate_curve_lengths(**kwargs)
@staticmethod
@jit(nopython=True, fastmath=True)
def __fast_msqd(curves: ndarray) -> ndarray:
n = curves.shape[0]
data = np.empty((n, n), dtype=np.float64)
for j in range(n):
for i in range(n):
data[i, j] = np.mean((curves[i] - curves[j]) ** 2)
return data
@staticmethod
@jit(nopython=True, fastmath=True)
def __fast_mad(curves: ndarray) -> ndarray:
n = curves.shape[0]
data = np.empty((n, n), dtype=np.float64)
for j in range(n):
for i in range(n):
data[i, j] = np.mean(np.abs(curves[i] - curves[j]))
return data
@staticmethod
def __histograms(
curve1: ndarray, curve2: ndarray, n_bins: int = 10
) -> Tuple[ndarray, ndarray, ndarray]:
"""Compute a histogram over [min(curve1, curve2), max(curve1, curve2)].
Returns
-------
        endpoints: ndarray
            The (sorted) ndarray of bin endpoints.
        counts1: ndarray
            The bin counts for curve1.
        counts2: ndarray
            The bin counts for curve2.
"""
vals1 = np.sort(curve1)
vals2 = np.sort(curve2)
endpoints = np.linspace(
min(vals1[0], vals2[0]), max(vals1[-1], vals2[-1]), n_bins + 1
)
        # np.histogram with shared bin edges gives the counts directly and avoids
        # the off-by-one pitfalls of a hand-rolled binning loop
        counts1, _ = np.histogram(vals1, bins=endpoints)
        counts2, _ = np.histogram(vals2, bins=endpoints)
return endpoints, counts1, counts2
def __validate_curve_lengths(
self, message: str = None, check_all_equal: bool = False
) -> None:
"""Ensure curve lengths are appropriate for desired comparison methods."""
curves = self.curves
labels = self.labels
if len(curves) < 1:
raise ValueError("There must be more than one curve to compare.")
if len(curves) == 1 and self.base_curve is None:
raise ValueError(
"There must be more than one curve to compare to the base curve."
)
if len(self.curves) != len(labels):
raise ValueError("`labels` must have the same length as `curves`.")
all_equal = np.all([len(curve) == len(curves[0]) for curve in curves])
if check_all_equal:
if self.base_curve is not None and self.base_label is not None:
if len(curves[0]) != len(self.base_curve):
raise ValueError(message)
if not all_equal:
raise ValueError(message)
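# --- Illustrative usage sketch (added; not part of the original module) ---
if __name__ == "__main__":
    x = np.linspace(0.0, 1.0, 100)
    comp = Compare(curves=[x, x ** 2, np.sqrt(x)], labels=["linear", "quadratic", "sqrt"])
    print(comp.correlate())
    print(comp.mean_abs_difference())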
```
#### File: empyricalRMT/empyricalRMT/_eigvals.py
```python
import numpy as np
from numpy import ndarray
from typing import Sized
from empyricalRMT._validate import make_1d_array
from empyricalRMT.observables.step import _step_function_fast
from empyricalRMT.plot import _spacings as plotSpacings
from empyricalRMT.plot import _raw_eig_dist, _raw_eig_sorted, _step_function, PlotResult
class EigVals:
"""Base class, not to be instantiated. """
def __init__(self, eigenvalues: Sized):
self.__construct_vals: ndarray = make_1d_array(eigenvalues)
self._steps = None
self._vals = np.sort(eigenvalues) # to be overridden in actual classes
@property
def original_values(self) -> ndarray:
return self.__construct_vals
@property
def original_eigs(self) -> ndarray:
return self.__construct_vals
@property
def original_eigenvalues(self) -> ndarray:
return self.__construct_vals
# NOTE: This *must* be overridden
@property
def values(self) -> ndarray:
raise NotImplementedError(".values() should be implemented in derived classes.")
# NOTE: This *must* be overridden
@property
def vals(self) -> ndarray:
raise NotImplementedError(".vals() should be implemented in derived classes.")
@property
def steps(self) -> ndarray:
if self._steps is None:
self._steps = _step_function_fast(self._vals, self._vals)
return self._steps
@property
def spacings(self) -> ndarray:
return np.diff(np.sort(self.vals))
def step_function(self, x: ndarray) -> ndarray:
return _step_function_fast(eigs=self.vals, x=x)
def plot_sorted(self, *args, **kwargs) -> PlotResult: # type: ignore
return _raw_eig_sorted(eigs=self.values, *args, **kwargs) # type: ignore
def plot_distribution(self, *args, **kwargs) -> PlotResult: # type: ignore
return _raw_eig_dist(eigs=self.values, *args, **kwargs) # type: ignore
def plot_steps(self, *args, **kwargs) -> PlotResult: # type: ignore
return _step_function(eigs=self.values, *args, **kwargs) # type: ignore
def plot_spacings(self, *args, **kwargs) -> PlotResult: # type: ignore
return plotSpacings(unfolded=self.values, *args, **kwargs) # type: ignore
```
#### File: empyricalRMT/signalproc/detrend.py
```python
import numpy as np
from numpy import ndarray
from numba import jit, prange
from PyEMD import EMD
from scipy.stats import linregress
from empyricalRMT.utils import slope, intercept
class Detrend:
def __init__(self) -> None:
return
def linear(self, series: ndarray) -> ndarray:
"""Remove the linear trend by fitting a linear model, and returning
the residuals"""
time = np.arange(0, len(series))
        m, b = linregress(time, series)[:2]  # m == slope, b == intercept (r, p, stderr ignored)
fitted = m * time + b
return series - fitted
def emd(self, series: ndarray) -> ndarray:
"""Remove the lowest-frequency trend as determined by Empirical
Mode Decomposition """
trend = EMD().emd(series)[-1]
return series - trend
def difference(self, series: ndarray) -> ndarray:
"""Remove non-stationarity by differencing the data (once)"""
        differenced = np.empty(len(series) - 1)
for i in range(len(series) - 1):
differenced[i] = series[i + 1] - series[i]
return differenced
@jit(nopython=True, parallel=True, fastmath=True)
def linear_detrend(signals: ndarray, ret: ndarray) -> ndarray:
"""takes voxels with nonzero variance"""
m, T = signals.shape
x = np.arange(0, T)
for i in prange(m):
y = signals[i, :]
a = slope(x, y)
b = intercept(x, y, a)
        fitted = a * x + b
detrended = y - fitted
ret[i, :] = detrended
return ret
@jit(nopython=True, parallel=True, fastmath=True)
def mean_detrend(signals: ndarray, ret: ndarray) -> ndarray:
"""takes voxels with nonzero variance"""
m, T = signals.shape
for i in prange(m):
ret[i, :] = signals[i, :] - np.mean(signals[i, :])
return ret
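# --- Illustrative usage sketch (added; not part of the original module) ---
if __name__ == "__main__":
    t = np.arange(200)
    series = 0.05 * t + np.sin(t / 5.0)   # linear trend plus an oscillation
    residuals = Detrend().linear(series)  # oscillation with the linear trend removed
    print(residuals.mean())               # approximately zero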
```
#### File: empyricalRMT/empyricalRMT/smoother.py
```python
import numpy as np
import pandas as pd
from numpy import ndarray
from numpy.polynomial.polynomial import polyfit, polyval
from pandas import DataFrame
from scipy.interpolate import UnivariateSpline as USpline
from scipy.optimize import curve_fit
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from typing_extensions import Literal
from warnings import warn
from empyricalRMT._constants import (
DEFAULT_POLY_DEGREE,
DEFAULT_SPLINE_DEGREE,
DEFAULT_SPLINE_DEGREES,
DEFAULT_SPLINE_SMOOTH,
DEFAULT_SPLINE_SMOOTHS,
)
from empyricalRMT.exponentials import gompertz
from empyricalRMT.detrend import emd_detrend
SPLINE_DICT = {3: "cubic", 4: "quartic", 5: "quintic"}
SmoothMethod = Union[Literal["poly"], Literal["spline"], Literal["gompertz"]]
SmoothArg = Union[List[float], Literal["heuristic"]]
def _spline_name(i: int) -> str:
return SPLINE_DICT[i] if SPLINE_DICT.get(i) is not None else f"deg{i}"
class Smoother:
def __init__(self, eigenvalues: ndarray):
"""Initialize a Smoother.
Parameters
----------
eigenvalues: ndarray
Eigenvalues for fitting to the step function.
"""
try:
eigs = np.array(eigenvalues).ravel()
except BaseException as e:
raise ValueError("Could not convert eigenvalues into numpy array.") from e
if len(eigs) != len(eigenvalues):
raise ValueError("Input array must be one-dimensional.")
self._eigs = np.sort(eigs)
def fit(
self,
smoother: SmoothMethod = "poly",
degree: int = DEFAULT_POLY_DEGREE,
spline_smooth: float = DEFAULT_SPLINE_SMOOTH,
detrend: bool = False,
return_callable: bool = False,
) -> Tuple[ndarray, ndarray, Optional[Callable[[ndarray], ndarray]]]:
"""Computer the specified smoothing function values for a set of eigenvalues.
Parameters
----------
eigs: ndarray
The sorted eigenvalues
smoother: "poly" | "spline" | "gompertz" | lambda
The type of smoothing function used to fit the step function
degree: int
The degree of the polynomial or spline
spline_smooth: float
The smoothing factors passed into scipy.interpolate.UnivariateSpline
detrend: bool
Whether or not to perform EMD detrending before returning the
unfolded eigenvalues.
return_callable: bool
If true, return a function that closes over the fit parameters so
that, e.g., additional values can be fit later.
Returns
-------
unfolded: ndarray
the unfolded eigenvalues
steps: ndarray
the step-function values
"""
eigs = self._eigs
# steps = _step_function_fast(eigs, eigs)
steps = np.arange(0, len(eigs)) + 1
self.__validate_args(
smoother=smoother, degree=degree, spline_smooth=spline_smooth
)
if smoother == "poly":
poly_coef = polyfit(eigs, steps, degree)
unfolded = polyval(eigs, poly_coef)
            func = (lambda x: polyval(x, poly_coef)) if return_callable else None
if detrend:
unfolded = emd_detrend(unfolded)
return unfolded, steps, func
if smoother == "spline":
k = DEFAULT_SPLINE_DEGREE
try:
k = int(degree)
except BaseException as e:
print(ValueError("Cannot convert spline degree to int."))
raise e
if spline_smooth == "heuristic":
s = len(eigs) * np.var(eigs, ddof=1)
spline = USpline(eigs, steps, k=k, s=s)
elif spline_smooth is not None:
if not isinstance(spline_smooth, float):
raise ValueError("Spline smoothing factor must be a float")
spline = USpline(eigs, steps, k=k, s=spline_smooth)
else:
raise ValueError(
"Unreachable: All possible spline_smooth arguments should have been handled."
)
            func = (lambda x: spline(x)) if return_callable else None
unfolded = spline(eigs)
if detrend:
unfolded = emd_detrend(unfolded)
return unfolded, steps, func
if smoother == "gompertz":
# use steps[end] as guess for the asymptote, a, of gompertz curve
[a, b, c], cov = curve_fit(gompertz, eigs, steps, p0=(steps[-1], 1, 1))
            func = (lambda x: gompertz(x, a, b, c)) if return_callable else None
unfolded = gompertz(eigs, a, b, c)
if detrend:
unfolded = emd_detrend(unfolded)
return unfolded, steps, func
raise RuntimeError("Unreachable!")
def fit_all(
self,
poly_degrees: List[int] = [],
spline_smooths: SmoothArg = [],
spline_degrees: List[int] = DEFAULT_SPLINE_DEGREES,
gompertz: bool = False,
detrend: bool = False,
) -> Tuple[DataFrame, DataFrame, DataFrame, Dict[str, Callable]]:
"""unfold eigenvalues for all specified smoothers
Parameters
----------
poly_degrees: List[int]
the polynomial degrees for which to compute fits.
Default [3, 4, 5, 6, 7, 8, 9, 10, 11]
spline_smooths: List[float] | "heuristic"
If a list of floats, the smoothing factors, s, passed into
scipy.interpolate.UnivariateSpline.
If "heuristic", choose a set of smoothing factors scaled to the length of the
eigenvalues, that, on GOE eigenvalues, tend to result in a range of fits
varying from highly flexible (nearly interpolated) to about the flexibility of
a cubic or quartic. As the number of eigenvalues starts to go below about 300,
an increasing number of practically-identical, redundant splines will be fit
with this option, and manual inspection or non-heuristic specification of
spline smoothing factors is strongly recommended.
spline_degrees: List[int]
A list of ints determining the degrees of scipy.interpolate.UnivariateSpline
fits. Default [3]
Returns
-------
unfoldeds: DataFrame
DataFrame of unfolded eigenvalues for each set of fit parameters, e.g. where
each column contains a name indicating the fitting parameters, with the values
of that column being the (sorted) unfolded eigenvalues.
sqes: DataFrame
DataFrame of mean-squared error of fits, where each column contains a name
indicating the fitting parameters and smoother, with the values of
the column being the mean of the squared residuals of the fit
smoother_map: dict
A dict of {col_name: closure} for accessing the fitted smoothers later.
"""
# construct dataframes to hold all info
col_names, unfoldeds, spacings, sqes = [], [], [], []
smoother_map = {}
for d in poly_degrees:
col_name = f"poly_{d}"
unfolded, steps, closure = self.fit(
smoother="poly", degree=d, return_callable=True, detrend=detrend
)
col_names.append(col_name)
sqes.append(np.mean((unfolded - steps) ** 2))
unfolded = np.sort(unfolded) # Important!
unfoldeds.append(unfolded)
spacings.append(np.diff(unfolded))
smoother_map[col_name] = closure
if spline_smooths == "heuristic":
for s in DEFAULT_SPLINE_SMOOTHS:
for d in spline_degrees:
col_name = f"{_spline_name(d)}-spline_" "{:1.2f}_heuristic".format(
s
)
unfolded, steps, closure = self.fit(
smoother="spline",
spline_smooth=len(self._eigs) ** s,
degree=d,
return_callable=True,
detrend=detrend,
)
col_names.append(col_name)
sqes.append(np.mean((unfolded - steps) ** 2))
unfolded = np.sort(unfolded)
unfoldeds.append(unfolded)
spacings.append(np.diff(unfolded))
smoother_map[col_name] = closure
else:
for s in spline_smooths: # type: ignore
for d in spline_degrees:
col_name = f"{_spline_name(d)}-spline_" "{:1.3f}".format(s)
unfolded, steps, closure = self.fit(
smoother="spline",
spline_smooth=s,
degree=d,
return_callable=True,
detrend=detrend,
)
col_names.append(col_name)
sqes.append(np.mean((unfolded - steps) ** 2))
unfolded = np.sort(unfolded)
unfoldeds.append(unfolded)
spacings.append(np.diff(unfolded))
smoother_map[col_name] = closure
if gompertz:
unfolded, steps, closure = self.fit(
smoother="gompertz", return_callable=True, detrend=detrend
)
col_names.append("gompertz")
sqes.append(np.mean((unfolded - steps) ** 2))
unfolded = np.sort(unfolded)
unfoldeds.append(unfolded)
spacings.append(np.diff(unfolded))
smoother_map["gompertz"] = closure
unfoldeds = pd.DataFrame(data=unfoldeds, index=col_names).T
spacings = pd.DataFrame(data=spacings, index=col_names).T
sqes = pd.DataFrame(data=sqes, index=col_names).T
return unfoldeds, spacings, sqes, smoother_map # type: ignore
@staticmethod
def _get_smoother_names(
poly_degrees: List[int],
spline_smooths: SmoothArg,
spline_degrees: List[int] = [3],
gompertz: bool = True,
) -> List[str]:
"""If arguments are arrays, generate names (unique identifiers) for each smoother
+ smoother parameters. Otherwise, just return the name for indexing into the report.
"""
col_names = []
if isinstance(poly_degrees, list):
for d in poly_degrees:
col_names.append(f"poly_{d}")
else:
raise ValueError("poly_degrees must be a list of int values")
if spline_smooths == "heuristic":
for s in DEFAULT_SPLINE_SMOOTHS:
if not isinstance(spline_degrees, list):
raise ValueError("spline_degrees must be a list of integer values")
for deg in spline_degrees:
col_name = (
f"{_spline_name(deg)}-spline_" "{:1.3f}_heuristic".format(s)
)
col_names.append(col_name)
else:
try:
spline_smooths = list(spline_smooths) # type: ignore
except Exception as e:
raise ValueError(f"Error converting `spline_smooths` to list: {e}")
if isinstance(spline_smooths, list):
for s in spline_smooths:
if not isinstance(spline_degrees, list):
raise ValueError(
"spline_degrees must be a list of integer values"
)
for deg in spline_degrees:
col_name = f"{_spline_name(deg)}-spline_" "{:1.3f}".format(s)
col_names.append(col_name)
else:
raise ValueError("spline_smooths must be a list of float values")
if gompertz is True:
col_names.append("gompertz")
return col_names
def __validate_args(self, **kwargs: Any) -> None:
"""throw an error if smoother args are in any way invalid"""
smoother = kwargs.get("smoother")
degree = kwargs.get("degree")
spline_smooth = kwargs.get("spline_smooth")
emd = kwargs.get("detrend") # TODO: implement
method = kwargs.get("method")
if smoother == "poly":
if degree is None:
warn(
"No degree set for polynomial unfolding."
f"Will default to polynomial of degree {DEFAULT_POLY_DEGREE}.",
category=UserWarning,
)
if not isinstance(degree, int):
raise ValueError("Polynomial degree must be of type `int`")
if degree < 3:
raise ValueError("Unfolding polynomial must have minimum degree 3.")
elif smoother == "spline":
spline_degree = degree
if degree is None:
warn(
f"No degree set for spline unfolding. Will default to spline of degree {DEFAULT_SPLINE_DEGREE}.",
category=UserWarning,
)
if not isinstance(spline_degree, int) or spline_degree > 5:
raise ValueError("Degree of spline must be an int <= 5")
if spline_smooth is not None and spline_smooth != "heuristic":
spline_smooth = float(spline_smooth)
elif smoother == "gompertz":
pass # just allow this for now
elif callable(smoother):
# NOTE: above is not a great check, but probably good enough for our purposes
# https://stackoverflow.com/questions/624926/how-do-i-detect-whether-a-python-variable-is-a-function#comment437753_624939
raise NotImplementedError("Custom fit functions not currently implemented.")
else:
raise ValueError("Unrecognized smoother argument.")
if emd is not None and not isinstance(emd, bool):
raise ValueError("`detrend` can be only a boolean or undefined (None).")
if method is None or method == "auto" or method == "manual":
pass
else:
raise ValueError("`method` must be one of 'auto', 'manual', or 'None'")
```
#### File: empyricalRMT/empyricalRMT/utils.py
```python
import curses
import multiprocess as mp
import nibabel as nib
import numpy as np
from numpy import ndarray
import os
import shutil
import sys
from colorama import Cursor, init, Style, Fore
from nibabel import Nifti1Image
from numba import jit
from pathlib import Path
from progressbar import Bar, AdaptiveETA, Percentage, ProgressBar, RotatingMarker, Timer
from sys import stderr
from typing import Any, Callable, List, Optional
RESET = Style.RESET_ALL
def res(path: Path) -> str:
return str(path.absolute().resolve())
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=stderr, **kwargs)
def log(label: str, var: Any) -> None:
eprint(f"{label}: {var}")
# https://stackoverflow.com/a/42913743
def is_symmetric(a: ndarray, rtol: float = 1e-05, atol: float = 1e-08) -> bool:
return bool(np.allclose(a, a.T, rtol=rtol, atol=atol))
def array_map(array: np.array, f: Callable, x: ndarray) -> None:
it = np.nditer(array, flags=["f_index"], op_flags=["readwrite"])
while not it.finished:
i = it.index
it[0] = f(x[i])
it.iternext()
it.close()
def make_cheaty_nii(orig: Nifti1Image, array: np.array) -> Nifti1Image:
"""clone the header and extraneous info from `orig` and data in `array`
into a new Nifti1Image object, for plotting
"""
affine = orig.affine
header = orig.header
return nib.Nifti1Image(dataobj=array, affine=affine, header=header)
def mkdirp(path: Path) -> None:
try:
os.makedirs(path, exist_ok=True)
except Exception as e:
print(
f"Error making directory {path}. Another program may have modified the file "
"while this script was running.",
file=sys.stderr,
)
print("Original error:", file=sys.stderr)
raise e
def make_directory(path: Path) -> Path:
if not os.path.exists(path):
try:
os.makedirs(path)
return path
except Exception as e:
print(
f"Error making directory {path}. Another program likely modified it while this script was running.",
file=sys.stderr,
)
print("Original error:", file=sys.stderr)
raise e
else:
return path
def make_parent_directories(path: Path) -> None:
path = path.absolute()
paths = []
for folder in path.parents:
if folder != Path.home():
paths.append(folder)
else:
break
paths.reverse()
for path in paths:
make_directory(path)
def parallel_map(func: Callable, data: list, cpus: int = None) -> List[Any]:
"""func: function that takes one parameter
data: the array of values that func will take
"""
result = []
if cpus is None:
with mp.Pool(
mp.cpu_count()
) as pool: # ensure automatic closing, use available cpus
result = pool.map(func, data)
else:
with mp.Pool(cpus) as pool: # ensure automatic closing, use available cpus
result = pool.map(func, data)
return result
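# Illustrative call (added sketch, not part of the original module): `multiprocess`
# can serialize lambdas, so e.g.
#   squares = parallel_map(lambda x: x * x, list(range(10)), cpus=2)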
@jit(nopython=True, cache=True, fastmath=True)
def nd_find(arr: np.array, value: Any) -> Optional[int]:
for i, val in np.ndenumerate(arr):
if val == value:
return i # type: ignore
return None
@jit(nopython=True)
def find_first(arr: np.array, value: Any) -> int:
for i, val in enumerate(arr):
if val == value:
return i # type: ignore
return -1
@jit(nopython=True)
def find_last(arr: np.array, value: Any) -> int:
for i in range(len(arr)):
j = len(arr) - i - 1
if arr[j] == value:
return j # type: ignore
return -1
# clear all
def tty_clear(COLS: int, ROWS: int) -> None:
sys.stdout.write(Cursor.POS(1, 1))
for row in range(ROWS):
sys.stdout.write(Cursor.POS(1, row + 1))
sys.stdout.write(f"{' ' * COLS}")
sys.stdout.write(Cursor.POS(1, 1))
def write_in_place(message: str, value: str, value_color: Any) -> None:
init()
COLS, ROWS = shutil.get_terminal_size((80, 40))
tty_clear(COLS, ROWS)
full = f"{message}: {value_color}{value}{Style.RESET_ALL}{Cursor.POS(1, 1)}"
sys.stdout.write(full)
sys.stdout.flush()
def write_block(messages: List[str], border: str = None) -> None:
init()
COLS, ROWS = shutil.get_terminal_size((80, 40))
tty_clear(COLS, ROWS)
if border is not None and len(str(border[0])) == 1:
sys.stdout.write(str(border[0]) * COLS)
for i, message in enumerate(messages):
if border is not None and len(str(border[0])) == 1:
full = "{:160}{}".format(message, Cursor.POS(1, i + 3))
else:
full = "{:160}{}".format(message, Cursor.POS(1, i + 2))
sys.stdout.write(full)
if border is not None and len(str(border[0])) == 1:
sys.stdout.write("=" * COLS)
sys.stdout.write(f"{Cursor.POS(0, len(messages)+2)}")
else:
sys.stdout.write(f"{Cursor.POS(0, len(messages)+1)}")
sys.stdout.write(f"{Fore.RESET}")
sys.stdout.flush()
def end_curses(screen: Any) -> None:
curses.nocbreak()
screen.keypad(False)
curses.echo()
curses.endwin()
def setup_progressbar(desc: str, max_count: int, marker: bool = False) -> ProgressBar:
bar = Bar(marker=RotatingMarker()) if marker else ""
bar_space = " " if marker else ""
pbar_widgets = [
f"{Fore.GREEN}{desc}: {Style.RESET_ALL}",
f"{Fore.BLUE}",
Percentage(),
f" {Style.RESET_ALL}",
bar,
bar_space,
f"|{Fore.WHITE}",
Timer(),
f"{Style.RESET_ALL} |",
f"{Fore.YELLOW}",
AdaptiveETA(),
f"{Style.RESET_ALL}|",
]
pbar = ProgressBar(
widgets=pbar_widgets, maxval=max_count, redirect_stderr=True
).start()
return pbar
def flatten_4D(img4D: np.ndarray) -> np.ndarray:
if type(img4D) == np.ndarray:
return img4D.reshape((np.prod(img4D.shape[0:-1]),) + (img4D.shape[-1],))
@jit(nopython=True, fastmath=True, cache=True)
def slope(x: np.array, y: np.array) -> np.float64:
x_mean = np.mean(x)
y_mean = np.mean(y)
x_dev = x - x_mean
y_dev = y - y_mean
cov = np.sum(x_dev * y_dev)
var = np.sum(x_dev * x_dev)
if var == 0:
return 0
return cov / var
@jit(nopython=True, fastmath=True)
def variance(arr: np.array) -> float:
"""i.e. s^2"""
n = len(arr)
scale = 1.0 / (n - 1.0)
mean = np.mean(arr)
diffs = arr - mean
squares = diffs ** 2
summed = np.sum(squares)
return scale * summed # type: ignore
@jit(nopython=True, fastmath=True, cache=True)
def intercept(x: np.array, y: np.array, slope: np.float64) -> np.float64:
return np.mean(y) - slope * np.mean(x)
@jit(nopython=True, fastmath=True, cache=True)
def fast_r(x: np.array, y: np.array) -> np.float64:
n = len(x)
    num = np.sum(x * y) - n * np.mean(x) * np.mean(y)
denom = (n - 1) * np.sqrt(variance(x)) * np.sqrt(variance(y))
if denom == 0:
return 0
return num / denom
# termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
```
#### File: empyricalRMT/tests/test_plot.py
```python
import numpy as np
import pytest
import time
from numpy import ndarray
from empyricalRMT.eigenvalues import Eigenvalues
from empyricalRMT.construct import goe_unfolded
from empyricalRMT.correlater import correlate_fast
def get_eigs(arr: ndarray) -> ndarray:
print(f"\n{time.strftime('%H:%M:%S (%b%d)')} -- computing eigenvalues...")
eigs = np.linalg.eigvalsh(arr)
print(f"\n{time.strftime('%H:%M:%S (%b%d)')} -- computed eigenvalues...")
return eigs
@pytest.mark.plot
def test_axes_configuring() -> None:
var = 0.1
percent = 25
A = np.random.standard_normal([1000, 500])
correlated = np.random.permutation(A.shape[0] - 1) + 1 # don't select first row
last = int(np.floor((percent / 100) * A.shape[0]))
corr_indices = correlated[:last]
# introduce correlation in A
for i in corr_indices:
A[i, :] = np.random.uniform(1, 2) * A[0, :] + np.random.normal(
0, var, size=A.shape[1]
)
M = correlate_fast(A)
eigs = get_eigs(M)
print(f"\nPercent correlated noise: {percent}%")
unfolded = Eigenvalues(eigs).unfold(degree=13)
unfolded.plot_fit(mode="noblock")
goe_unfolded(1000, log=True).plot_fit(mode="block")
```
#### File: empyricalRMT/tests/test_trim.py
```python
import numpy as np
import pandas as pd
import pytest
from pathlib import Path
from empyricalRMT.eigenvalues import Eigenvalues
from empyricalRMT.construct import generate_eigs
from empyricalRMT.trim import TrimIter
@pytest.mark.fast
@pytest.mark.trim
def test_init_sanity() -> None:
eigs = Eigenvalues(generate_eigs(1000))
report = eigs.trim_report(
max_iters=9,
poly_degrees=[5, 7, 9],
spline_degrees=[],
spline_smooths=[],
show_progress=True,
)
assert np.allclose(report._untrimmed, eigs.original_eigenvalues)
assert isinstance(report.summary, pd.DataFrame)
assert isinstance(report._trim_iters, list)
assert isinstance(report._trim_iters[0], TrimIter)
path = Path(".") / "trim_report.csv"
report.to_csv(path)
assert path.exists()
path.unlink()
report.plot_trim_steps(mode="test")
@pytest.mark.fast
@pytest.mark.trim
def test_trim_manual() -> None:
vals = generate_eigs(2000)
for i in range(20):
m, n = np.sort(np.array(np.random.uniform(0, len(vals), 2), dtype=int))
raw_trimmed = np.copy(vals[m:n])
eigenvalues = Eigenvalues(vals)
trimmed = eigenvalues.trim_manually(m, n)
assert np.allclose(raw_trimmed, trimmed.vals)
@pytest.mark.fast
@pytest.mark.trim
def test_trim_reports() -> None:
eigs = Eigenvalues(generate_eigs(2000, seed=2))
report = eigs.trim_report()
best_smoothers, best_unfolds, best_indices, consistent_smoothers = (
report.best_overall()
)
assert np.array_equal(
np.sort(consistent_smoothers), np.sort(["poly_7", "poly_8", "poly_9"])
)
assert np.array_equal(best_indices, [(104, 1765), (231, 1765), (104, 2000)])
report.plot_trim_steps(mode="test")
``` |
{
"source": "jlev/njtransit-fares",
"score": 3
} |
#### File: jlev/njtransit-fares/cli.py
```python
import argparse, sys
from datetime import datetime
from collections import defaultdict
import itertools
import csv
import logging
from api import get_trip
import stops
log = logging.getLogger(__name__)
def valid_date(s):
try:
return datetime.strptime(s, "%Y-%m-%d")
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def valid_time(s):
try:
return datetime.strptime(s, "%H:%M").time()
except ValueError:
msg = "Not a valid time: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def valid_town(s):
s = s.strip().replace("'",'').upper()
if s in stops.NAMES:
return s
else:
log.error(f'{s} not in valid stop names')
return None
def write_outfile(pairs, filename='output.csv'):
# output huge spreadsheet with all possible stop combinations
with open(filename, 'w') as out_file:
fieldnames = list(stops.NAMES)
fieldnames.insert(0, 'stop')
pair_writer = csv.DictWriter(out_file, fieldnames=fieldnames)
pair_writer.writeheader()
for town in stops.NAMES:
values = pairs[town]
values['stop'] = town
pair_writer.writerow(values)
log.info('wrote '+filename)
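# Example invocations (added sketch; town names are placeholders that must exist in
# stops.NAMES, with date/time formats as required by valid_date/valid_time above):
#   python cli.py HOBOKEN NEWARK 2021-06-01 08:30
#   python cli.py --load routes.csv 2021-06-01 08:30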
if __name__=="__main__":
parser = argparse.ArgumentParser(description='Get bus fares for NJ Transit')
parser.add_argument('origin', help='TOWN', type=valid_town, nargs='?')
parser.add_argument('destination', help='TOWN', type=valid_town, nargs='?')
parser.add_argument('--load', help='route CSV')
parser.add_argument('date', help='YYYY-MM-DD', type=valid_date)
parser.add_argument('time', help='HH:MM', type=valid_time)
parser.add_argument('--log', help='LEVEL', default='error')
args = parser.parse_args()
console_out = logging.StreamHandler(sys.stdout)
console_out.setLevel(args.log.upper())
log.addHandler(console_out)
when = datetime.combine(args.date, args.time)
if args.origin and args.destination:
log.info(f'getting {args.origin} to {args.destination} at {when}')
fare = get_trip(args.origin, args.destination, when)
log.info(f'fare: {fare}')
elif args.load:
log.info(f'loading {args.load}')
# file layout is route,also,towns (list)
with open(args.load, 'r') as route_file:
route_reader = csv.reader(route_file)
fieldnames = route_reader.__next__()
pairs = defaultdict(dict)
try:
for route in route_reader:
print('route '+route[0])
# each route contains a list of towns to check legs
# split field by comma, check for valid name, and filter out nones
towns = filter(None.__ne__, [valid_town(t) for t in route[2].split(',')])
# check each pair of towns without repeating
combinations = itertools.combinations(towns, 2)
for (orig,dest) in combinations:
log.info(f'{orig}-{dest}')
pairs[orig][dest] = get_trip(orig, dest, when)
write_outfile(pairs)
except KeyboardInterrupt:
print('quitter')
finally:
log.debug(pairs)
write_outfile(pairs)
``` |
{
"source": "jlevy44/airlab",
"score": 3
} |
#### File: airlab/utils/imageFilters.py
```python
import os
import multiprocessing as mp
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = str(mp.cpu_count())
import SimpleITK as sitk
import numpy as np
import torch as th
from .image import Image
def auto_crop_image_filter(image, boundary_value=0):
"""
Performs automatic cropping of the boundary values of an image
image (Image): image which has to be cropped
boundary_value (float|int): specifies the boundary value which will be cropped
return (Image): a new image with cropped boundary
"""
msk = 1 - (image.image.squeeze() == boundary_value)
rminmax = []
for d in range(len(msk.shape)):
region = msk.argmax(dim=d).nonzero()
rminmax.append((region.min(dim=0)[0], region.max(dim=0)[0]))
#print(rminmax[-1])
if image.ndim == 2:
cropped = image.image.squeeze()[rminmax[1][0]:rminmax[1][1], rminmax[0][0]:rminmax[0][1]]
origin = image.origin + th.Tensor(image.spacing) * th.Tensor([rminmax[1][0], rminmax[0][0]])
elif image.ndim == 3:
cropped = image.image.squeeze()[rminmax[1][0][0]:rminmax[1][1][0], \
rminmax[0][0][0]:rminmax[0][1][0], \
rminmax[0][0][1]:rminmax[0][1][1]]
#print(cropped.shape)
origin = th.Tensor(image.origin) + th.Tensor(image.spacing) * th.Tensor([rminmax[1][0][0], rminmax[0][0][0],rminmax[0][0][1]])
else:
raise Exception("Only 2 and 3 space dimensions supported")
size = tuple(cropped.shape)
cropped.unsqueeze_(0).unsqueeze_(0)
return Image(cropped, size, image.spacing, origin.tolist())
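# Hedged usage sketch (illustrative names only; assumes an airlab Image built from a
# SimpleITK volume whose constant background equals boundary_value):
#   # img = Image(sitk.ReadImage("volume.nii"))       # hypothetical input file
#   # cropped = auto_crop_image_filter(img, boundary_value=0)
#   # cropped has the constant border removed and its origin shifted accordingly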
def normalize_images(fixed_image, moving_image):
"""
Normalize image intensities by subtracting the joint minimum and dividing by the joint maximum
Note: the function operates in place
fixed_image (Image): fixed image
moving_image (Image): moving image
return (Image, Image): normalized images
"""
fixed_min = fixed_image.image.min()
moving_min = moving_image.image.min()
min_val = min(fixed_min, moving_min)
fixed_image.image -= min_val
moving_image.image -= min_val
moving_max = moving_image.image.max()
fixed_max = fixed_image.image.max()
max_val = max(fixed_max, moving_max)
fixed_image.image /= max_val
moving_image.image /= max_val
return (fixed_image, moving_image)
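# Hedged numeric example of the joint normalization above: if the fixed image spans
# [-100, 300] and the moving image spans [-50, 200], min_val = -100 is subtracted from
# both, the joint maximum becomes 400, and both images end up within [0, 1] on a shared
# intensity scale (the inputs are modified in place).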
def remove_bed_filter(image, cropping=True):
"""
Removes fine structures from the image using morphological operators. It can be used to remove the bed structure
usually present in CT images. The resulting image and the respective body mask can be cropped with the cropping
option.
Note: the morphological operations are performed on a downsampled version of the image
image (Image): image of interest
cropping (bool): specifies if the image should be cropped after bed removal
return (Image, Image): bed-free image and a body mask
"""
# define parameters
houndsfield_min = -300
houndsfield_max = 3071
houndsfield_default = -1024
radius_opening = 3
radius_closing = 40
image_itk = image.itk()
# resample image
workingSize = np.array(image.size)
workingSize[0] /= 3
workingSize[1] /= 3
workingSpacing = np.array(image.spacing, dtype=float) * np.array(image.size, dtype=float) / np.array(workingSize, dtype=float)
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(image.origin)
resampler.SetSize(workingSize.tolist())
resampler.SetOutputSpacing(workingSpacing.tolist())
resampler.SetInterpolator(2) # linear interpolation
resampler.SetNumberOfThreads(mp.cpu_count())
image_tmp = resampler.Execute(image_itk)
# threshold image
thresholder = sitk.BinaryThresholdImageFilter()
thresholder.SetOutsideValue(0)
thresholder.SetInsideValue(1)
thresholder.SetLowerThreshold(houndsfield_min)
thresholder.SetUpperThreshold(houndsfield_max)
thresholder.SetNumberOfThreads(mp.cpu_count())
image_tmp = thresholder.Execute(image_tmp)
# morphological opening with ball as structuring element
# removes thin structures as the bed
opening = sitk.BinaryMorphologicalOpeningImageFilter()
opening.SetKernelType(sitk.sitkBall)
opening.SetKernelRadius(radius_opening)
opening.SetForegroundValue(1)
opening.SetNumberOfThreads(mp.cpu_count())
image_tmp = opening.Execute(image_tmp)
# crop zero values from mask boundary
if cropping:
image_tmp = auto_crop_image_filter(Image(image_tmp).to(device=image.device)).itk()
# morphological closing with ball as structuring element
# fills up the lungs
closing = sitk.BinaryMorphologicalClosingImageFilter()
closing.SetKernelType(sitk.sitkBall)
closing.SetKernelRadius(radius_closing)
closing.SetForegroundValue(1)
closing.SetNumberOfThreads(mp.cpu_count())
image_tmp = closing.Execute(image_tmp)
# resample mask to original spacing
mask_size = np.array(np.array(image_tmp.GetSpacing(), dtype=float)*np.array(image_tmp.GetSize(),dtype=float)/np.array(image.spacing, dtype=float), dtype=int).tolist()
resampler = sitk.ResampleImageFilter()
resampler.SetOutputOrigin(image_tmp.GetOrigin())
resampler.SetSize(mask_size)
resampler.SetOutputSpacing(image.spacing)
resampler.SetInterpolator(1) # nearest neighbor interpolation
resampler.SetNumberOfThreads(mp.cpu_count())
bodyMask = resampler.Execute(image_tmp)
# resample also original image
resampler.SetInterpolator(2)
image_itk = resampler.Execute(image_itk)
# mask image with found label map
masking = sitk.MaskImageFilter()
masking.SetMaskingValue(0)
masking.SetOutsideValue(houndsfield_default)
masking.SetNumberOfThreads(mp.cpu_count())
outImage = masking.Execute(image_itk, bodyMask)
return (Image(outImage).to(device=image.device), Image(bodyMask).to(device=image.device))
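# Hedged usage sketch (illustrative only; assumes a CT volume with Hounsfield intensities
# wrapped in an airlab Image):
#   # ct = Image(sitk.ReadImage("ct_volume.nii"))      # hypothetical input file
#   # body_only, body_mask = remove_bed_filter(ct, cropping=True)
#   # voxels outside the detected body are set to houndsfield_default (-1024) in body_only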
```
#### File: airlab/utils/points.py
```python
import numpy as np
import torch as th
import SimpleITK as sitk
from .image import Displacement
class Points:
"""
Class implementing functionality for dealing with points:
- read/write: supported formats are pts and vtk (polydata)
- transform: transform the points given a displacement field
- TRE: calculates the target registration error between two point sets
"""
@staticmethod
def read(filename):
"""
Read points from file. Following formats are supported:
- pts: each point is represented on one line with the coordinates separated by tabs
- vtk: the vtk polydata is supported as well
filename (str): filename
return (array): two dimensional array
"""
if filename.endswith("pts"):
points = []
with open(filename) as f:
lines = f.readlines()
for l in lines:
points.append([float(p) for p in l.split()])
return np.array(points)
elif filename.endswith("vtk"):
with open(filename) as f:
lines = f.readlines()
if not lines[1] == "vtk output\n" and \
not lines[2] == "ASCII\n" and \
not lines[3] == "DATASET POLYDATA\n":
raise Exception("Tried to read corrupted vtk polydata file")
n = int(lines[4].split()[1])
one_line = ''.join(''.join(lines[5:]).split('\n'))
one_line = [float(p) for p in one_line.split()]
return np.array(one_line).reshape((n, 3))
else:
raise Exception("Format not supported: "+str(filename))
@staticmethod
def write(filename, points):
"""
Write point list to hard drive
filename (str): destination filename
points (array): two dimensional array
"""
if filename.endswith("pts"):
with open(filename, 'w') as f:
for p in points:
f.write('\t'.join([str(v) for v in p])+'\n')
elif filename.endswith("vtk"):
n = points.shape[0]
with open(filename, 'w') as f:
f.write("# vtk DataFile Version 3.0\n")
f.write("vtk output\n")
f.write("ASCII\n")
f.write("DATASET POLYDATA\n")
f.write("POINTS "+str(n)+" float\n")
for p in points:
f.write('\t'.join([str(v) for v in p])+'\n')
else:
raise Exception("Format not supported: "+str(filename))
@staticmethod
def transform(points, displacement):
"""
Transforms a set of points with a displacement field
points (array): array of points
displacement (SimpleITK.Image | Displacement ): displacement field to transform points
return (array): transformed points
"""
if type(displacement) == sitk.SimpleITK.Image:
df_transform = sitk.DisplacementFieldTransform(displacement)
elif type(displacement) == Displacement:
df_transform = sitk.DisplacementFieldTransform(displacement.to(dtype=th.float64).itk())
else:
raise Exception("Datatype of displacement field not supported.")
df_transform.SetSmoothingOff()
transformed_points = np.zeros_like(points)
for i in range(points.shape[0]):
transformed_points[i, :] = df_transform.TransformPoint(points[i, :])
return transformed_points
@staticmethod
def TRE(points1, points2):
"""
Computes the average distance between points in points1 and points2
Note: if the two sets contain a different number of points, only the first n points are compared
points1 (array): point set 1
points2 (array): point set 2
return (float): mean difference
"""
n = min(points1.shape[0], points2.shape[0])
return np.mean(np.linalg.norm(points1[:n,:]-points2[:n,:], axis=1))
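# Hedged usage sketch for the Points helpers (file names and the displacement variable
# are hypothetical; displacement would come from a completed registration):
#   # fixed_pts = Points.read("fixed_landmarks.pts")
#   # moving_pts = Points.read("moving_landmarks.pts")
#   # warped_pts = Points.transform(moving_pts, displacement)
#   # print(Points.TRE(fixed_pts, warped_pts))   # mean landmark distance after warping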
```
#### File: airlab/examples/affine_registration_3d.py
```python
import sys
import os
import time
import matplotlib.pyplot as plt
import torch as th
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import airlab as al
def main():
start = time.time()
# set the used data type
dtype = th.float32
# set the device for the computation to CPU
device = th.device("cpu")
# In order to use a GPU uncomment the following line. The number is the device index of the used GPU
# Here, the GPU with the index 0 is used.
# device = th.device("cuda:0")
# create 3D image volume with two objects
object_shift = 10
fixed_image = th.zeros(64, 64, 64).to(device=device)
fixed_image[16:32, 16:32, 16:32] = 1.0
fixed_image = al.Image(fixed_image, [64, 64, 64], [1, 1, 1], [0, 0, 0])
moving_image = th.zeros(64, 64, 64).to(device=device)
moving_image[16 - object_shift:32 - object_shift, 16 - object_shift:32 - object_shift,
16 - object_shift:32 - object_shift] = 1.0
moving_image = al.Image(moving_image, [64, 64, 64], [1, 1, 1], [0, 0, 0])
# create pairwise registration object
registration = al.PairwiseRegistration()
# choose the rigid transformation model
transformation = al.transformation.pairwise.RigidTransformation(moving_image, opt_cm=True)
transformation.init_translation(fixed_image)
registration.set_transformation(transformation)
# choose the Mean Squared Error as image loss
image_loss = al.loss.pairwise.MSE(fixed_image, moving_image)
registration.set_image_loss([image_loss])
# choose the Adam optimizer to minimize the objective
optimizer = th.optim.Adam(transformation.parameters(), lr=0.1)
registration.set_optimizer(optimizer)
registration.set_number_of_iterations(500)
# start the registration
registration.start()
# set the intensities for the visualisation
fixed_image.image = 1 - fixed_image.image
moving_image.image = 1 - moving_image.image
# warp the moving image with the final transformation result
displacement = transformation.get_displacement()
warped_image = al.transformation.utils.warp_image(moving_image, displacement)
end = time.time()
print("=================================================================")
print("Registration done in: ", end - start, " s")
print("Result parameters:")
transformation.print()
# sitk.WriteImage(warped_image.itk(), '/tmp/rigid_warped_image.vtk')
# sitk.WriteImage(moving_image.itk(), '/tmp/rigid_moving_image.vtk')
# sitk.WriteImage(fixed_image.itk(), '/tmp/rigid_fixed_image.vtk')
# plot the results
plt.subplot(131)
plt.imshow(fixed_image.numpy()[16, :, :], cmap='gray')
plt.title('Fixed Image Slice')
plt.subplot(132)
plt.imshow(moving_image.numpy()[16, :, :], cmap='gray')
plt.title('Moving Image Slice')
plt.subplot(133)
plt.imshow(warped_image.numpy()[16, :, :], cmap='gray')
plt.title('Warped Moving Image Slice')
plt.show()
if __name__ == '__main__':
main()
``` |
{
"source": "jlevy44/HE2Tri",
"score": 3
} |
#### File: HE2Tri/data/npy_dataset.py
```python
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import os
import numpy as np
import cv2
class NPYDataset(BaseDataset):
"""This dataset class can load a set of images specified by the path --dataroot /path/to/data.
It can be used for generating CycleGAN results only for one side with the model option '-model test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.img_path = os.path.abspath(opt.wsi_name)
self.img = np.load(self.img_path)
print("WSI image shape", self.img.shape)
if opt.bgr2rgb:
self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)
input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
# self.transform = get_transform(opt, grayscale=(input_nc == 1))
self.transform = get_transform(opt, grayscale=(input_nc == 1), to_pil=True)
self.reset()
def reset(self):
self.img_new = np.zeros_like(self.img, dtype=np.uint32)
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns:
A (tensor) - - the transformed image patch at the given index
"""
A = self.img[index]
A = self.transform(A)
return A
def __len__(self):
"""Return the total number of images in the dataset."""
return self.img.shape[0]
def push_image(self, index, patch_img):
self.img_new[index] = patch_img
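# Hedged usage sketch (illustrative only; `model` and `opt` are assumed to come from the
# surrounding CycleGAN test pipeline, and run_patch() is a hypothetical inference call):
#   # dataset = NPYDataset(opt)
#   # for i in range(len(dataset)):
#   #     fake = run_patch(model, dataset[i])
#   #     dataset.push_image(i, fake)
#   # np.save("translated_wsi.npy", dataset.img_new)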
``` |
{
"source": "jlevy44/JoshuaTree2",
"score": 3
} |
#### File: JoshuaTree2/CNSAnalysis/circosGeneration.py
```python
import sys,os
from collections import defaultdict, Counter
from fai2karyotypeMod import fai2karyotype
from pybedtools import BedTool
import subprocess
import numpy as np
import shutil
import operator
from ete2 import Tree
e = sys.argv
print e
try:
maxInner = float(e[1])
except:
maxInner = 0.3
try:
analysisCompare = int(e[2])
except:
analysisCompare = 0
print 'maxInner',maxInner
print analysisCompare
def parseConfigFindList(stringFind,configFile):
"""parseConfigFindList inputs a particular string to find and read file after and a configuration file object
outputs list of relevant filenames"""
read = 0
listOfItems = []
for line in configFile:
if line:
if read == 1:
if 'Stop' in line:
configFile.seek(0)
break # exit the function and return the list of files or list information
listOfItems.append(line.strip('\n'))
if stringFind in line:
read = 1 # if find string specified, begin reading lines
configFile.seek(0)
return listOfItems
def parseConfigFindPath(stringFind,configFile):
"""findPath will find path of associated specified string or info from config file"""
for line in configFile:
if stringFind in line: # if find string specified, return pathname or info
configFile.seek(0)
return line.split()[-1].strip('\n')
configFile.seek(0)
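# Hedged sketch of the config layout these parsers expect (keys and values are
# illustrative): parseConfigFindPath returns the last whitespace-separated token of the
# matching line, while parseConfigFindList collects every line between the marker line
# and a line containing 'Stop', e.g.
#   inputTree /path/to/species.nwk
#   masterListSpecies
#   Bdistachyon_314
#   Osativa_323
#   Stop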
confDict = {'ticks':"""show_ticks = yes
show_tick_labels = yes
<ticks>
radius = dims(ideogram,radius_outer)
orientation = out
label_multiplier = 1e-6
color = black
size = 20p
thickness = 3p
label_offset = 5p
format = %d
<tick>
spacing = 1u
show_label = no
size = 10p
</tick>
<tick>
spacing = 5u
show_label = yes
label_size = 20p
size = 15p
</tick>
<tick>
spacing = 10u
show_label = yes
label_size = 24p
</tick>
</ticks>""",
'label':"""show_label = yes
label_font = default
label_radius = dims(image,radius)-30p
label_size = 24
label_parallel = yes
label_case = lower
label_format = eval(sprintf("chr%s",var(label)))
""",
'position':"""radius = 0.775r
thickness = 30p
fill = yes
fill_color = black
stroke_thickness = 2
stroke_color = black""",
'r0r1':"""# set track radius values based on track counter
r1 = eval(sprintf("%fr",conf(track_start)-counter(plot)*(conf(track_width)+conf(track_pad))))
r0 = eval(sprintf("%fr",conf(track_start)-counter(plot)*(conf(track_width)+conf(track_pad))-conf(track_width)))""",
'ideogram':"""<ideogram>
<spacing>
default = 0.01r
break = 0.5r
</spacing>
<<include position.conf>>
<<include label.conf>>
<<include bands.conf>>
radius* = 0.95r
</ideogram>""",
'bands':"""show_bands = yes
fill_bands = yes
band_stroke_thickness = 2
band_stroke_color = white
band_transparency = 0"""}
for conf in confDict.keys():
with open(conf+'.conf','w') as f:
f.write(confDict[conf])
f.close()
with open('configCNSAnalysis.txt','r') as f:
inputSpecies = parseConfigFindList('masterListSpecies', f)
inputTree = parseConfigFindPath('inputTree',f)
protId = defaultdict(list)
for line in inputSpecies:
if line:
print line
protId[line.split('_')[0]] = line.split('_')[1]
#protId = {'Bdistachyon':'314','Bstacei':'316','Osativa':'323','Phallii':'308','Pvirgatum':'383','Sbicolor':'313','Sitalica':'312'}
#inputList = sys.argv
inputList = protId.keys()#['Bdistachyon','Bstacei','Osativa','Phallii','Pvirgatum','Sbicolor','Sitalica']
try:
with open(inputTree,'r') as f:
speciesTree = Tree(f.read())
inputSpecies = [node.name for node in speciesTree.traverse('preorder') if node.name]
inputList2 = []
for species in inputSpecies:
if species in inputList:
inputList2.append(species)
inputList = inputList2
except:
pass
listFiles = os.listdir('.')
speciesDict = defaultdict(list)
"""files = [[file for file in listFiles if species in file and 'Conserved_CDS' in file][0],
[file for file in listFiles if species in file and 'CNSElements_Intergenic' in file][0],
[file for file in listFiles if species in file and 'CNSElements_Intronic' in file][0]]"""
for species in inputList:
if species:
speciesDict[species] = [[file for file in listFiles if species in file and 'ConservedElements' in file][0],
[file for file in listFiles if protId[species] in file and 'Genes' in file and file.endswith('.bed3')][0],
fai2karyotype([file for file in listFiles if protId[species] in file and file.endswith('.fai')][0],species,300000),
'heatmap.%s.txt'%species,open('heatmap.%s.txt'%species,'w')]
generateRadii = np.linspace(0.40,0.80,len(speciesDict.keys())+1)
print generateRadii
totalNumberSpecies = float(len(speciesDict.keys()))
for species in inputList:
try:
histInterval = defaultdict(list)
with open([file for file in listFiles if protId[species] in file and file.endswith('.fai')][0],'r') as f:
#chromCount = 0
#print 'a1'
faiArray = np.array([line.split('\t')[0:2] for line in f if line])
faiArray = faiArray[np.argsort(faiArray[:,1].astype(np.int32),axis=0)[::-1],:]
faiArray = faiArray[0:22,:]
#print faiArray
#print 'a2'
#for line in f:
#chromCount+=1
#bedread = '\n'.join('\t'.join('%s\t%d\t%d'%tuple([line.split('\t')[0]]+sorted(np.vectorize(lambda x: int(x))(line.split('\t')[1:3])))))
for chrom in faiArray:
histInterval[chrom[0]] = list(np.arange(0.,int(chrom[1]),250000.)) + [int(chrom[1])]
#interval = sorted(np.vectorize(lambda x: int(x))(line.split('\t')[1:3]))
#histInterval[line.split('\t')[0]] = list(np.arange(0.,interval[-1],250000.)) + [interval[-1]]
#if chromCount > 22:
# break
print histInterval.keys()
bedHist = BedTool('\n'.join('\n'.join('\t'.join([key] + [str(int(x)) for x in [histInterval[key][i],histInterval[key][i+1]]]) for i in range(len(histInterval[key])-1)) for key in histInterval.keys()),from_string=True)
print speciesDict[species][1]
with open(speciesDict[species][1],'r') as f:
bedGenes = BedTool(f.read(),from_string=True).sort().merge()
bedHistGeneFinal = bedHist.intersect(bedGenes,wao=True).sort().merge(c=7,o='sum',d=-1)
with open('%s_geneDensity.txt'%(species),'w') as f:
for line in str(bedHistGeneFinal).split('\n'):
if line:
lineList = line.split('\t')
f.write('\t'.join(lineList[0:3])+'\t%f'%(float(lineList[-1])/(float(lineList[2])-float(lineList[1])))+'\n')
transposonDensityFile = next((file for file in os.listdir('.') if '%s_transposonDensity' % protId[species] in file and (file.endswith('.gff') or file.endswith('.gff2') or file.endswith('.gff3'))), 'emptyDensity.txt')
print protId[species],transposonDensityFile
if transposonDensityFile != 'emptyDensity.txt': #FIXME start here
with open(transposonDensityFile, 'r') as f:
#print 'hello'
#print '\n'.join('\t'.join(operator.itemgetter(0, 3, 4)(line.split('\t'))) for line in f.readlines())
bedTrans = bedHist.intersect(BedTool('\n'.join('\t'.join(operator.itemgetter(0, 3, 4)(line.split('\t'))) for line in f.readlines() if line.startswith("##") == 0),from_string=True).sort().merge(),wao = True).merge(c=7,o='sum',d=-1)
with open('%s_transposonDensity.bed'%protId[species],'w') as f:
for line in str(bedTrans).split('\n'):
if line:
lineList = line.split('\t')
f.write('\t'.join(lineList[0:3])+'\t%f'%(float(lineList[-1])/(float(lineList[2])-float(lineList[1])))+'\n')
else:
open('%s_transposonDensity.bed' % protId[species],'w').close()
for species2 in speciesDict.keys():
speciesDict[species2][-1].close()
speciesDict[species2][-1] = open(speciesDict[species2][-2],'w')
with open('histogramCount%s.txt'%species,'w') as f:
for i in range(1):#3
file = speciesDict[species][i]
#file in speciesDict[species][0:3]:
with open(file,'r') as bedfile:
for line in bedfile:
if line:
try:
countSeq = Counter()
for countOfSpecies in line.split('\t')[-1].split(';')[1].split(','):
countSeq[countOfSpecies.split(':')[0]] = int(countOfSpecies.split(':')[1])
f.write(line[:line.rfind('\t')+1]+str((float(line.split('\t')[2])-float(line.split('\t')[1]))*float(len(set(countSeq.elements())))/totalNumberSpecies)+'\n')
for species2 in countSeq.keys():
if countSeq[species2] > 0:
output = str(i+2)
speciesDict[species2][-1].write(line[:line.rfind('\t')] + '\n')
else:
output = '1'
except:
pass
f.close()
with open('histogramCount%s.txt'%species,'r') as f:
bedHist2 = BedTool(f.read(), from_string=True).sort().merge(c=4,o='mean').saveas('histogramCount%s.txt'%species)
bedSpeciesHist = bedHist.intersect(bedHist2, wao=True).sort().merge(c=7, o='sum', d=-1).saveas('histogramCount%s.txt'%species)#.merge(c=[7,8], o=['sum','sum'], d=-1)
"""with open('histogramCount%s.txt'%species,'w') as f:
for line in str(bedSpeciesHist).split('\n'):
if line:
#if not float(line.split('\t')[3]):
# print line
if line.split('\t')[4] != '0':
f.write('\t'.join(line.split('\t')[0:3]+[str(float(line.split('\t')[3])/float(line.split('\t')[4]))])+'\n')
else:
f.write('\t'.join(
line.split('\t')[0:3] + [str(float(line.split('\t')[3]))]) + '\n')
"""
for species2 in speciesDict.keys():
speciesDict[species2][-1].close()
with open(speciesDict[species2][-2],'r') as f:
reads = f.read()
with open(speciesDict[species2][-2],'w') as f2:
for line in str(bedHist.intersect(BedTool(reads,from_string=True).sort().merge(),wao=True).sort().merge(c=7,o='sum',d=-1)).split('\n'):
if line:
try:
f2.write('\t'.join(line.split('\t')[0:3]+[str(float(line.split('\t')[-1])/250000.)])+'\n')
except:
pass
# now configure circos files
print speciesDict[species][2], [(os.getcwd()+'/',species2) for species2 in speciesDict.keys()], os.getcwd()+'/'+'histogramCount%s.txt' %species,os.getcwd()+'/'+'%s_geneDensity.txt'%species
if analysisCompare:
compareString = """<plot>
show = conf(show_histogram)
type = heatmap
file = %s
orientation = out
thickness = 1
padding = 1
color = greens-9-seq
color_mapping = 1
#fill_under = yes
#fill_color = green
r0 = 0.85r
r1 = 0.90r
max_gap = 5u
z = 10
</plot>"""%(os.getcwd()+'/CompareAnalysis/'+'histogramCount%s.txt'%(species))
else:
compareString = ''
#print compareString
circosconf = """
show_histogram = yes
show_heatmap = yes
use_rules = yes
<<include colors_fonts_patterns.conf>>
<<include ideogram.conf>>
<<include ticks.conf>>
<<include bands.conf>>
<<include position.conf>>
<<include label.conf>>
<image>
<<include etc/image.conf>>
</image>
karyotype = %s
chromosomes_units = 1000000
chromosomes_display_default = yes
# to see how reversing ideograms work - comment out the chromosomes
# line below
#chromosomes = hs2
# and uncomment the two definitions below
# - first split hs2 into three ideograms
# - now reverse the ideogram with tag "b"
#chromosomes = hs2[a]:0-60;hs2[b]:70-140;hs2[c]:150-)
#chromosomes_reverse = b
#chromosomes = hs2[a]:0-30;hs2[b]:50-80;hs2[c]:100-130;hs2[d]:150-180;hs2[e]:190-200;hs2[f]:210-)
#chromosomes_radius = a:0.95r;b:0.9r;c:0.85r;d:0.8r;e:0.75r;f:0.7r
<plots>
show = no
%s
%s
<plot>
show = conf(show_histogram)
type = heatmap
file = %s
orientation = out
thickness = 1
padding = 1
color = reds-9-seq
color_mapping = 1
#fill_under = yes
#fill_color = green
r0 = 0.80r
r1 = 0.85r
max_gap = 5u
z = 10
</plot>
<plot>
show = conf(show_histogram)
type = heatmap
file = %s
orientation = out
thickness = 1
padding = 1
color = purples-9-seq
color_mapping = 1
#fill_under = yes
#fill_color = green
r0 = 0.90r
r1 = 0.95r
max_gap = 5u
min = 0
max = 0.45
z = 10
</plot>
<plot>
show = conf(show_histogram)
type = heatmap
file = %s
min = 0
max = 0.45
orientation = out
thickness = 1
padding = 1
color = blues-9-seq
color_mapping = 1
#fill_under = yes
#fill_color = green
r0 = 0.95r
r1 = 1.0r
max_gap = 5u
z = 10
</plot>
</plots>
<<include etc/housekeeping.conf>>
data_out_of_range* = trim"""%(os.getcwd()+'/'+speciesDict[species][2],'\n'.join("""<plot>
show = conf(show_heatmap)
type = heatmap
min = 0
max = %f
margin = 0.02u
#orientation = out
color = white, spectral-11-div, grey
color_mapping = 1
thickness = 1
padding = 1
#color = black
#fill_color = yellow
#stroke_thickness = 5
#scale_log_base = 0.25
#stroke_color = black
file = %s
r0 = %fr
r1 = %fr
#<rules>
#use = conf(use_rules)
#<rule>
#condition = var(value) == 1
#color = white
#</rule>
#<rule>
#condition = var(value) > 1
#color = black
#</rule>
#</rules>
</plot>"""%(maxInner,os.getcwd()+'/'+'heatmap.'+inputList[i]+'.txt',generateRadii[i],generateRadii[i+1]) for i in range(len(speciesDict.keys()))),
compareString,os.getcwd()+'/'+'histogramCount%s.txt'%(species),os.getcwd()+'/'+'%s_transposonDensity.bed'%protId[species],os.getcwd()+'/'+species+'_geneDensity.txt')
with open('circos.conf','w') as f:
f.write(circosconf)
f.close()
print os.getcwd()+'/'+'circos.conf'
print ['circos','-conf',os.getcwd()+'/'+'circos.conf','-outputfile',
species,'-outputdir',os.getcwd()]
subprocess.call(['circos','-conf',os.getcwd()+'/'+'circos.conf','-outputfile',species,'-outputdir',os.getcwd()])
except:
print 'Error for '+species
"""<plot>
<<include r0r1.conf>>
file = data/6/variation.heatmap.txt
stroke_thickness = 0
min = 2000
max = 250000
</plot>
<plot>
<<include r0r1.conf>>
scale_log_base = 0.5
</plot>
<plot>
<<include r0r1.conf>>
scale_log_base = 1 # this is the default value
</plot>
<plot>
<<include r0r1.conf>>
scale_log_base = 2
</plot>
<plot>
<<include r0r1.conf>>
scale_log_base = 3
</plot>
<plot>
<<include r0r1.conf>>
scale_log_base = 5
</plot>
<plot>
<<include r0r1.conf>>
color = conf(plots,color_alt)
file = data/6/heatmap.step.txt
pattern = hline,vline
color_mapping = 0 # default
min = 0
max = 10
stroke_thickness = 0
</plot>
<plot>
<<include r0r1.conf>>
color = conf(plots,color_alt)
file = data/6/heatmap.step.txt
pattern = hline,solid,vline
color_mapping = 1
min = 0
max = 10
stroke_thickness = 0
</plot>
<plot>
<<include r0r1.conf>>
color = conf(plots,color_alt)
file = data/6/heatmap.step.txt
pattern = hline,solid,vline
color_mapping = 2
min = 0
max = 10
stroke_thickness = 0
</plot>
<plot>
<<include r0r1.conf>>
color = conf(plots,color_alt)
file = data/6/heatmap.step.txt
pattern = hline,checker,vline
color_mapping = 2
min = 2
max = 8
stroke_thickness = 0
</plot>"""
"""<<include etc/colors_fonts_patterns.conf>>
<<include ideogram.conf>>
<<include ticks.conf>>
<image>
<<include etc/image.conf>>
</image>
karyotype = %s
chromosomes_units = 1000000
#chromosomes = hs1;hs2
#chromosomes_breaks = -hs1:120-140
chromosomes_display_default = yes
track_width = 0.05
track_pad = 0.02
track_start = 0.95
<plots>
type = heatmap
<rules>
<rule>
condition = var(value) = 0
color = white
</rule>
<rule>
condition = var(value) = 1
color = black
</rule>
#<rule>
#condition = var(value) = 2
#color = green
#</rule>
#<rule>
#condition = var(value) = 3
#color = blue
#</rule>
</rules>
# default file for all tracks
#file = data/6/snp.number.1mb.txt
# a 9 color diverging spectral palette specified using a color list name
color = spectral-9-div
# referenced via conf(plots,color_alt)
color_alt = black,spectral-8-div,grey
# or the reverse list
#color = spectral-9-div-rev
# or you can even combine lists
# color = ylorrd-9-seq-rev,ylgnbu-9-seq
stroke_thickness = 1
stroke_color = black
min = 1000
max = 5000
%s
<\plots>
<plots>
<plot>
# The type sets the format of the track.
type = histogram
file = %s
# The track is confined within r0/r1 radius limits. When using the
# relative "r" suffix, the values are relative to the position of the
# ideogram.
r1 = 0.75r
r0 = 0.80r
# Histograms can have both a fill and outline. The default outline is 1px thick black.
fill_color = vdgrey
# To turn off default outline, set the outline thickness to zero. If
# you want to permanently disable this default, edit
# etc/tracks/histogram.conf in the Circos distribution.
#thickness = 0p
# Do not join histogram bins that do not abut.
extend_bin = no
# Like for links, rules are used to dynamically alter formatting of
# each data point (i.e. histogram bin). Here, I include the <rule>
# block from a file, which contains the following
#
# <rule>
# condition = on(hs1)
# show = no
# </rule>
#
# to avoid displaying any data on hs1. The rule is included from a
# file because it is reused again in the track below.
<rules>
</rules>
</plot>
<plot>
# The type sets the format of the track.
type = histogram
file = %s
# The track is confined within r0/r1 radius limits. When using the
# relative "r" suffix, the values are relative to the position of the
# ideogram.
r1 = 0.65r
r0 = 0.75r
# Histograms can have both a fill and outline. The default outline is 1px thick black.
fill_color = vdgrey
# To turn off default outline, set the outline thickness to zero. If
# you want to permanently disable this default, edit
# etc/tracks/histogram.conf in the Circos distribution.
#thickness = 0p
# Do not join histogram bins that do not abut.
extend_bin = no
# Like for links, rules are used to dynamically alter formatting of
# each data point (i.e. histogram bin). Here, I include the <rule>
# block from a file, which contains the following
#
# <rule>
# condition = on(hs1)
# show = no
# </rule>
#
# to avoid displaying any data on hs1. The rule is included from a
# file because it is reused again in the track below.
<rules>
</rules>
</plot>
</plots>
<<include etc/housekeeping.conf>>
data_out_of_range* = trim
<plot>
show = conf(show_histogram)
type = histogram
file = %s
thickness = 2
#color = black
fill_under = yes
fill_color = blue
r0 = 0.80r
r1 = 0.95r
orientation = out
max_gap = 5u
z = 10
</plot>
<plot>
show = conf(show_histogram)
type = histogram
file = %s
thickness = 2
#color = black
fill_under = yes
fill_color = blue
r0 = 0.85r
r1 = 0.90r
orientation = out
max_gap = 5u
z = 10
</plot>
<plot>
show = conf(show_histogram)
type = histogram
file = %s
orientation = out
thickness = 1
#color = black
fill_under = yes
fill_color = green
r0 = 0.90r
r1 = 0.95r
max_gap = 5u
z = 10
</plot>"""#%(os.getcwd()+'/'+speciesDict[species][2],'\n'.join("""<plot>
#<<include r0r1.conf>>
#file = %sheatmap.%s.txt
#</plot>"""%(os.getcwd()+'/',species2) for species2 in speciesDict.keys()),os.getcwd()+'/'+'histogramCount%s.txt' %species,os.getcwd()+'/'+'%s_geneDensity.txt'%species)
```
#### File: scaffolding_tool_bin/old_scripts/fixv0.py
```python
import subprocess, os, sys
from collections import defaultdict
import numpy as np
from multiprocessing import Pool, Queue
import subprocess,shutil
from jcvi.formats import gff
from pyfaidx import Fasta
references = [folder for folder in os.listdir('referenceGenomes') if '.' not in folder and folder]
buildSamples = np.vectorize(lambda x: 'Bdist_%s_v0'%(x))(sys.argv[1:])
root = os.getcwd()+'/'
CDSOld = root+'referenceGenomes/'+'314'+'/'+[file for file in os.listdir(root+'referenceGenomes/'+'314') if file.endswith('.fa') and '.cds' in file.lower()][0]
runCommand = lambda x: subprocess.call(x,shell=True)
def formatSamplev0(sample):
global root
commands = ['python %sformatBed.py s %s v0'%(root,sample),'python %sformatCDS.py s %s v0'%(root,sample)]
for command in commands:
runCommand(command)
os.chdir(root)
def buildSamplesv0(sample): #sample = Bdist_xxx_v0.fa
global root
global CDSOld
print sample
os.chdir('v0/'+sample)
print 'c ' + os.getcwd()
fastaNew = sample+'.fa'
geneNaming = sample.replace('_','')
writeCommands = ['samtools faidx %s' %fastaNew,'python -m jcvi.formats.gff bed --type=CDS --key=Name %s -o %s' % (
geneNaming + '.gff3', sample + '.CDS.bed'),'bedtools getfasta -name -fi ./%s -bed %s.CDS.bed -fo %s.cds'%(fastaNew,sample,sample) ]#'gffread -E %s -o- > %s' % (geneNaming + '.gff3', geneNaming + '.cufflinks.gff'),
#'python -m jcvi.formats.gff load %s %s --feature=CDS --id_attribute=Name -o %s' % (geneNaming + '.cufflinks.gff', fastaNew,sample + '.cds')]
writeCommands2 = ['samtools faidx %s' % fastaNew,
'gmap_build --dir=. -d %s %s' % (geneNaming, fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 8 %s > %s 2> %s' % (
geneNaming, CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' % (root, geneNaming + '.gff3', 'Bradi', geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (
geneNaming + '.gff3', sample + '.bed'),
'python -m jcvi.formats.gff load %s %s --feature=CDS --id_attribute=Name -o %s' % (
geneNaming + '.gff3', fastaNew, sample + '.cds')]
for command in writeCommands:
runCommand(command)
print command
os.chdir(root)
if __name__ == '__main__':
with open('output.txt', 'a') as f:
f.write('Outv1')
p = Pool(processes=6)
p.map(func=buildSamplesv0, iterable=buildSamples)
p.map(func=formatSamplev0, iterable=buildSamples)
#for sample in buildSamples:
# os.chdir(root)
# buildSamplesv0(sample)
# os.chdir(root)
# formatSamplev0(sample)
```
#### File: scaffolding_tool_bin/old_scripts/genomeScaffolding.py
```python
import subprocess, os, sys
from collections import defaultdict, OrderedDict
import numpy as np
from multiprocessing import Pool, Queue, Process
from threading import Thread
import subprocess,shutil
from pybedtools import BedTool
from jcvi.formats import gff
from pyfaidx import Fasta
import time
"""python genomeScaffolding.py ReferenceBuild sampleBuild CDSProtID OldCDSGeneName protID1 weight1 protID2 weight2 ..."""
CDSgeneNaming = sys.argv[4]
CDSspecies = sys.argv[3]
args = sys.argv[5:]
root = os.getcwd()+'/'
weights = OrderedDict()
listSamplesv0 = [folder for folder in os.listdir('v0') if folder.endswith('v0')]
try:
ReferenceBuild = int(sys.argv[1])
except:
ReferenceBuild = 1
try:
sampleBuild = int(sys.argv[2])
except:
sampleBuild = 1
print args
print CDSgeneNaming
print CDSspecies
for i in np.arange(0,len(args),2):
try:
weights[args[i]]=int(args[i+1])
except:
print args
print weights
runCommand = lambda x: subprocess.call(x,shell=True)
binbash = "#!/bin/bash"
makeTrashFolder = 'mkdir oldFiles'
moduleLoads = """module load cufflinks/2.2.1
module load samtools/1.3.1
module load gmap
module load parallel/20150222
module load bedtools/2.25.0
module unload gcc
module load gcc/6.3.0
"""
def runCommands(q):
while not q.empty():
print q
try:
print q.get()
runCommand(q.get())
except:
with open('Error.txt','a') as f:
f.write(q.get()+'\n')
q.task_done()
def buildReferences(reference): # essentially keys of weights
global root
global binbash, makeTrashFolder, moduleLoads
print reference
os.chdir('./referenceGenomes/'+reference)
#print os.getcwd()
#print os.listdir('.')
fastaOld = [fasta for fasta in os.listdir('.') if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
#Fasta(fastaOld)
#gff.load([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0])
writeCommands = [binbash,moduleLoads,makeTrashFolder,'samtools faidx %s'%fastaOld,
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],fastaOld,reference+'.cds'),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],reference+'.bed'),
'python %sreplacepath.py %s'%(root,reference+'.bed'),'mv %s %s ..'%(reference+'.bed',reference+'.cds')]
#binbash,makeTrashFolder,moduleLoads,
#print '\n'.join(writeCommands)
"""if __name__ == '__main__':
q = Queue(maxsize=0)
for command in writeCommands:
q.put(command)
runCommands(q)"""
"""for command in writeCommands:
print command
try:
runCommand(command)
except:
with open('Error.txt','a') as f:
f.write(command+'\n')"""
"""for i, command in writeCommands:
print command
if (i == 3 or i==4) and (reference + '.bed' not in os.listdir('..') or os.stat('../'+reference + '.bed').st_size == 0):
runCommand(command)
elif i == 2 and (reference + '.cds' not in os.listdir('..') or os.stat('../'+reference + '.cds').st_size == 0):
runCommand(command)
elif i not in range(2, 7):
runCommand(command)"""
with open('buildReference.sh','w') as f:
f.write('\n'.join(writeCommands))
subprocess.call(['nohup','sh','buildReference.sh'])
os.chdir(root)
#print ReferenceBuild
CDSOld = [fasta for fasta in os.listdir('./referenceGenomes/%s'%CDSspecies) if 'cds' in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
linkReferences = ['ln -s %s%s/%s.cds %s.cds\nln -s %s%s/%s.bed %s.bed'%(root,'referenceGenomes',ref,ref,root,'referenceGenomes',ref,ref) for ref in weights.keys()]
def buildSamplesv0(sample): #sample = Bdist_xxx_v0.fa
global root
global CDSspecies, CDSOld
global binbash, makeTrashFolder, moduleLoads
global CDSgeneNaming, linkReferences
print sample
os.chdir('v0/'+sample)
fastaNew = sample+'.fa'
geneNaming = sample.replace('_','') # -t is number of worker threads
runCommand('rm finishBuild.txt')
writeCommands = [binbash,moduleLoads,makeTrashFolder,'rm -r %s %s.gff3.db %s.chromosome *.iit %s.coords'%(geneNaming,geneNaming,geneNaming,geneNaming),
'samtools faidx %s' %fastaNew,
'gmap_build --dir=. -d %s %s' % (geneNaming,fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 6 %s > %s 2> %s' % (
geneNaming, '../../referenceGenomes/%s/'%CDSspecies + CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' %(root,geneNaming + '.gff3', CDSgeneNaming ,geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (geneNaming + '.gff3', sample + '.bed'),
'python -m jcvi.formats.gff load %s %s --parents=mRNA --children=CDS -o %s' % (
geneNaming+'.gff3', fastaNew,sample + '.cds')]+linkReferences+['> finishBuild.txt']
#"""'python %sgff2CDSBed.py %s'%(root,geneNaming + '.gff3'),'sortBed -i %s.CDS.bed > %s.CDS2.bed'%(geneNaming,geneNaming),
#'python %sformatBed.py s %s v0 1'%(root,geneNaming+'.CDS2'),'bedtools getfasta -name -fi ./%s -bed %s.CDS2.bed -fo %s.cds'%(fastaNew,geneNaming,sample)
#]"""#'mv %s %s ..'%(sample+'.cds',sample+'.bed') binbash, moduleLoads, makeTrashFolder,
#'python -m jcvi.formats.gff load %s %s --feature=CDS --id_attribute=Name -o %s' % (geneNaming + '.gff3', fastaNew,sample + '.cds'),
#'mergeBed -c 4 -i %s.CDS2.bed > %s.CDS.bed'%(geneNaming,geneNaming)
#print writeCommands
#print os.getcwd()
#open('buildSample.sh', 'w').close()
"""if __name__ == '__main__':
q = Queue(maxsize=0)
for command in writeCommands:
q.put(command)
runCommands(q)"""
i=0
"""
for command in writeCommands:
#print i,command
#print i
if (i == 2 or i == 3 or i == 4) and (geneNaming + '.gff3' not in os.listdir('.') or os.stat(geneNaming + '.gff3').st_size ==0):
print(command)
runCommand(command)
elif i==5 and (sample + '.bed' not in os.listdir('.') or os.stat(sample + '.bed').st_size ==0):
print(command)
runCommand(command)
elif i == 6 and (sample + '.cds' not in os.listdir('.') or os.stat(sample + '.cds').st_size ==0):
print(command)
runCommand(command)
elif i not in range(2,7):
print(command)
runCommand(command)
i+=1
"""
with open('buildSample.sh', 'w') as f:
f.write('\n'.join(writeCommands))
#subprocess.call(['nohup', 'sh', 'buildSample.sh'])
runCommand('qsub -P plant-analysis.p -N %s -cwd -l high.c -pe pe_slots 16 -e %s %s' % (
'build'+sample.split('_')[1], 'ErrFile.txt', 'buildSample.sh'))
while True:
if os.path.isfile('finishBuild.txt'):
break
else:
time.sleep(10)
os.chdir(root)
"""try:
runCommand(command)
except:
with open('Error.txt','a') as f:
f.write(command+'\n')"""
"""with open('buildSample.sh','w') as f:
f.write('\n'.join(writeCommands))
try:
subprocess.call(['nohup','sh','buildSample.sh'])
except:
with open('output.txt', 'a') as f:
f.write('Error in %s'%sample)"""
"""writeCommands2 = [binbash, moduleLoads,'gmap_build --dir=. -d %s %s' % (geneNaming,fastaNew),
'gmap --dir=. -d %s -B 5 -A --format=gff3_gene -n 1 -t 8 %s > %s 2> %s' % (
geneNaming, CDSOld, geneNaming + '.gff3', geneNaming + '.log'),
'python %srenameGenes.py %s %s %s' % (root, geneNaming + '.gff3', CDSgeneNaming, geneNaming),
'python -m jcvi.formats.gff bed --type=mRNA --key=Name %s -o %s' % (
geneNaming + '.gff3', sample + '.bed'),
'python -m jcvi.formats.gff bed --type=CDS --key=Name %s -o %s' % (
geneNaming + '.gff3', sample + '.CDS.bed'),
'bedtools getfasta -name -fi ./%s -bed %s.CDS.bed -fo %s.cds' % (
fastaNew, sample, sample)]
with open('buildSample.sh', 'w') as f:
f.write('\n'.join(writeCommands2))
subprocess.call(['nohup', 'sh', 'buildSample.sh'])"""
try:
os.mkdir('v1')
for folder in listSamplesv0:
os.mkdir('v1/%s'%folder.replace('v0','v1'))
os.mkdir('v1/%s/OldFiles'%folder.replace('v0','v1'))
except:
pass
buildCorrespondence = {folder:folder.replace('v0','v1') for folder in listSamplesv0}
listSamplesv1 = buildCorrespondence.values()
print listSamplesv1
def replaceGeneNames(sample,ref,count=0,nuc=0):
refGeneCount = 0
synmap = '%s.%s.lifted.anchors' % (sample, ref)
if nuc:
nucAdd = 'nuc'
synmap = 'nucMap.bed'
refbed = ref + '_nucSyn.bed'
sampbed = sample + '_nucSyn.bed'
a, b = 1, 0
else:
nucAdd = ''
refbed = ref + '.bed'
sampbed = sample + '.bed'
a, b = 0, 1
sampleProt = sample.split('_')[1]
with open(refbed,'r') as f:
refBedLines = f.readlines()
refBedOut = []
refGenes = defaultdict(list)
for line in refBedLines:
if line:
refGenes[line.split('\t')[3]] = ref+nucAdd+'_'+str(refGeneCount)
refBedOut.append(line.replace(line.split('\t')[3],ref+nucAdd+'_'+str(refGeneCount)))
refGeneCount+=1
#ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed'
#print refGenes
with open(sampbed,'r') as f:
sampBedLines = f.readlines()
sampBedOut = []
sampGenes = defaultdict(list)
for line in sampBedLines:
if line:
sampGenes[line.split('\t')[3]] = sampleProt+nucAdd+'_'+str(count)
sampBedOut.append(line.replace(line.split('\t')[3], sampleProt + nucAdd + '_' + str(count)))
count+=1
with open(synmap,'r') as f:
synRead = f.readlines()
synOut = []
for line in synRead:
if line and '###' not in line:
try:
genes = line.split('\t')
print genes
synOut.append(line.replace(genes[0],refGenes[genes[a]]).replace(genes[1],sampGenes[genes[b]]))
except:
with open('Err.txt','a') as f:
f.write(line+'\n')
"""
if nuc:
print sampBedOut[0:10]
print refBedOut[0:10]
print sampGenes.items()[0:10]
print refGenes.items()[0:10]
print synOut[0:10]
with open('nucMap.bed','r') as f:
print f.readlines()[0:10]
"""
if nuc == 0:
for writeTuple in [(ref+'_syn'+'.bed',refBedOut),(sample+'_%ssyn'%ref+'.bed',sampBedOut),(synmap,synOut)]:
with open(writeTuple[0],'w') as f:
f.writelines(writeTuple[1])
else:
for writeTuple in [(refbed,refBedOut),(sampbed,sampBedOut),(synmap,synOut)]:
with open(writeTuple[0],'w') as f:
f.writelines(writeTuple[1])
return count
def tiling2bed(tilingFile,ref,sample,sampBed):
with open(tilingFile,'r') as f:
tilingLines = f.read().split('\n')
genesDict = defaultdict(list)
with open(ref+'_nucSyn.bed','w') as f1, open(sample+'_nucSyn.bed','w') as f2:
for line in tilingLines:
if line:
lineList = line.split('\t')
int1 = sorted(map(int,lineList[0:2]))
int1[0] -= 1
int2 = sorted(map(int,lineList[2:4]))
int2[0] -= 1
f1.write('\t'.join([lineList[-2]]+map(str,int1)+['_'.join([lineList[-2]]+map(str,int1)),'0','+']) + '\n')
f2.write('\t'.join([lineList[-1]]+map(str,int2)+['_'.join([lineList[-1]]+map(str,int2)),'0','+']) + '\n')
genesDict['_'.join([lineList[-1]]+map(str,int2))] = '_'.join([lineList[-2]]+map(str,int1))
b = BedTool(sample+'_nucSyn.bed').subtract(BedTool(sampBed),A=True)
#print b.head()
#print genesDict.keys()[0:10]
origGenes = set(genesDict.keys())
#print str(b).split('\n')[0:10]
#print [ line.split('\t')[3] for line in str(b).split('\n') if line][0:10]
remainGenes = set([ line.split('\t')[3] for line in str(b).split('\n') if line])
#print list(remainGenes)[0:10]
BadGenes = list(origGenes - remainGenes)
#print BadGenes[0:10]
#print len(origGenes), len(remainGenes), len(BadGenes)
#exit()
for gene in BadGenes:
try:
del genesDict[gene]
except:
pass
with open('nucMap.bed','w') as f:
f.write('\n'.join('%s\t%s\t100'%item for item in genesDict.items() if item))
fastaNucOld = [fasta for fasta in os.listdir('./referenceGenomes/%s'%CDSspecies) if 'cds' not in fasta.lower() and (fasta.endswith('.fa') or fasta.endswith('.fasta'))][0]
def generatev1(sample):
os.chdir('v0/%s'%sample)
print sample.replace('v0', 'v1')
global binbash, makeTrashFolder, moduleLoads, root, weights, fastaNucOld, CDSspecies
#print weights
print '\n'.join('%s %d'%(key,weights[key]) for key in weights.keys())#weights.keys()#'\n'.join('%s %d'%(key,weights[key]) for key in sorted(weights, key=weights.get, reverse=True).keys())
print 'hi'
"""if __name__ == '__main__':
p = Pool(None)
p.imap(pairwise, [(sample,ref) for ref in weights.keys()])"""
with open('weights.txt','w') as f:
f.write('\n'.join([weights.keys()[0]+' %d'%weights[weights.keys()[0]],'%snuc %d'%(CDSspecies,weights[CDSspecies]-1)]+['%s %d'%(key,weights[key]) for key in weights.keys()[1:]]))
nucCommands = [binbash,moduleLoads]+ ['nucmer -t 6 -p %s %s %s'%(CDSspecies+'nuc',root+'referenceGenomes/%s/'%CDSspecies+fastaNucOld,sample+'.fa'),
'delta-filter -m -q -i 85 -u 50 %snuc.delta > %snuc2.delta'%(CDSspecies,CDSspecies),'show-tiling -a %snuc2.delta > %snuc.tiling'%(CDSspecies,CDSspecies)]
commands1 = [binbash, moduleLoads]+['rm *.anchors *.last *.filtered *.prj']+\
['nohup python -m jcvi.compara.catalog ortholog %s %s\nmv %s %s'%(ref,sample,'%s.%s.lifted.anchors'%(ref,sample),'%s.%s.lifted.anchors'%(sample,ref)) for ref in weights.keys()]
commands2=[binbash, moduleLoads]+['rm multipleMapping.bed','\n'.join('python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %s'%('%s.%s.lifted.anchors'%(sample,ref),ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed','%s.synteny.bed'%(ref)) for ref in weights.keys()),
'python -m jcvi.assembly.syntenypath bed %s --switch --scale=10000 --qbed=%s --sbed=%s -o %snuc.synteny.bed'%('nucMap.bed',CDSspecies+'_nucSyn.bed',sample+'_nucSyn.bed',CDSspecies),
'nohup python -m jcvi.assembly.allmaps mergebed %s -o %s'%(' '.join(['%s.synteny.bed'%(ref) for ref in (weights.keys() + [CDSspecies+'nuc'])]),'multipleMapping.bed')]
qsub=[binbash,moduleLoads]+['python -m jcvi.assembly.allmaps path --skipconcorde --cpus=32 --ngen=300 --npop=50 multipleMapping.bed %s.fa' % (sample),
'mv multipleMapping.fasta %sv1/%s/%s.fa' % (root,sample.replace('v0', 'v1'), sample.replace('v0', 'v1'))]
#'nohup liftOver -gff %s.gff3 multipleMapping.chain %s.gff3 unmapped' % (sample.replace('_',''), sample.replace('_','').replace('v0', 'v1')), ,'mv %s.gff3 ../../v1/%s' % (sample.replace('_','').replace('v0', 'v1'), sample.replace('v0', 'v1'))
#for ref in weights.keys():
# pairwise((sample,ref))
"""if __name__ == '__main__':
q = Queue(maxsize=0)
for command in commands:
q.put(command)
runCommands(q)"""
#print '\n'.join(commands)
with open('nucCommand.sh','w') as f:
f.write('\n'.join(nucCommands))
with open('constructv1_1.sh','w') as f:
f.write('\n'.join(commands1))
with open('constructv1_2.sh','w') as f:
f.write('\n'.join(commands2))
with open('qsub_buildv1.sh','w') as f:
f.write('\n'.join(qsub))
print os.listdir('%s/v1/%s'%(root,sample.replace('v0','v1')))
if '%snuc.tiling'%CDSspecies not in os.listdir('.'):
runCommand('sh nucCommand.sh')
#print ['%s.%s.lifted.anchors' %(sample, ref) in os.listdir('.') and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]
print all(['%s.%s.lifted.anchors' %(sample, ref) in os.listdir('.') and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]) == 0
#exit()
if all([os.path.isfile('%s.%s.lifted.anchors' %(sample, ref)) and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]) == 0:
print sample, ['%s.%s.lifted.anchors' %(sample, ref) in os.listdir('.') and os.stat('%s.%s.lifted.anchors' %(sample, ref)).st_size > 0 for ref in weights.keys()]
runCommand('sh constructv1_1.sh')
sampleCount = 0
for ref in weights.keys():
sampleCount = replaceGeneNames(sample, ref, sampleCount)
print 'hello ' + sample, ref
print 'construct_1' + sample + ' done'
try:
tiling2bed('%snuc.tiling'%CDSspecies, CDSspecies, sample, sample+'_%ssyn'%CDSspecies+'.bed')
except:
print sys.exc_info()[0]
#exit()
print 'hi2'
replaceGeneNames(sample,CDSspecies,0,1)
if os.stat('nucMap.bed').st_size == 0:
exit()
print 'hi3'
runCommand('sh constructv1_2.sh')
try:
if os.stat('./multipleMapping.bed').st_size > 0:
runCommand('qsub -P plant-analysis.p -N %s -cwd -l h_rt=50:00:00 -pe pe_slots 32 -e %s %s'%(sample,'ErrFile.txt','qsub_buildv1.sh')) #FIXME pe_slots 16, time limit pe_8
else:
with open('ErrFile.txt','a') as f:
f.write('Multiple Mapping Size 0, unable to build v1...')
except:
with open('ErrFile.txt', 'a') as f:
f.write('Multiple Mapping File does not exist, unable to build v1...')
os.chdir(root)
#for command in commands:
# print command
# runCommand(command)
#FIXME ADD qsub
def formatSamplev0(sample):
global root
commands = ['python %sformatBed.py s %s v0'%(root,sample),'python %sformatCDS.py s %s v0'%(root,sample)]
for command in commands:
runCommand(command)
os.chdir(root)
def formatRef(reference):
global root
commands = ['python %sformatBed.py r %s v0' % (root, reference), 'python %sformatCDS.py r %s v0' % (root, reference)]
for command in commands:
runCommand(command)
os.chdir(root)
sampleDist = [listSamplesv0[x:x+7] for x in xrange(0,len(listSamplesv0),7)]
print sampleDist
def buildSampv0List(samplist):
for sample in samplist:
try:
buildSamplesv0(sample)
except:
print 'Error building ' + sample
def formatv0List(samplist):
for sample in samplist:
try:
formatSamplev0(sample)
except:
print 'Error formatting ' + sample
if __name__ == '__main__':
with open('output.txt', 'a') as f:
f.write('Outv1')
listSamplesv0 = [sample for sample in listSamplesv0 if sample.replace('v0', 'v1') + '.fa' not in os.listdir(
'%sv1/%s' % (root, sample.replace('v0', 'v1')))]
print len(listSamplesv0) // 6 + 1
sampleDist = [listSamplesv0[x:x + len(listSamplesv0) // 6 + 1] for x in
xrange(0, len(listSamplesv0), len(listSamplesv0) // 6 + 1)]
print listSamplesv0
print sampleDist
if ReferenceBuild:
p = Pool(processes=6)
p.map(buildReferences, weights.keys())
p.map(func=formatRef, iterable=weights.keys())
p.close()
p.join()
if sampleBuild:
p = Pool(processes=6)#processes=8
p.map_async(func=buildSampv0List, iterable=sampleDist)
p.map_async(func=formatv0List, iterable=sampleDist)
p.close()
p.join()
#for samplelist in sampleDist:
# p.map(generatev1, samplelist)
#for ref in weights.keys():
# formatRef(ref)
#buildReferences('460')
#formatRef('460')
def reader(q):
while True:
sample = q.get()
try:
generatev1(sample)
except:
print 'Generation Error in ' + sample
with open('Error.txt', 'a') as f:
f.write('Generation Error in ' + sample + '\n')
q.task_done()
def genv1List(samplelist):
for sample in samplelist:
#generatev1(sample)
try:
generatev1(sample)
except:
print 'Error gen v1 in ' + sample
if __name__ == '__main__':
#for samplelist in sampleDist:
#q = Queue(maxsize=0)
#num_threads = 6
#for i in range(num_threads):
# worker = Process(target = reader,args=(q,))
# worker.daemon=True
# worker.start()
listSamplesv0 = [sample for sample in listSamplesv0 if sample.replace('v0','v1') + '.fa' not in os.listdir('%sv1/%s'%(root,sample.replace('v0','v1')))]
print len(listSamplesv0)//6 + 1
sampleDist = [listSamplesv0[x:x + len(listSamplesv0)//6 + 1] for x in xrange(0, len(listSamplesv0), len(listSamplesv0)//6 + 1)]
p = Pool()
p.map_async(genv1List,sampleDist)
#for sample in samplelist:
# p.map(generatev1,args=(sample,))
p.close()
p.join()
#for sample in samplelist:
# q.put(sample)
#q.join()
"""try:
generatev1(sample)
break
except:
print 'Generation Error in '+ sample
with open('Error.txt','a') as f:
f.write('Generation Error in '+ sample + '\n')
break
"""
"""'gffread -E %s -o- > %s' % (geneNaming + '.gff3', sample + '.cufflinks.gff'),
'python %sgff2CDSBed.py %s.cufflinks.gff' % (root, sample),
'gffread -E %s -o- > %s' % (geneNaming + '.gff3', sample + '.cufflinks.gff'),
'gffread -E %s -o- > %s'%([file for file in os.listdir('.') if 'cufflinks' not in file and (file.endswith('.gff3') or file.endswith('.gff'))][0],reference+'.cufflinks.gff'),
'gffread %s -x %s -g %s'%(reference+'.cufflinks.gff',reference+'.cds',fastaOld),
'python %sgff2CDSBed.py %s.cufflinks.gff'%(root,sample),
'bedtools getfasta -name -fi ./%s -bed %s.cufflinks.CDS.bed -fo %s.cds'%(fastaNew,sample,sample), """
```
#### File: JoshuaTree2/scaffolding_tool_bin/pipelineFunctions.py
```python
from pybedtools import BedTool
from collections import defaultdict, OrderedDict
def parseConfigFindList(stringFind,configFile):
"""parseConfigFindList inputs a particular string to find and read file after and a configuration file object
outputs list of relevant filenames"""
read = 0
listOfItems = []
for line in configFile:
if line:
if read == 1:
if 'Stop' in line:
configFile.seek(0)
break # exit the function and return the list of files or list information
listOfItems.append(line.strip('\n'))
if stringFind in line:
read = 1 # if find string specified, begin reading lines
configFile.seek(0)
return listOfItems
def parseConfigFindPath(stringFind,configFile):
"""findPath will find path or value of associated specified string or info from config file"""
for line in configFile:
if stringFind in line: # if find string specified, return pathname or specific value trying to find
configFile.seek(0)
return line.split()[-1].strip('\n')
configFile.seek(0)
def replaceGeneNames(sample,ref,count=0,nuc=0,BB=0):
refGeneCount = 0
if nuc:
nucAdd = 'nuc'
synmap = 'nucMap.bed'
refbed = ref + '_nucSyn.bed'
sampbed = sample + '_nucSyn.bed'
a, b = 1, 0
elif BB:
nucAdd = 'BB'
synmap = 'BBMap.bed'
refbed = ref + '_BBSyn.bed'
sampbed = sample + '_BBSyn.bed'
a, b = 1, 0
else:
synmap = '%s.%s.lifted.anchors' % (sample, ref)
nucAdd = ''
refbed = ref + '.bed'
sampbed = sample + '.bed'
a, b = 0, 1
sampleProt = sample.split('_')[1]
with open(refbed,'r') as f:
refBedLines = f.readlines()
refBedOut = []
refGenes = defaultdict(list)
for line in refBedLines:
if line:
refGenes[line.split('\t')[3]] = ref+nucAdd+'_'+str(refGeneCount)
refBedOut.append(line.replace(line.split('\t')[3],ref+nucAdd+'_'+str(refGeneCount)))
refGeneCount+=1
#ref+'_syn'+'.bed',sample+'_%ssyn'%ref+'.bed'
#print refGenes
with open(sampbed,'r') as f:
sampBedLines = f.readlines()
sampBedOut = []
sampGenes = defaultdict(list)
for line in sampBedLines:
if line:
sampGenes[line.split('\t')[3]] = sampleProt+nucAdd+'_'+str(count)
sampBedOut.append(line.replace(line.split('\t')[3], sampleProt + nucAdd + '_' + str(count)))
count+=1
with open(synmap,'r') as f:
synRead = f.readlines()
synOut = []
for line in synRead:
if line and '###' not in line:
try:
genes = line.split('\t')
print genes
synOut.append(line.replace(genes[0],refGenes[genes[a]]).replace(genes[1],sampGenes[genes[b]]))
except:
with open('Err.txt','a') as f:
f.write(line+'\n')
"""
if nuc:
print sampBedOut[0:10]
print refBedOut[0:10]
print sampGenes.items()[0:10]
print refGenes.items()[0:10]
print synOut[0:10]
with open('nucMap.bed','r') as f:
print f.readlines()[0:10]
"""
if nuc == 0 and BB == 0:
for writeTuple in [(ref+'_syn'+'.bed',refBedOut),(sample+'_%ssyn'%ref+'.bed',sampBedOut),(synmap,synOut)]:
with open(writeTuple[0],'w') as f:
f.writelines(writeTuple[1])
else:
for writeTuple in [(refbed,refBedOut),(sampbed,sampBedOut),(synmap,synOut)]:
with open(writeTuple[0],'w') as f:
f.writelines(writeTuple[1])
return count
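# Hedged summary of the renaming above: reference bed gene ids become <ref><suffix>_<i>
# and sample bed gene ids become <sampleProtID><suffix>_<j> (suffix '', 'nuc' or 'BB'
# depending on the mode), and the anchors/synteny map is rewritten with the same ids so
# that downstream jcvi syntenypath/ALLMAPS steps see consistent names. Illustrative call:
#   # count = replaceGeneNames('Bdist_ABR2_v0', '314', count=0)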
def tiling2bed(tilingFile,ref,sample,sampBed):
with open(tilingFile,'r') as f:
tilingLines = f.read().split('\n')
genesDict = defaultdict(list)
with open(ref+'_nucSyn.bed','w') as f1, open(sample+'_nucSyn.bed','w') as f2:
for line in tilingLines:
if line:
lineList = line.split('\t')
int1 = sorted(map(int,lineList[0:2]))
int1[0] -= 1
int2 = sorted(map(int,lineList[2:4]))
int2[0] -= 1
f1.write('\t'.join([lineList[-2]]+map(str,int1)+['_'.join([lineList[-2]]+map(str,int1)),'0','+']) + '\n')
f2.write('\t'.join([lineList[-1]]+map(str,int2)+['_'.join([lineList[-1]]+map(str,int2)),'0','+']) + '\n')
genesDict['_'.join([lineList[-1]]+map(str,int2))] = '_'.join([lineList[-2]]+map(str,int1))
b = BedTool(sample+'_nucSyn.bed').subtract(BedTool(sampBed),A=True)
#print b.head()
#print genesDict.keys()[0:10]
origGenes = set(genesDict.keys())
#print str(b).split('\n')[0:10]
#print [ line.split('\t')[3] for line in str(b).split('\n') if line][0:10]
remainGenes = set([ line.split('\t')[3] for line in str(b).split('\n') if line])
#print list(remainGenes)[0:10]
BadGenes = list(origGenes - remainGenes)
#print BadGenes[0:10]
#print len(origGenes), len(remainGenes), len(BadGenes)
#exit()
for gene in BadGenes:
try:
del genesDict[gene]
except:
pass
with open('nucMap.bed','w') as f:
f.write('\n'.join('%s\t%s\t100'%item for item in genesDict.items() if item))
def BB2bed(BBfile,ref,sample,centromereBed):
with open(BBfile,'r') as f:
BBLines = f.read().split('\n')
genesDict = defaultdict(list)
with open(ref+'_BBSyn.bed','w') as f1, open(sample+'_BBSyn.bed','w') as f2:
for line in BBLines:
if line:
lineList = line.split('\t')
refChr = lineList[0]
int1 = sorted(map(int,lineList[1:3]))
#int1[0] -= 1
if '_part_' in lineList[3]:
sampChr,part = tuple(lineList[3].split('_part_'))
int2 = [(int(part)-1)*600]#300]
int2.append(int2[0] + 600)#300)
else:
sampChr = lineList[3]
int2 = [0,600]#300]
#int2[0] -= 1
f1.write('\t'.join([refChr]+map(str,int1)+['_'.join([refChr]+map(str,int1)),'0','+']) + '\n')
f2.write('\t'.join([sampChr]+map(str,int2)+['_'.join([sampChr]+map(str,int2)),'0','+']) + '\n')
genesDict['_'.join([refChr]+map(str,int1))] = '_'.join([sampChr]+map(str,int2))
origGenes = set(genesDict.keys())
centromere = BedTool(ref+'_BBSyn.bed').intersect(centromereBed,wa=True)
nonCentromere = BedTool(ref+'_BBSyn.bed').subtract(centromereBed,A=True)
remainGenes = set([line.split('\t')[3] for line in str(centromere).split('\n')[::2] if line] + [line.split('\t')[3] for line in str(nonCentromere).split('\n')[::10] if line])
BadGenes = list(origGenes - remainGenes)
for gene in BadGenes:
try:
del genesDict[gene]
except:
pass
with open('BBMap.bed','w') as f:
f.write('\n'.join('%s\t%s\t100'%item[::-1] for item in genesDict.items() if item))
def filterBB(BBbed):
with open(BBbed,'r') as f:
change = 1
#FIXME one to three per gene/scaffold
```
#### File: jlevy44/JoshuaTree2/ScaffoldingTool.py
```python
import click, os, sys, subprocess
#######################
#### RUN CLI GROUP ####
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='0.01')
def scaffolder():
pass
############################## # fixme just throw this into another python script dedicated to scaffolding pipeline, not worth having in main joshuaTree code. It looks really bad, play it off as old script
#### SCAFFOLD VIA SYNTENY ####
@scaffolder.command()
@click.option('-i', '--scaffolding_inputs_dir', default = './scaffolding_inputs', show_default=True, help='Directory containing scaffolding inputs (expects query/ and references/ subdirectories).', type=click.Path(exists=False))
@click.option('-o', '--scaffolding_outputs_dir', default = './scaffolding_outputs', show_default=True, help='Directory to write scaffolding outputs to.', type=click.Path(exists=False))
@click.option('-n', '--new_genome_name', default = 'Bdist', show_default=True, help='New genome name.', type=click.Path(exists=False))
@click.option('-w', '--weights_file', default = './weights.txt', show_default=True, help='Weights file.', type=click.Path(exists=False))
@click.option('-p', '--primary_proteome_id', default = 314, show_default=True, help='Primary proteome id.', type=click.Path(exists=False))
def scaffold_assemblies(scaffolding_inputs_dir,scaffolding_outputs_dir, new_genome_name, weights_file, primary_proteome_id):
"""Scaffold assemblies based on synteny to references."""
cwd = os.getcwd()
scaffolding_bin = os.path.abspath('scaffolding_tool_bin')+'/'
scaffolding_inputs_dir = os.path.abspath(scaffolding_inputs_dir)
scaffolding_outputs_dir = os.path.abspath(scaffolding_outputs_dir)
query_dir = scaffolding_inputs_dir+'/query/'
references_dir = scaffolding_inputs_dir+'/references/'
subprocess.call('python %s/renameGenomes.py %s %s'%(scaffolding_bin,new_genome_name, query_dir),shell=True)
    subprocess.call('cp %s/pipelineFunctions.py %s'%(scaffolding_bin,query_dir),shell=True) # shell=True is required because the command is passed as a single string
# add build references and weights file
os.chdir(query_dir)
    subprocess.call('python %s/scaffoldPipeline.sh --write_sh 1 --version v0 --cds %s '
                    '--gene_name_old %s --build_sample 1 --bb 0 --nuc 0 --com1_2 1 --allmaps 1 --reference_genomes_dir %s '
                    '--output_dir %s --query_genome_dir %s --bin %s --weights_file %s'%(scaffolding_bin,primary_proteome_id, new_genome_name, references_dir,
                    scaffolding_outputs_dir, query_dir, scaffolding_bin, os.path.abspath(weights_file)), shell=True) # fixme write nextflow config
os.chdir(cwd)
"""writeSh = params.write_sh.asType(Integer);
buildRef = params.build_ref.asType(Integer);
version = params.version;
CDS = params.cds;
CDSFasta = params.cds_fasta;
geneNameOld = params.gene_name_old;
buildSamp = params.build_sample.asType(Integer);
BB = params.bb.asType(Integer);
nuc = params.nucmer.asType(Integer);
com1_2 = params.com1_2.asType(Integer);
allmaps = params.allmaps.asType(Integer);
nextVersion = 'v' + ((version - 'v').asType(Integer)+1).asType(String);
chanBuildSamples = Channel.fromPath(version + '/*'+version,type: 'dir', relative: true)
workingDir = new File('').getAbsolutePath()
reference_genome_dir = params.reference_genomes_dir
query_genome_dir = params.query_genome_dir
output_dir = params.output_dir
bin = params.bin
""" # fixme add reference and output directories to scaffold pipeline, dockerize scaffold pipeline, make docker images
#### RUN CLI ####
if __name__ == '__main__':
scaffolder()
``` |
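A minimal usage sketch for the CLI above (assuming the script is importable as `ScaffoldingTool` and click is installed). It only prepares the directory layout `scaffold_assemblies` expects and prints the CLI help; it does not run the actual scaffolding pipeline, which needs the `scaffolding_tool_bin` scripts and real genome data.
```python
import os
from click.testing import CliRunner
from ScaffoldingTool import scaffolder  # assumption: the file above is on the path as ScaffoldingTool.py

# query/ holds the draft genome(s) to scaffold, references/ the reference genomes
for d in ('scaffolding_inputs/query', 'scaffolding_inputs/references', 'scaffolding_outputs'):
    os.makedirs(d, exist_ok=True)

result = CliRunner().invoke(scaffolder, ['--help'])
print(result.output)  # lists the available command(s) and their options
```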
{
"source": "jlevy44/PathFlowAI",
"score": 2
} |
#### File: lib/pathflowai/model_training.py
```python
import torch, os, numpy as np, pandas as pd
from pathflowai.utils import *
#from large_data_utils import *
from pathflowai.datasets import *
from pathflowai.models import *
from pathflowai.schedulers import *
from pathflowai.visualize import *
import copy
from pathflowai.sampler import ImbalancedDatasetSampler
import argparse
import sqlite3
#from nonechucks import SafeDataLoader as DataLoader
from torch.utils.data import DataLoader
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='0.1')
def train():
pass
def train_model_(training_opts):
"""Function to train, predict on model.
Parameters
----------
training_opts : dict
Training options populated from command line.
"""
dataset_df = pd.read_csv(training_opts['dataset_df']) if os.path.exists(training_opts['dataset_df']) else create_train_val_test(training_opts['train_val_test_splits'],training_opts['patch_info_file'],training_opts['patch_size'])
dataset_opts=dict(dataset_df=dataset_df, set='pass', patch_info_file=training_opts['patch_info_file'], input_dir=training_opts['input_dir'], target_names=training_opts['target_names'], pos_annotation_class=training_opts['pos_annotation_class'], segmentation=training_opts['segmentation'], patch_size=training_opts['patch_size'], fix_names=training_opts['fix_names'], other_annotations=training_opts['other_annotations'], target_segmentation_class=training_opts['target_segmentation_class'][0] if set=='train' else -1, target_threshold=training_opts['target_threshold'][0], oversampling_factor=training_opts['oversampling_factor'][0] if set=='train' else 1, n_segmentation_classes=training_opts['num_targets'],gdl=training_opts['loss_fn']=='gdl',mt_bce=training_opts['mt_bce'], classify_annotations=training_opts['classify_annotations'])
norm_dict = get_normalizer(training_opts['normalization_file'], dataset_opts)
transform_opts=dict(patch_size = training_opts['patch_resize'], mean=norm_dict['mean'], std=norm_dict['std'], resize=True, transform_platform=training_opts['transform_platform'] if not training_opts['segmentation'] else 'albumentations')
transformers = get_data_transforms(**transform_opts)
datasets= {set: DynamicImageDataset(dataset_df, set, training_opts['patch_info_file'], transformers, training_opts['input_dir'], training_opts['target_names'], training_opts['pos_annotation_class'], segmentation=training_opts['segmentation'], patch_size=training_opts['patch_size'], fix_names=training_opts['fix_names'], other_annotations=training_opts['other_annotations'], target_segmentation_class=training_opts['target_segmentation_class'][0] if set=='train' else -1, target_threshold=training_opts['target_threshold'][0], oversampling_factor=training_opts['oversampling_factor'][0] if set=='train' else 1, n_segmentation_classes=training_opts['num_targets'],gdl=training_opts['loss_fn']=='gdl',mt_bce=training_opts['mt_bce'], classify_annotations=training_opts['classify_annotations']) for set in ['train','val','test']}
# nc.SafeDataset(
print(datasets['train'])
if len(training_opts['target_segmentation_class']) > 1:
from functools import reduce
for i in range(1,len(training_opts['target_segmentation_class'])):
#print(training_opts['classify_annotations'])
datasets['train'].concat(DynamicImageDataset(dataset_df, 'train', training_opts['patch_info_file'], transformers, training_opts['input_dir'], training_opts['target_names'], training_opts['pos_annotation_class'], segmentation=training_opts['segmentation'], patch_size=training_opts['patch_size'], fix_names=training_opts['fix_names'], other_annotations=training_opts['other_annotations'], target_segmentation_class=training_opts['target_segmentation_class'][i], target_threshold=training_opts['target_threshold'][i], oversampling_factor=training_opts['oversampling_factor'][i],n_segmentation_classes=training_opts['num_targets'],gdl=training_opts['loss_fn']=='gdl',mt_bce=training_opts['mt_bce'],classify_annotations=training_opts['classify_annotations']))
#datasets['train']=reduce(lambda x,y: x.concat(y),[DynamicImageDataset(dataset_df, 'train', training_opts['patch_info_file'], transformers, training_opts['input_dir'], training_opts['target_names'], training_opts['pos_annotation_class'], segmentation=training_opts['segmentation'], patch_size=training_opts['patch_size'], fix_names=training_opts['fix_names'], other_annotations=training_opts['other_annotations'], target_segmentation_class=training_opts['target_segmentation_class'][i], target_threshold=training_opts['target_threshold'][i], oversampling_factor=training_opts['oversampling_factor'][i]) for i in range(len(training_opts['target_segmentation_class']))])
print(datasets['train'])
if training_opts['supplement']:
old_train_set = copy.deepcopy(datasets['train'])
datasets['train']=DynamicImageDataset(dataset_df, 'train', training_opts['patch_info_file'], transformers, training_opts['input_dir'], training_opts['target_names'], training_opts['pos_annotation_class'], segmentation=training_opts['segmentation'], patch_size=training_opts['patch_size'], fix_names=training_opts['fix_names'], other_annotations=training_opts['other_annotations'], target_segmentation_class=-1, target_threshold=training_opts['target_threshold'], oversampling_factor=1,n_segmentation_classes=training_opts['num_targets'],gdl=training_opts['loss_fn']=='gdl',mt_bce=training_opts['mt_bce'],classify_annotations=training_opts['classify_annotations'])
datasets['train'].concat(old_train_set)
if training_opts['subsample_p']<1.0:
datasets['train'].subsample(training_opts['subsample_p'])
if training_opts['subsample_p_val']<1.0:
if training_opts['subsample_p_val']==-1.:
training_opts['subsample_p_val']=training_opts['subsample_p']
if training_opts['subsample_p_val']<1.0:
datasets['val'].subsample(training_opts['subsample_p_val'])
if training_opts['num_training_images_epoch']>0:
num_train_batches = min(training_opts['num_training_images_epoch'],len(datasets['train']))//training_opts['batch_size']
else:
num_train_batches = None
if training_opts['classify_annotations']:
binarizer=datasets['train'].binarize_annotations(num_targets=training_opts['num_targets'],binary_threshold=training_opts['binary_threshold'])
datasets['val'].binarize_annotations(num_targets=training_opts['num_targets'],binary_threshold=training_opts['binary_threshold'])
datasets['test'].binarize_annotations(num_targets=training_opts['num_targets'],binary_threshold=training_opts['binary_threshold'])
training_opts['num_targets']=len(datasets['train'].targets)
for Set in ['train','val','test']:
print(datasets[Set].patch_info.iloc[:,6:].sum(axis=0))
if training_opts['external_test_db'] and training_opts['external_test_dir']:
datasets['test'].update_dataset(input_dir=training_opts['external_test_dir'],new_db=training_opts['external_test_db'])
dataloaders={set: DataLoader(datasets[set], batch_size=training_opts['batch_size'], shuffle=False if (not training_opts['segmentation']) else (set=='train'), num_workers=10, sampler=ImbalancedDatasetSampler(datasets[set]) if (training_opts['imbalanced_correction'] and set=='train' and not training_opts['segmentation']) else None) for set in ['train', 'val', 'test']}
#print(dataloaders) # FIXME VAL SEEMS TO BE MISSING DURING PREDICTION
model = generate_model(pretrain=training_opts['pretrain'],architecture=training_opts['architecture'],num_classes=training_opts['num_targets'], add_sigmoid=False, n_hidden=training_opts['n_hidden'], segmentation=training_opts['segmentation'])
if os.path.exists(training_opts['pretrained_save_location']):
model_dict = torch.load(training_opts['pretrained_save_location'])
keys=list(model_dict.keys())
if not training_opts['segmentation']:
model_dict.update(dict(list(model.state_dict().items())[-2:]))#={k:model_dict[k] for k in keys[:-2]}
model.load_state_dict(model_dict) # this will likely break after pretraining?
if torch.cuda.is_available():
model.cuda()
if 0 and training_opts['run_test']:
for X,y in dataloaders['train']:
np.save('test_predictions.npy',model(X.cuda() if torch.cuda.is_available() else X).detach().cpu().numpy())
exit()
model_trainer_opts=dict(model=model,
n_epoch=training_opts['n_epoch'],
validation_dataloader=dataloaders['val'],
optimizer_opts=dict(name=training_opts['optimizer'],
lr=training_opts['lr'],
weight_decay=training_opts['wd']),
scheduler_opts=dict(scheduler=training_opts['scheduler_type'],
lr_scheduler_decay=0.5,
T_max=training_opts['T_max'],
eta_min=training_opts['eta_min'],
T_mult=training_opts['T_mult']),
loss_fn=training_opts['loss_fn'],
num_train_batches=num_train_batches)
if not training_opts['predict']:
trainer = ModelTrainer(**model_trainer_opts)
if training_opts['imbalanced_correction2']:
trainer.add_class_balance_loss(datasets['train'])
if training_opts['adopt_training_loss']:
trainer.val_loss_fn = trainer.loss_fn
trainer.fit(dataloaders['train'], verbose=True, print_every=1, plot_training_curves=True, plot_save_file=training_opts['training_curve'], print_val_confusion=training_opts['print_val_confusion'], save_val_predictions=training_opts['save_val_predictions'])
torch.save(trainer.model.state_dict(),training_opts['save_location'])
else:
model_dict = torch.load(training_opts['save_location'])
model.load_state_dict(model_dict)
if training_opts['extract_model']:
dataset_opts.update(dict(target_segmentation_class=-1, target_threshold=training_opts['target_threshold'][0] if len(training_opts['target_threshold']) else 0., set='test', binary_threshold=training_opts['binary_threshold'], num_targets=training_opts['num_targets'], oversampling_factor=1))
torch.save(dict(model=model,dataset_opts=dataset_opts, transform_opts=transform_opts),'{}.{}'.format(training_opts['save_location'],'extracted_model.pkl'))
exit()
trainer = ModelTrainer(**model_trainer_opts)
if training_opts['segmentation']:
for ID, dataset in datasets['test'].split_by_ID():
dataloader = DataLoader(dataset, batch_size=training_opts['batch_size'], shuffle=False, num_workers=10)
if training_opts['run_test']:
for X,y in dataloader:
np.save('test_predictions.npy',model(X.cuda() if torch.cuda.is_available() else X).detach().cpu().numpy())
exit()
y_pred = trainer.predict(dataloader)
print(ID,y_pred.shape)
segmentation_predictions2npy(y_pred, dataset.patch_info, dataset.segmentation_maps[ID], npy_output='{}/{}_predict.npy'.format(training_opts['prediction_output_dir'],ID), original_patch_size=training_opts['patch_size'], resized_patch_size=training_opts['patch_resize'])
else:
extract_embedding=training_opts['extract_embedding']
if extract_embedding:
trainer.model.fc = trainer.model.fc[0]
trainer.bce=False
y_pred = trainer.predict(dataloaders['test'])
patch_info = dataloaders['test'].dataset.patch_info
if extract_embedding:
patch_info['name']=patch_info.astype(str).apply(lambda x: '\n'.join(['{}:{}'.format(k,v) for k,v in x.to_dict().items()]),axis=1)#.apply(','.join,axis=1)
embeddings=pd.DataFrame(y_pred,index=patch_info['name'])
embeddings['ID']=patch_info['ID'].values
torch.save(dict(embeddings=embeddings,patch_info=patch_info),join(training_opts['prediction_output_dir'],'embeddings.pkl'))
else:
if len(y_pred.shape)>1 and y_pred.shape[1]>1:
annotations = np.vectorize(lambda x: x+'_pred')(np.arange(y_pred.shape[1]).astype(str)).tolist() # [training_opts['pos_annotation_class']]+training_opts['other_annotations']] if training_opts['classify_annotations'] else
for i in range(y_pred.shape[1]):
patch_info.loc[:,annotations[i]]=y_pred[:,i]
patch_info['y_pred']=y_pred if (training_opts['num_targets']==1 or not (training_opts['classify_annotations'] or training_opts['mt_bce'])) else y_pred.argmax(axis=1)
conn = sqlite3.connect(training_opts['prediction_save_path'])
patch_info.to_sql(str(training_opts['patch_size']),con=conn, if_exists='replace')
conn.close()
@train.command()
@click.option('-s', '--segmentation', is_flag=True, help='Segmentation task.', show_default=True)
@click.option('-p', '--prediction', is_flag=True, help='Predict on model.', show_default=True)
@click.option('-pa', '--pos_annotation_class', default='', help='Annotation Class from which to apply positive labels.', type=click.Path(exists=False), show_default=True)
@click.option('-oa', '--other_annotations', default=[], multiple=True, help='Annotations in image.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--save_location', default='', help='Model Save Location, append with pickle .pkl.', type=click.Path(exists=False), show_default=True)
@click.option('-pt', '--pretrained_save_location', default='', help='Model Save Location, append with pickle .pkl, pretrained by previous analysis to be finetuned.', type=click.Path(exists=False), show_default=True)
@click.option('-i', '--input_dir', default='', help='Input directory containing slides and everything.', type=click.Path(exists=False), show_default=True)
@click.option('-ps', '--patch_size', default=224, help='Patch size.', show_default=True)
@click.option('-pr', '--patch_resize', default=224, help='Patch resized.', show_default=True)
@click.option('-tg', '--target_names', default=[], multiple=True, help='Targets.', type=click.Path(exists=False), show_default=True)
@click.option('-df', '--dataset_df', default='', help='CSV file with train/val/test and target info.', type=click.Path(exists=False), show_default=True)
@click.option('-fn', '--fix_names', is_flag=True, help='Whether to fix names in dataset_df.', show_default=True)
@click.option('-a', '--architecture', default='alexnet', help='Neural Network Architecture.', type=click.Choice(['alexnet', 'densenet121', 'densenet161', 'densenet169', 'densenet201',
'inception_v3', 'resnet101', 'resnet152', 'resnet18', 'resnet34', 'resnet50', 'vgg11', 'vgg11_bn','unet','unet2','nested_unet','fast_scnn',
'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', 'deeplabv3_resnet101','deeplabv3_resnet50','fcn_resnet101', 'fcn_resnet50']+['efficientnet-b{}'.format(i) for i in range(8)]), show_default=True)
@click.option('-imb', '--imbalanced_correction', is_flag=True, help='Attempt to correct for imbalanced data.', show_default=True)
@click.option('-imb2', '--imbalanced_correction2', is_flag=True, help='Attempt to correct for imbalanced data.', show_default=True)
@click.option('-ca', '--classify_annotations', is_flag=True, help='Classify annotations.', show_default=True)
@click.option('-nt', '--num_targets', default=1, help='Number of targets.', show_default=True)
@click.option('-ss', '--subsample_p', default=1.0, help='Subsample training set.', show_default=True)
@click.option('-ssv', '--subsample_p_val', default=-1., help='Subsample val set. If not set, defaults to that of training set', show_default=True)
@click.option('-t', '--num_training_images_epoch', default=-1, help='Number of training images per epoch. -1 means use all training images each epoch.', show_default=True)
@click.option('-lr', '--learning_rate', default=1e-2, help='Learning rate.', show_default=True)
@click.option('-tp', '--transform_platform', default='torch', help='Transform platform for nonsegmentation tasks.', type=click.Choice(['torch','albumentations']))
@click.option('-ne', '--n_epoch', default=10, help='Number of epochs.', show_default=True)
@click.option('-pi', '--patch_info_file', default='patch_info.db', help='Patch info file.', type=click.Path(exists=False), show_default=True)
@click.option('-tc', '--target_segmentation_class', default=[-1], multiple=True, help='Segmentation Class to finetune on.', show_default=True)
@click.option('-tt', '--target_threshold', default=[0.], multiple=True, help='Threshold to include target for segmentation if saving one class.', show_default=True)
@click.option('-ov', '--oversampling_factor', default=[1.], multiple=True, help='How much to oversample training set.', show_default=True)
@click.option('-sup', '--supplement', is_flag=True, help='Use the thresholding to supplement the original training set.', show_default=True)
@click.option('-bs', '--batch_size', default=10, help='Batch size.', show_default=True)
@click.option('-rt', '--run_test', is_flag=True, help='Output predictions for a batch to "test_predictions.npy". Use for debugging.', show_default=True)
@click.option('-mtb', '--mt_bce', is_flag=True, help='Run multi-target bce predictions on the annotations.', show_default=True)
@click.option('-po', '--prediction_output_dir', default='predictions', help='Where to output segmentation predictions.', type=click.Path(exists=False), show_default=True)
@click.option('-ee', '--extract_embedding', is_flag=True, help='Extract embeddings.', show_default=True)
@click.option('-em', '--extract_model', is_flag=True, help='Save entire torch model.', show_default=True)
@click.option('-bt', '--binary_threshold', default=0., help='If running binary classification on annotations, dichotomize selected annotation as such.', show_default=True)
@click.option('-prt', '--pretrain', is_flag=True, help='Pretrain on ImageNet.', show_default=True)
@click.option('-olf', '--overwrite_loss_fn', default='', help='Overwrite the default training loss functions with loss of choice.', type=click.Choice(['','bce','mse','focal','dice','gdl','ce']), show_default=True)
@click.option('-atl', '--adopt_training_loss', is_flag=True, help='Adopt training loss function for validation calculation.', show_default=True)
@click.option('-tdb', '--external_test_db', default='', help='External database of samples to test on.', type=click.Path(exists=False), show_default=True)
@click.option('-tdir', '--external_test_dir', default='', help='External directory of samples to test on.', type=click.Path(exists=False), show_default=True)
def train_model(segmentation,prediction,pos_annotation_class,other_annotations,save_location,pretrained_save_location,input_dir,patch_size,patch_resize,target_names,dataset_df,fix_names, architecture, imbalanced_correction, imbalanced_correction2, classify_annotations, num_targets, subsample_p,subsample_p_val,num_training_images_epoch, learning_rate, transform_platform, n_epoch, patch_info_file, target_segmentation_class, target_threshold, oversampling_factor, supplement, batch_size, run_test, mt_bce, prediction_output_dir, extract_embedding, extract_model, binary_threshold, pretrain, overwrite_loss_fn, adopt_training_loss, external_test_db,external_test_dir):
"""Train and predict using model for regression and classification tasks."""
# add separate pretrain ability on separating cell types, then transfer learn
# add pretrain and efficient net, pretraining remove last layer while loading state dict
target_segmentation_class=list(map(int,target_segmentation_class))
target_threshold=list(map(float,target_threshold))
oversampling_factor=[(int(x) if float(x)>=1 else float(x)) for x in oversampling_factor]
other_annotations=list(other_annotations)
command_opts = dict(segmentation=segmentation,
prediction=prediction,
pos_annotation_class=pos_annotation_class,
other_annotations=other_annotations,
save_location=save_location,
pretrained_save_location=pretrained_save_location,
input_dir=input_dir,
patch_size=patch_size,
target_names=target_names,
dataset_df=dataset_df,
fix_names=fix_names,
architecture=architecture,
patch_resize=patch_resize,
imbalanced_correction=imbalanced_correction,
imbalanced_correction2=imbalanced_correction2,
classify_annotations=classify_annotations,
num_targets=num_targets,
subsample_p=subsample_p,
num_training_images_epoch=num_training_images_epoch,
lr=learning_rate,
transform_platform=transform_platform,
n_epoch=n_epoch,
patch_info_file=patch_info_file,
target_segmentation_class=target_segmentation_class,
target_threshold=target_threshold,
oversampling_factor=oversampling_factor,
supplement=supplement,
predict=prediction,
batch_size=batch_size,
run_test=run_test,
mt_bce=mt_bce,
prediction_output_dir=prediction_output_dir,
extract_embedding=extract_embedding,
extract_model=extract_model,
binary_threshold=binary_threshold,
subsample_p_val=subsample_p_val,
wd=1e-3,
scheduler_type='warm_restarts',
T_max=10,
T_mult=2,
eta_min=5e-8,
optimizer='adam',
n_hidden=100,
pretrain=pretrain,
training_curve='training_curve.png',
adopt_training_loss=adopt_training_loss,
external_test_db=external_test_db,
external_test_dir=external_test_dir)
training_opts = dict(normalization_file="normalization_parameters.pkl",
loss_fn='bce',
print_val_confusion=True,
save_val_predictions=True,
prediction_save_path = 'predictions.db',
train_val_test_splits='train_val_test.pkl'
)
segmentation_training_opts = copy.deepcopy(training_opts)
segmentation_training_opts.update(dict(loss_fn='dice',#gdl dice+ce
normalization_file='normalization_segmentation.pkl',
fix_names=False,
save_val_predictions=True,
))
if segmentation:
training_opts = segmentation_training_opts
for k in command_opts:
training_opts[k] = command_opts[k]
if classify_annotations:
if training_opts['num_targets']==1:
training_opts['loss_fn']='bce'
else:
training_opts['loss_fn']='ce'
if mt_bce:
training_opts['loss_fn']='bce'
if overwrite_loss_fn:
training_opts['loss_fn']=overwrite_loss_fn
train_model_(training_opts)
if __name__=='__main__':
train()
```
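As a rough illustration of how the pieces above fit together, the following sketch builds a small classifier the same way `train_model_` does and pushes a fake batch through it. It assumes pathflowai is installed and that `generate_model` is exposed by `pathflowai.models` with the signature used above; the architecture and shapes are illustrative only.
```python
import torch
from pathflowai.models import generate_model  # assumption: same function star-imported above

# 'resnet18' is one of the choices accepted by the --architecture option
model = generate_model(pretrain=False, architecture='resnet18', num_classes=2,
                       add_sigmoid=False, n_hidden=100, segmentation=False)
x = torch.randn(2, 3, 224, 224)  # two fake RGB patches at the default patch_resize of 224
with torch.no_grad():
    out = model(x)
print(out.shape)  # expected to be (batch, num_classes), i.e. roughly (2, 2)
```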
#### File: lib/pathflowai/monitor_memory_usage.py
```python
import GPUtil
import psutil
import time
from threading import Thread
import pandas as pd
import argparse
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='0.1')
def monitor():
pass
class Monitor(Thread):
def __init__(self, start_time, delay, end_time):
super(Monitor, self).__init__()
self.stopped = False
self.start_time = start_time
self.end_time = end_time
self.delay = delay # Time between calls to GPUtil
self.records = []
self.start()
def run(self):
time_from_start = 0.
while time_from_start <= self.end_time:
memory = psutil.virtual_memory()
stats = {"gpu.{}.memory.used".format(gpu.id):gpu.memoryUsed for gpu in GPUtil.getGPUs()}
stats['cpu.utilization'] = psutil.cpu_percent()
current_time = time.time()
stats['current.time'] = current_time
time_from_start = current_time - self.start_time
stats['system.memory.used'] = memory.used
stats['system.memory.used.percent'] = memory.percent
stats['elapsed.time'] = time_from_start
self.records.append(stats)
time.sleep(self.delay)
self.stop()
def stop(self):
self.stopped = True
def return_records(self):
return pd.DataFrame(self.records)
def get_usage(total_time, delay_time, records_output_csv):
start_time = time.time()
monitor = Monitor(start_time, delay_time, total_time)
    # Monitor already starts itself as a background thread in __init__; calling run() here as well would record every sample twice
while not monitor.stopped:
time.sleep(delay_time)
records = monitor.return_records()
records.to_csv(records_output_csv)
@monitor.command()
@click.option('-csv', '--records_output_csv', default='records.csv', help='Where to store records.', type=click.Path(exists=False), show_default=True)
@click.option('-tt', '--total_time', default=1., help='Total time to monitor for in minutes.', show_default=True)
@click.option('-dt', '--delay_time', default=1., help='Time between samples, in seconds.', show_default=True)
def monitor_usage(records_output_csv,total_time,delay_time):
"""Monitor Usage over Time Interval."""
total_time*= 60. # convert to seconds
get_usage(total_time, delay_time, records_output_csv)
if __name__ == '__main__':
monitor()
```
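A short usage sketch for the monitor above (assuming GPUtil, psutil and pandas are installed and the module is importable as `pathflowai.monitor_memory_usage`; with no GPUs present, `GPUtil.getGPUs()` simply returns an empty list):
```python
import pandas as pd
from pathflowai.monitor_memory_usage import get_usage  # assumption: import path mirrors lib/pathflowai/

# sample CPU/GPU/system-memory statistics once per second for ~10 seconds
get_usage(total_time=10., delay_time=1., records_output_csv='usage_records.csv')
print(pd.read_csv('usage_records.csv').head())  # columns include cpu.utilization, system.memory.used, elapsed.time
```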
#### File: lib/pathflowai/visualize.py
```python
import plotly.graph_objs as go
import plotly.offline as py
import pandas as pd, numpy as np
import networkx as nx
import dask.array as da
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import sqlite3
from os.path import join
sns.set()
class PlotlyPlot:
"""Creates plotly html plots."""
def __init__(self):
self.plots=[]
def add_plot(self, t_data_df, G=None, color_col='color', name_col='name', xyz_cols=['x','y','z'], size=2, opacity=1.0, custom_colors=[]):
"""Adds plotting data to be plotted.
Parameters
----------
t_data_df:dataframe
3-D transformed dataframe.
G:nx.Graph
Networkx graph.
color_col:str
Column to use to color points.
name_col:str
Column to use to name points.
xyz_cols:list
3 columns that denote x,y,z coords.
size:int
Marker size.
opacity:float
Marker opacity.
custom_colors:list
Custom colors to supply.
"""
plots = []
x,y,z=tuple(xyz_cols)
if t_data_df[color_col].dtype == np.float64:
plots.append(
go.Scatter3d(x=t_data_df[x], y=t_data_df[y],
z=t_data_df[z],
name='', mode='markers',
marker=dict(color=t_data_df[color_col], size=size, opacity=opacity, colorscale='Viridis',
colorbar=dict(title='Colorbar')), text=t_data_df[color_col] if name_col not in list(t_data_df) else t_data_df[name_col]))
else:
colors = t_data_df[color_col].unique()
c = sns.color_palette('hls', len(colors))
c = np.array(['rgb({})'.format(','.join(((np.array(c_i)*255).astype(int).astype(str).tolist()))) for c_i in c])#c = ['hsl(' + str(h) + ',50%' + ',50%)' for h in np.linspace(0, 360, len(colors) + 2)]
if custom_colors:
c = custom_colors
color_dict = {name: c[i] for i,name in enumerate(sorted(colors))}
for name,col in color_dict.items():
plots.append(
go.Scatter3d(x=t_data_df[x][t_data_df[color_col]==name], y=t_data_df[y][t_data_df[color_col]==name],
z=t_data_df[z][t_data_df[color_col]==name],
name=str(name), mode='markers',
marker=dict(color=col, size=size, opacity=opacity), text=t_data_df.index[t_data_df[color_col]==name] if 'name' not in list(t_data_df) else t_data_df[name_col][t_data_df[color_col]==name]))
if G is not None:
#pos = nx.spring_layout(G,dim=3,iterations=0,pos={i: tuple(t_data.loc[i,['x','y','z']]) for i in range(len(t_data))})
Xed, Yed, Zed = [], [], []
for edge in G.edges():
if edge[0] in t_data_df.index.values and edge[1] in t_data_df.index.values:
Xed += [t_data_df.loc[edge[0],x], t_data_df.loc[edge[1],x], None]
Yed += [t_data_df.loc[edge[0],y], t_data_df.loc[edge[1],y], None]
Zed += [t_data_df.loc[edge[0],z], t_data_df.loc[edge[1],z], None]
plots.append(go.Scatter3d(x=Xed,
y=Yed,
z=Zed,
mode='lines',
line=go.scatter3d.Line(color='rgb(210,210,210)', width=2),
hoverinfo='none'
))
self.plots.extend(plots)
def plot(self, output_fname, axes_off=False):
"""Plot embedding of patches to html file.
Parameters
----------
output_fname:str
Output html file.
axes_off:bool
Remove axes.
"""
if axes_off:
fig = go.Figure(data=self.plots,layout=go.Layout(scene=dict(xaxis=dict(title='',autorange=True,showgrid=False,zeroline=False,showline=False,ticks='',showticklabels=False),
yaxis=dict(title='',autorange=True,showgrid=False,zeroline=False,showline=False,ticks='',showticklabels=False),
zaxis=dict(title='',autorange=True,showgrid=False,zeroline=False,showline=False,ticks='',showticklabels=False))))
else:
fig = go.Figure(data=self.plots)
py.plot(fig, filename=output_fname, auto_open=False)
def to_pil(arr):
"""Numpy array to pil.
Parameters
----------
arr:array
Numpy array.
Returns
-------
Image
PIL Image.
"""
return Image.fromarray(arr.astype('uint8'), 'RGB')
def blend(arr1, arr2, alpha=0.5):
"""Blend 2 arrays together, mixing with alpha.
Parameters
----------
arr1:array
Image 1.
arr2:array
Image 2.
alpha:float
Higher alpha makes image more like image 1.
Returns
-------
array
Resulting image.
"""
return alpha*arr1 + (1.-alpha)*arr2
def prob2rbg(prob, palette, arr):
"""Convert probability score to rgb image.
Parameters
----------
prob:float
Between 0 and 1 score.
palette:palette
    Palette that converts between prob and color.
arr:array
Original array.
Returns
-------
array
New image colored by prediction score.
"""
col = palette(prob)
for i in range(3):
arr[...,i] = int(col[i]*255)
return arr
def seg2rgb(seg, palette, n_segmentation_classes):
"""Color each pixel by segmentation class.
Parameters
----------
seg:array
Segmentation mask.
palette:palette
Color to RGB map.
n_segmentation_classes:int
Total number segmentation classes.
Returns
-------
array
Returned segmentation image.
"""
#print(seg.shape)
#print((seg/n_segmentation_classes))
img=(palette(seg/n_segmentation_classes)[...,:3]*255).astype(int)
#print(img.shape)
return img
def annotation2rgb(i,palette,arr):
"""Go from annotation of patch to color.
Parameters
----------
i:int
Annotation index.
palette:palette
Index to color mapping.
arr:array
Image array.
Returns
-------
array
Resulting image.
"""
col = palette[i]
for i in range(3):
arr[...,i] = int(col[i]*255)
return arr
def plot_image_(image_file, compression_factor=2., test_image_name='test.png'):
"""Plots entire SVS/other image.
Parameters
----------
image_file:str
Image file.
compression_factor:float
Amount to shrink each dimension of image.
test_image_name:str
Output image file.
"""
from pathflowai.utils import svs2dask_array, npy2da
import cv2
arr=svs2dask_array(image_file, tile_size=1000, overlap=0, remove_last=True, allow_unknown_chunksizes=False) if (not image_file.endswith('.npy')) else npy2da(image_file)
arr2=to_pil(cv2.resize(arr.compute(), dsize=tuple((np.array(arr.shape[:2])/compression_factor).astype(int).tolist()), interpolation=cv2.INTER_CUBIC))
arr2.save(test_image_name)
# for now binary output
class PredictionPlotter:
"""Plots predictions over entire image.
Parameters
----------
dask_arr_dict:dict
Stores all dask arrays corresponding to all of the images.
patch_info_db:str
Patch level information, eg. prediction.
compression_factor:float
How much to compress image by.
alpha:float
Low value assigns higher weight to prediction over original image.
patch_size:int
Patch size.
no_db:bool
Don't use patch information.
plot_annotation:bool
Plot annotations from patch information.
segmentation:bool
Plot segmentation mask.
n_segmentation_classes:int
Number segmentation classes.
input_dir:str
Input directory.
annotation_col:str
Annotation column to plot.
scaling_factor:float
Multiplies the prediction scores to make them appear darker on the images when predicting.
"""
# some patches have been filtered out, not one to one!!! figure out
def __init__(self, dask_arr_dict, patch_info_db, compression_factor=3, alpha=0.5, patch_size=224, no_db=False, plot_annotation=False, segmentation=False, n_segmentation_classes=4, input_dir='', annotation_col='annotation', scaling_factor=1.):
self.segmentation = segmentation
self.scaling_factor=scaling_factor
self.segmentation_maps = None
self.n_segmentation_classes=float(n_segmentation_classes)
self.pred_palette = sns.cubehelix_palette(start=0,as_cmap=True)
if not no_db:
self.compression_factor=compression_factor
self.alpha = alpha
self.patch_size = patch_size
conn = sqlite3.connect(patch_info_db)
patch_info=pd.read_sql('select * from "{}";'.format(patch_size),con=conn)
conn.close()
self.annotations = {str(a):i for i,a in enumerate(patch_info['annotation'].unique().tolist())}
self.plot_annotation=plot_annotation
self.palette=sns.color_palette(n_colors=len(list(self.annotations.keys())))
#print(self.palette)
if 'y_pred' not in patch_info.columns:
patch_info['y_pred'] = 0.
self.patch_info=patch_info[['ID','x','y','patch_size','annotation',annotation_col]] # y_pred
if 0:
for ID in predictions:
patch_info.loc[patch_info["ID"]==ID,'y_pred'] = predictions[ID]
self.patch_info = self.patch_info[np.isin(self.patch_info['ID'],np.array(list(dask_arr_dict.keys())))]
if self.segmentation:
self.segmentation_maps = {slide:npy2da(join(input_dir,'{}_mask.npy'.format(slide))) for slide in dask_arr_dict.keys()}
#self.patch_info[['x','y','patch_size']]/=self.compression_factor
self.dask_arr_dict = {k:v[...,:3] for k,v in dask_arr_dict.items()}
def add_custom_segmentation(self, basename, npy):
"""Replace segmentation mask with new custom segmentation.
Parameters
----------
basename:str
Patient ID
npy:str
Numpy mask.
"""
self.segmentation_maps[basename] = da.from_array(np.load(npy,mmap_mode='r+'))
def generate_image(self, ID):
"""Generate the image array for the whole slide image with predictions overlaid.
Parameters
----------
ID:str
patient ID.
Returns
-------
array
Resulting overlaid whole slide image.
"""
patch_info = self.patch_info[self.patch_info['ID']==ID]
dask_arr = self.dask_arr_dict[ID]
arr_shape = np.array(dask_arr.shape).astype(float)
#image=da.zeros_like(dask_arr)
arr_shape[:2]/=self.compression_factor
arr_shape=arr_shape.astype(int).tolist()
img = Image.new('RGB',arr_shape[:2],'white')
for i in range(patch_info.shape[0]):
ID,x,y,patch_size,annotation,pred = patch_info.iloc[i].tolist()
#print(x,y,annotation)
x_new,y_new = int(x/self.compression_factor),int(y/self.compression_factor)
image = np.zeros((patch_size,patch_size,3))
if self.segmentation:
image=seg2rgb(self.segmentation_maps[ID][x:x+patch_size,y:y+patch_size].compute(),self.pred_palette, self.n_segmentation_classes)
else:
image=prob2rbg(pred*self.scaling_factor, self.pred_palette, image) if not self.plot_annotation else annotation2rgb(self.annotations[str(pred)],self.palette,image) # annotation
arr=dask_arr[x:x+patch_size,y:y+patch_size].compute()
#print(image.shape)
blended_patch=blend(arr,image, self.alpha).transpose((1,0,2))
blended_patch_pil = to_pil(blended_patch)
patch_size/=self.compression_factor
patch_size=int(patch_size)
blended_patch_pil=blended_patch_pil.resize((patch_size,patch_size))
img.paste(blended_patch_pil, box=(x_new,y_new), mask=None)
return img
def return_patch(self, ID, x, y, patch_size):
"""Return one single patch instead of entire image.
Parameters
----------
ID:str
Patient ID
x:int
X coordinate.
y:int
Y coordinate.
patch_size:int
Patch size.
Returns
-------
array
Image.
"""
img=(self.dask_arr_dict[ID][x:x+patch_size,y:y+patch_size].compute() if not self.segmentation else seg2rgb(self.segmentation_maps[ID][x:x+patch_size,y:y+patch_size].compute(),self.pred_palette, self.n_segmentation_classes))
return to_pil(img)
def output_image(self, img, filename, tif=False):
"""Output calculated image to file.
Parameters
----------
img:array
Image.
filename:str
Output file name.
tif:bool
Store in TIF format?
"""
if tif:
from tifffile import imwrite
imwrite(filename, np.array(img), photometric='rgb')
else:
img.save(filename)
def plot_shap(model, dataset_opts, transform_opts, batch_size, outputfilename, n_outputs=1, method='deep', local_smoothing=0.0, n_samples=20, pred_out=False):
"""Plot shapley attributions overlaid on images for classification tasks.
Parameters
----------
model:nn.Module
Pytorch model.
dataset_opts:dict
Options used to configure dataset
transform_opts:dict
Options used to configure transformers.
batch_size:int
Batch size for training.
outputfilename:str
Output filename.
n_outputs:int
Number of top outputs.
method:str
Gradient or deep explainer.
local_smoothing:float
How much to smooth shapley map.
n_samples:int
Number shapley samples to draw.
pred_out:bool
Label images with binary prediction score?
"""
import torch
from torch.nn import functional as F
import numpy as np
from torch.utils.data import DataLoader
import shap
from pathflowai.datasets import DynamicImageDataset
import matplotlib
from matplotlib import pyplot as plt
from pathflowai.sampler import ImbalancedDatasetSampler
out_transform=dict(sigmoid=F.sigmoid,softmax=F.softmax,none=lambda x: x)
binary_threshold=dataset_opts.pop('binary_threshold')
num_targets=dataset_opts.pop('num_targets')
dataset = DynamicImageDataset(**dataset_opts)
if dataset_opts['classify_annotations']:
binarizer=dataset.binarize_annotations(num_targets=num_targets,binary_threshold=binary_threshold)
num_targets=len(dataset.targets)
dataloader_val = DataLoader(dataset,batch_size=batch_size, num_workers=10, shuffle=True if num_targets>1 else False, sampler=ImbalancedDatasetSampler(dataset) if num_targets==1 else None)
#dataloader_test = DataLoader(dataset,batch_size=batch_size,num_workers=10, shuffle=False)
background,y_background=next(iter(dataloader_val))
if method=='gradient':
background=torch.cat([background,next(iter(dataloader_val))[0]],0)
X_test,y_test=next(iter(dataloader_val))
if torch.cuda.is_available():
background=background.cuda()
X_test=X_test.cuda()
if pred_out!='none':
if torch.cuda.is_available():
model2=model.cuda()
y_test=out_transform[pred_out](model2(X_test)).detach().cpu()
y_test=y_test.numpy()
if method=='deep':
e = shap.DeepExplainer(model, background)
s=e.shap_values(X_test, ranked_outputs=n_outputs)
elif method=='gradient':
e = shap.GradientExplainer(model, background, batch_size=batch_size, local_smoothing=local_smoothing)
s=e.shap_values(X_test, ranked_outputs=n_outputs, nsamples=n_samples)
if y_test.shape[1]>1:
y_test=y_test.argmax(axis=1)
if n_outputs>1:
shap_values, idx = s
else:
shap_values, idx = s, y_test
#print(shap_values) # .detach().cpu()
if num_targets == 1:
shap_numpy = [np.swapaxes(np.swapaxes(shap_values, 1, -1), 1, 2)]
else:
shap_numpy = [np.swapaxes(np.swapaxes(s, 1, -1), 1, 2) for s in shap_values]
#print(shap_numpy.shape)
X_test_numpy=X_test.detach().cpu().numpy()
X_test_numpy=X_test_numpy.transpose((0,2,3,1))
for i in range(X_test_numpy.shape[0]):
X_test_numpy[i,...]*=np.array(transform_opts['std'])
X_test_numpy[i,...]+=np.array(transform_opts['mean'])
X_test_numpy=X_test_numpy.transpose((0,3,1,2))
test_numpy = np.swapaxes(np.swapaxes(X_test_numpy, 1, -1), 1, 2)
if pred_out!='none':
labels=y_test.astype(str)
else:
labels = np.array([[(dataloader_val.dataset.targets[i[j]] if num_targets>1 else str(i)) for j in range(n_outputs)] for i in idx])#[:,np.newaxis] # y_test
if 0 and (len(labels.shape)<2 or labels.shape[1]==1):
labels=labels.flatten()#[:np.newaxis]
#print(labels.shape,shap_numpy.shape[0])
plt.figure()
shap.image_plot(shap_numpy, test_numpy, labels)# if num_targets!=1 else shap_values -test_numpy , labels=dataloader_test.dataset.targets)
plt.savefig(outputfilename, dpi=300)
def plot_umap_images(dask_arr_dict, embeddings_file, ID=None, cval=1., image_res=300., outputfname='output_embedding.png', mpl_scatter=True, remove_background_annotation='', max_background_area=0.01, zoom=0.05, n_neighbors=10, sort_col='', sort_mode='asc'):
"""Make UMAP embedding plot, overlaid with images.
Parameters
----------
dask_arr_dict:dict
Stored dask arrays for each WSI.
embeddings_file:str
    Embeddings pickle file stored after training the model.
ID:str
Patient ID.
cval:float
Deprecated
image_res:float
Image resolution.
outputfname:str
Output image file.
mpl_scatter:bool
Recommended: Use matplotlib for scatter plot.
remove_background_annotation:str
Remove the background annotations. Enter for annotation to remove.
max_background_area:float
    Maximum background area in each tile for inclusion.
zoom:float
How much to zoom in on each patch, less than 1 is zoom out.
n_neighbors:int
Number of neighbors for UMAP embedding.
sort_col:str
Patch info column to sort on.
sort_mode:str
Sort ascending or descending.
Returns
-------
type
Description of returned object.
Inspired by: https://gist.github.com/lukemetz/be6123c7ee3b366e333a
WIP!! Needs testing."""
import torch
import dask
from dask.distributed import Client
from umap import UMAP
from pathflowai.visualize import PlotlyPlot
import pandas as pd, numpy as np
import skimage.io
from skimage.transform import resize
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
sns.set(style='white')
def min_resize(img, size):
"""
Resize an image so that it is size along the minimum spatial dimension.
"""
w, h = map(float, img.shape[:2])
if min([w, h]) != size:
if w <= h:
img = resize(img, (int(round((h/w)*size)), int(size)))
else:
img = resize(img, (int(size), int(round((w/h)*size))))
return img
#dask_arr = dask_arr_dict[ID]
embeddings_dict=torch.load(embeddings_file)
embeddings=embeddings_dict['embeddings']
patch_info=embeddings_dict['patch_info']
if sort_col:
idx=np.argsort(patch_info[sort_col].values)
if sort_mode == 'desc':
idx=idx[::-1]
patch_info = patch_info.iloc[idx]
embeddings=embeddings.iloc[idx]
if ID:
removal_bool=(patch_info['ID']==ID).values
patch_info = patch_info.loc[removal_bool]
embeddings=embeddings.loc[removal_bool]
if remove_background_annotation:
removal_bool=(patch_info[remove_background_annotation]<=(1.-max_background_area)).values
patch_info=patch_info.loc[removal_bool]
embeddings=embeddings.loc[removal_bool]
umap=UMAP(n_components=2,n_neighbors=n_neighbors)
t_data=pd.DataFrame(umap.fit_transform(embeddings.iloc[:,:-1].values),columns=['x','y'],index=embeddings.index)
images=[]
for i in range(patch_info.shape[0]):
ID=patch_info.iloc[i]['ID']
x,y,patch_size=patch_info.iloc[i][['x','y','patch_size']].values.tolist()
arr=dask_arr_dict[ID][x:x+patch_size,y:y+patch_size]#.transpose((2,0,1))
images.append(arr)
c=Client()
images=dask.compute(images)
c.close()
if mpl_scatter:
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
def imscatter(x, y, ax, imageData, zoom):
images = []
for i in range(len(x)):
x0, y0 = x[i], y[i]
img = imageData[i]
#print(img.shape)
image = OffsetImage(img, zoom=zoom)
ab = AnnotationBbox(image, (x0, y0), xycoords='data', frameon=False)
images.append(ax.add_artist(ab))
ax.update_datalim(np.column_stack([x, y]))
ax.autoscale()
fig, ax = plt.subplots()
imscatter(t_data['x'].values, t_data['y'].values, imageData=images[0], ax=ax, zoom=zoom)
sns.despine()
plt.savefig(outputfname,dpi=300)
else:
xx=t_data.iloc[:,0]
yy=t_data.iloc[:,1]
images = [min_resize(image, img_res) for image in images]
max_width = max([image.shape[0] for image in images])
max_height = max([image.shape[1] for image in images])
x_min, x_max = xx.min(), xx.max()
y_min, y_max = yy.min(), yy.max()
# Fix the ratios
sx = (x_max-x_min)
sy = (y_max-y_min)
if sx > sy:
res_x = sx/float(sy)*res
res_y = res
else:
res_x = res
res_y = sy/float(sx)*res
canvas = np.ones((res_x+max_width, res_y+max_height, 3))*cval
x_coords = np.linspace(x_min, x_max, res_x)
y_coords = np.linspace(y_min, y_max, res_y)
for x, y, image in zip(xx, yy, images):
w, h = image.shape[:2]
x_idx = np.argmin((x - x_coords)**2)
y_idx = np.argmin((y - y_coords)**2)
canvas[x_idx:x_idx+w, y_idx:y_idx+h] = image
skimage.io.imsave(outputfname, canvas)
```
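A quick, self-contained sketch of the `PlotlyPlot` helper above (assuming pathflowai, plotly, seaborn and pandas are installed); it writes a toy categorical 3-D embedding to an interactive HTML file:
```python
import numpy as np, pandas as pd
from pathflowai.visualize import PlotlyPlot

# hypothetical 3-D embedding with two made-up class labels
t_data_df = pd.DataFrame(np.random.randn(50, 3), columns=['x', 'y', 'z'])
t_data_df['color'] = np.random.choice(['tumor', 'stroma'], size=50)
t_data_df['name'] = ['patch_{}'.format(i) for i in range(50)]

pp = PlotlyPlot()
pp.add_plot(t_data_df)                        # color_col='color' and name_col='name' are the defaults
pp.plot('toy_embedding.html', axes_off=True)  # interactive scatter plot, one trace per class
```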
#### File: PathFlowAI/experimental/get_counts.py
```python
import brambox as bb
import os
from os.path import join, basename
from pathflowai.utils import load_sql_df, npy2da, df2sql
import skimage
import dask, dask.array as da, pandas as pd, numpy as np
import argparse
from scipy import ndimage
from scipy.ndimage.measurements import label
import pickle
from dask.distributed import Client
from multiprocessing import Pool
from functools import reduce
def count_cells(m, num_classes=3):
lbls,n_lbl=label(m)
obj_labels=np.zeros(num_classes)
for i in range(1,num_classes+1):
obj_labels[i-1]=len(np.unique(lbls[m==i].flatten()))
return obj_labels
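# Illustrative toy example (hypothetical mask): for
#   m = np.array([[1, 1, 0, 2],
#                 [1, 1, 0, 2],
#                 [0, 0, 0, 0],
#                 [2, 2, 0, 0]])
# scipy's label() sees three connected foreground components, so
# count_cells(m, num_classes=3) returns array([1., 2., 0.]):
# one object of class 1, two objects of class 2, none of class 3.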
if __name__=='__main__':
p=argparse.ArgumentParser()
p.add_argument('--num_classes',default=4,type=int)
p.add_argument('--patch_size',default=512,type=int)
p.add_argument('--n_workers',default=40,type=int)
p.add_argument('--p_sample',default=0.7,type=float)
p.add_argument('--input_dir',default='inputs',type=str)
p.add_argument('--patch_info_file',default='cell_info.db',type=str)
p.add_argument('--reference_mask',default='reference_mask.npy',type=str)
#c=Client()
# add mode to just use own extracted boudning boxes or from seg, maybe from histomicstk
args=p.parse_args()
num_classes=args.num_classes
n_workers=args.n_workers
input_dir=args.input_dir
patch_info_file=args.patch_info_file
patch_size=args.patch_size
np.random.seed(42)
reference_mask=args.reference_mask
patch_info=load_sql_df(patch_info_file, patch_size)
IDs=patch_info['ID'].unique()
#slides = {slide:da.from_zarr(join(input_dir,'{}.zarr'.format(slide))) for slide in IDs}
masks = {mask:npy2da(join(input_dir,'{}_mask.npy'.format(mask))) for mask in IDs}
def process_chunk(patch_info_sub):
patch_info_sub=patch_info_sub.reset_index(drop=True)
counts=[]
for i in range(patch_info_sub.shape[0]):
#print(i)
patch=patch_info_sub.iloc[i]
ID,x,y,patch_size2=patch[['ID','x','y','patch_size']].tolist()
m=masks[ID][x:x+patch_size2,y:y+patch_size2]
counts.append(dask.delayed(count_cells)(m, num_classes=num_classes))
return dask.compute(*counts,scheduler='threading')
patch_info_subs=np.array_split(patch_info,n_workers)
p=Pool(n_workers)
counts=reduce(lambda x,y:x+y,p.map(process_chunk,patch_info_subs))
#bbox_dfs=dask.compute(*bbox_dfs,scheduler='processes')
counts=pd.DataFrame(np.vstack(counts))
patch_info=pd.concat([patch_info[['ID','x','y','patch_size','annotation']].reset_index(drop=True),counts.reset_index(drop=True)],axis=1).reset_index()
print(patch_info)
df2sql(patch_info, 'counts_test.db', patch_size, mode='replace')
```
#### File: PathFlowAI/pathflowai/cli_visualizations.py
```python
import click
from pathflowai.visualize import PredictionPlotter, plot_image_
import glob, os
from pathflowai.utils import load_preprocessed_img
import dask.array as da
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='0.1')
def visualize():
pass
@visualize.command()
@click.option('-i', '--input_dir', default='./inputs/', help='Input directory for patches.', type=click.Path(exists=False), show_default=True)
@click.option('-b', '--basename', default='A01', help='Basename of patches.', type=click.Path(exists=False), show_default=True)
@click.option('-p', '--patch_info_file', default='patch_info.db', help='Database containing all patches', type=click.Path(exists=False), show_default=True)
@click.option('-ps', '--patch_size', default=224, help='Patch size.', show_default=True)
@click.option('-x', '--x', default=0, help='X Coordinate of patch.', show_default=True)
@click.option('-y', '--y', default=0, help='Y coordinate of patch.', show_default=True)
@click.option('-o', '--outputfname', default='./output_image.png', help='Output extracted image.', type=click.Path(exists=False), show_default=True)
@click.option('-s', '--segmentation', is_flag=True, help='Plot segmentations.', show_default=True)
@click.option('-sc', '--n_segmentation_classes', default=4, help='Number segmentation classes', show_default=True)
@click.option('-c', '--custom_segmentation', default='', help='Add custom segmentation map from prediction, in npy', show_default=True)
def extract_patch(input_dir, basename, patch_info_file, patch_size, x, y, outputfname, segmentation, n_segmentation_classes, custom_segmentation):
"""Extract image of patch of any size/location and output to image file"""
if glob.glob(os.path.join(input_dir,'*.zarr')):
dask_arr_dict = {os.path.basename(f).split('.zarr')[0]:da.from_zarr(f) for f in glob.glob(os.path.join(input_dir,'*.zarr')) if os.path.basename(f).split('.zarr')[0] == basename}
else:
dask_arr_dict = {basename:load_preprocessed_img(os.path.join(input_dir,'{}.npy'.format(basename)))}
pred_plotter = PredictionPlotter(dask_arr_dict, patch_info_file, compression_factor=3, alpha=0.5, patch_size=patch_size, no_db=True, segmentation=segmentation,n_segmentation_classes=n_segmentation_classes, input_dir=input_dir)
if custom_segmentation:
pred_plotter.add_custom_segmentation(basename,custom_segmentation)
img = pred_plotter.return_patch(basename, x, y, patch_size)
pred_plotter.output_image(img,outputfname)
@visualize.command()
@click.option('-i', '--image_file', default='./inputs/a.svs', help='Input image file.', type=click.Path(exists=False), show_default=True)
@click.option('-cf', '--compression_factor', default=3., help='How much compress image.', show_default=True)
@click.option('-o', '--outputfname', default='./output_image.png', help='Output extracted image.', type=click.Path(exists=False), show_default=True)
def plot_image(image_file, compression_factor, outputfname):
"""Plots the whole slide image supplied."""
plot_image_(image_file, compression_factor=compression_factor, test_image_name=outputfname)
@visualize.command()
@click.option('-i', '--mask_file', default='./inputs/a_mask.npy', help='Input mask file.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--outputfname', default='./output_image.png', help='Output extracted image.', type=click.Path(exists=False), show_default=True)
def plot_mask_mpl(mask_file, outputfname):
"""Plots the whole slide mask supplied."""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
#plt.figure()
plt.imshow(np.load(mask_file))
plt.axis('off')
plt.savefig(outputfname,dpi=500)
@visualize.command()
@click.option('-i', '--input_dir', default='./inputs/', help='Input directory for patches.', type=click.Path(exists=False), show_default=True)
@click.option('-b', '--basename', default='A01', help='Basename of patches.', type=click.Path(exists=False), show_default=True)
@click.option('-p', '--patch_info_file', default='patch_info.db', help='Database containing all patches', type=click.Path(exists=False), show_default=True)
@click.option('-ps', '--patch_size', default=224, help='Patch size.', show_default=True)
@click.option('-o', '--outputfname', default='./output_image.png', help='Output extracted image.', type=click.Path(exists=False), show_default=True)
@click.option('-an', '--annotations', is_flag=True, help='Plot annotations instead of predictions.', show_default=True)
@click.option('-cf', '--compression_factor', default=3., help='How much compress image.', show_default=True)
@click.option('-al', '--alpha', default=0.8, help='How much to give annotations/predictions versus original image.', show_default=True)
@click.option('-s', '--segmentation', is_flag=True, help='Plot segmentations.', show_default=True)
@click.option('-sc', '--n_segmentation_classes', default=4, help='Number segmentation classes', show_default=True)
@click.option('-c', '--custom_segmentation', default='', help='Add custom segmentation map from prediction, npy format.', show_default=True)
@click.option('-ac', '--annotation_col', default='annotation', help='Column of annotations', type=click.Path(exists=False), show_default=True)
@click.option('-sf', '--scaling_factor', default=1., help='Multiply all prediction scores by this amount.', show_default=True)
@click.option('-tif', '--tif_file', is_flag=True, help='Write to tiff file.', show_default=True)
def plot_predictions(input_dir,basename,patch_info_file,patch_size,outputfname,annotations, compression_factor, alpha, segmentation, n_segmentation_classes, custom_segmentation, annotation_col, scaling_factor, tif_file):
"""Overlays classification, regression and segmentation patch level predictions on top of whole slide image."""
if glob.glob(os.path.join(input_dir,'*.zarr')):
dask_arr_dict = {os.path.basename(f).split('.zarr')[0]:da.from_zarr(f) for f in glob.glob(os.path.join(input_dir,'*.zarr')) if os.path.basename(f).split('.zarr')[0] == basename}
else:
dask_arr_dict = {basename:load_preprocessed_img(os.path.join(input_dir,'{}.npy'.format(basename)))}
pred_plotter = PredictionPlotter(dask_arr_dict, patch_info_file, compression_factor=compression_factor, alpha=alpha, patch_size=patch_size, no_db=False, plot_annotation=annotations, segmentation=segmentation, n_segmentation_classes=n_segmentation_classes, input_dir=input_dir, annotation_col=annotation_col, scaling_factor=scaling_factor)
if custom_segmentation:
pred_plotter.add_custom_segmentation(basename,custom_segmentation)
img = pred_plotter.generate_image(basename)
pred_plotter.output_image(img, outputfname, tif_file)
@visualize.command()
@click.option('-i', '--img_file', default='image.txt', help='Input image.', type=click.Path(exists=False), show_default=True)
@click.option('-a', '--annotation_txt', default='annotation.txt', help='Annotation text file, one polygon per line.', type=click.Path(exists=False), show_default=True)
@click.option('-ocf', '--original_compression_factor', default=1., help='Compression factor already applied to the input image.', show_default=True)
@click.option('-cf', '--compression_factor', default=3., help='How much to compress the image for plotting.', show_default=True)
@click.option('-o', '--outputfilename', default='./output_image.png', help='Output extracted image.', type=click.Path(exists=False), show_default=True)
def overlay_new_annotations(img_file,annotation_txt, original_compression_factor,compression_factor, outputfilename):
"""Custom annotations, in format [Point: x, y, Point: x, y ... ] one line like this per polygon, overlap these polygons on top of WSI."""
#from shapely.ops import unary_union, polygonize
#from shapely.geometry import MultiPolygon, LineString, MultiPoint, box, Point
#from shapely.geometry.polygon import Polygon
print("Experimental, in development")
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import re, numpy as np
from PIL import Image
import cv2
from pathflowai.visualize import to_pil
im=plt.imread(img_file) if not img_file.endswith('.npy') else np.load(img_file,mmap_mode='r+')
print(im.shape)
if compression_factor>1 and original_compression_factor == 1.:
im=cv2.resize(im,dsize=(int(im.shape[1]/compression_factor),int(im.shape[0]/compression_factor)),interpolation=cv2.INTER_CUBIC)#im.resize((int(im.shape[0]/compression_factor),int(im.shape[1]/compression_factor)))
print(im.shape)
im=np.array(im)
im=im.transpose((1,0,2))##[::-1,...]#
plt.imshow(im)
with open(annotation_txt) as f:
polygons=[np.array([list(map(float,filter(None,coords.strip(' ').split(',')))) for coords in re.sub('\]|\[|\ ','',line).rstrip().split('Point:') if coords])/compression_factor for line in f]
for polygon in polygons:
plt.plot(polygon[:,0],polygon[:,1],color='blue')
plt.axis('off')
plt.savefig(outputfilename,dpi=500)
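# A hypothetical annotation_txt line (one polygon per line; the coordinates are parsed out of the
# "Point:" entries above, divided by compression_factor, and plotted as a blue outline):
# [Point: 1000, 2000, Point: 1500, 2200, Point: 1200, 2600]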
@visualize.command()
@click.option('-i', '--embeddings_file', default='predictions/embeddings.pkl', help='Embeddings.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--plotly_output_file', default='predictions/embeddings.html', help='Plotly output file.', type=click.Path(exists=False), show_default=True)
@click.option('-a', '--annotations', default=[], multiple=True, help='Multiple annotations to color image.', show_default=True)
@click.option('-rb', '--remove_background_annotation', default='', help='If selected, removes patches that are 100% background based on this annotation.', type=click.Path(exists=False), show_default=True)
@click.option('-ma', '--max_background_area', default=0.05, help='Max background area before exclusion.', show_default=True)
@click.option('-b', '--basename', default='', help='Basename of patches.', type=click.Path(exists=False), show_default=True)
@click.option('-nn', '--n_neighbors', default=8, help='Number nearest neighbors.', show_default=True)
def plot_embeddings(embeddings_file,plotly_output_file, annotations, remove_background_annotation , max_background_area, basename, n_neighbors):
"""Perform UMAP embeddings of patches and plot using plotly."""
import torch
from umap import UMAP
from pathflowai.visualize import PlotlyPlot
import pandas as pd, numpy as np
embeddings_dict=torch.load(embeddings_file)
embeddings=embeddings_dict['embeddings']
patch_info=embeddings_dict['patch_info']
if remove_background_annotation:
removal_bool=(patch_info[remove_background_annotation]<=(1.-max_background_area)).values
patch_info=patch_info.loc[removal_bool]
embeddings=embeddings.loc[removal_bool]
if basename:
removal_bool=(patch_info['ID']==basename).values
patch_info=patch_info.loc[removal_bool]
embeddings=embeddings.loc[removal_bool]
if annotations:
annotations=np.array(annotations)
if len(annotations)>1:
embeddings.loc[:,'ID']=np.vectorize(lambda i: annotations[np.argmax(patch_info.iloc[i][annotations].values)])(np.arange(embeddings.shape[0]))
else:
embeddings.loc[:,'ID']=patch_info[annotations].values
umap=UMAP(n_components=3,n_neighbors=n_neighbors)
t_data=pd.DataFrame(umap.fit_transform(embeddings.iloc[:,:-1].values),columns=['x','y','z'],index=embeddings.index)
t_data['color']=embeddings['ID'].values
t_data['name']=embeddings.index.values
pp=PlotlyPlot()
pp.add_plot(t_data,size=8)
pp.plot(plotly_output_file,axes_off=True)
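# Example invocation (a sketch; embeddings.pkl must be a torch-saved dict with 'embeddings' and
# 'patch_info' keys as loaded above; the annotation name is a placeholder):
# pathflowai-visualize plot_embeddings -i predictions/embeddings.pkl -o predictions/embeddings.html -a tumor -rb background -ma 0.05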
@visualize.command()
@click.option('-m', '--model_pkl', default='', help='Pickled model file.', type=click.Path(exists=False), show_default=True)
@click.option('-bs', '--batch_size', default=32, help='Batch size.', show_default=True)
@click.option('-o', '--outputfilename', default='predictions/shap_plots.png', help='SHAPley visualization.', type=click.Path(exists=False), show_default=True)
@click.option('-mth', '--method', default='deep', help='Method of explaining.', type=click.Choice(['deep','gradient']), show_default=True)
@click.option('-l', '--local_smoothing', default=0.0, help='Local smoothing of SHAP scores.', show_default=True)
@click.option('-ns', '--n_samples', default=32, help='Number shapley samples for shapley regression (gradient explainer).', show_default=True)
@click.option('-p', '--pred_out', default='none', help='If not none, output prediction as shap label.', type=click.Choice(['none','sigmoid','softmax']), show_default=True)
def shapley_plot(model_pkl, batch_size, outputfilename, method='deep', local_smoothing=0.0, n_samples=20, pred_out='none'):
"""Run SHAPley attribution method on patches after classification task to see where model made prediction based on."""
from pathflowai.visualize import plot_shap
import torch
from pathflowai.datasets import get_data_transforms
model_dict=torch.load(model_pkl)
model_dict['dataset_opts']['transformers']=get_data_transforms(**model_dict['transform_opts'])
plot_shap(model_dict['model'], model_dict['dataset_opts'], model_dict['transform_opts'], batch_size, outputfilename, method=method, local_smoothing=local_smoothing, n_samples=n_samples, pred_out=pred_out)
@visualize.command()
@click.option('-i', '--input_dir', default='./inputs/', help='Input directory for patches.', type=click.Path(exists=False), show_default=True)
@click.option('-e', '--embeddings_file', default='predictions/embeddings.pkl', help='Embeddings.', type=click.Path(exists=False), show_default=True)
@click.option('-b', '--basename', default='', help='Basename of patches.', type=click.Path(exists=False), show_default=True)
@click.option('-o', '--outputfilename', default='predictions/shap_plots.png', help='Embedding visualization.', type=click.Path(exists=False), show_default=True)
@click.option('-mpl', '--mpl_scatter', is_flag=True, help='Use a matplotlib scatter plot.', show_default=True)
@click.option('-rb', '--remove_background_annotation', default='', help='If selected, removes patches that are 100% background based on this annotation.', type=click.Path(exists=False), show_default=True)
@click.option('-ma', '--max_background_area', default=0.05, help='Max background area before exclusion.', show_default=True)
@click.option('-z', '--zoom', default=0.05, help='Size of images.', show_default=True)
@click.option('-nn', '--n_neighbors', default=8, help='Number nearest neighbors.', show_default=True)
@click.option('-sc', '--sort_col', default='', help='Sort samples on this column.', type=click.Path(exists=False), show_default=True)
@click.option('-sm', '--sort_mode', default='asc', help='Sort ascending or descending.', type=click.Choice(['asc','desc']), show_default=True)
def plot_image_umap_embeddings(input_dir,embeddings_file,basename,outputfilename,mpl_scatter, remove_background_annotation, max_background_area, zoom, n_neighbors, sort_col='', sort_mode='asc'):
"""Plots a UMAP embedding with each point as its corresponding patch image."""
from pathflowai.visualize import plot_umap_images
if glob.glob(os.path.join(input_dir,'*.zarr')):
dask_arr_dict = {os.path.basename(f).split('.zarr')[0]:da.from_zarr(f) for f in glob.glob(os.path.join(input_dir,'*.zarr')) if (not basename) or os.path.basename(f).split('.zarr')[0] == basename}
else:
dask_arr_dict = {basename:load_preprocessed_img(os.path.join(input_dir,'{}.npy'.format(basename))) for basename in ([basename] if basename else set(list(map(lambda x: os.path.basename(os.path.splitext(x)[0]),glob.glob(os.path.join(input_dir,"*.*"))))))}
plot_umap_images(dask_arr_dict, embeddings_file, ID=basename, cval=1., image_res=300., outputfname=outputfilename, mpl_scatter=mpl_scatter, remove_background_annotation=remove_background_annotation, max_background_area=max_background_area, zoom=zoom, n_neighbors=n_neighbors, sort_col=sort_col, sort_mode=sort_mode)
if __name__ == '__main__':
visualize()
```
#### File: PathFlowAI/pathflowai/stain_norm.py
```python
import cv2
import sys
import fire
import histomicstk
import histomicstk as htk
import openslide
import dask
import tqdm
import numpy as np
from dask.diagnostics import ProgressBar
from pathflowai.utils import generate_tissue_mask
from histomicstk.preprocessing.color_normalization.\
deconvolution_based_normalization import deconvolution_based_normalization
W_target = np.array([
[0.6185391, 0.1576997, -0.01119131],
[0.7012888, 0.8638838, 0.45586256],
[0.3493163, 0.4657428, -0.85597752]
])
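# W_target is presumably the target stain matrix for deconvolution-based normalization:
# each column is a stain vector in RGB optical-density space (hematoxylin, eosin, residual),
# matching the stains=['hematoxylin', 'eosin'] argument used below.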
def return_norm_image(img,mask,W_source=None,W_target=None):
img=deconvolution_based_normalization(
img, W_source=W_source, W_target=W_target, im_target=None,
stains=['hematoxylin', 'eosin'], mask_out=~mask,
stain_unmixing_routine_params={"I_0":215})
return img
def check_ext(image_file):
return any([image_file.endswith(ext) for ext in ['.svs','.png','.jpg','.jpeg','.tiff','.tif']])
def stain_norm(image_file,compression=10,patch_size=1024):
if check_ext(image_file):
img = openslide.open_slide(image_file)
image = np.array(img.read_region((0,0), 0, img.level_dimensions[0]))[...,:3]
elif image_file.endswith(".npy"):
image=np.load(image_file)
else: raise NotImplementedError
mask=generate_tissue_mask(image,compression=compression,keep_holes=False)
img_small=cv2.resize(image,None,fx=1/compression,fy=1/compression)
mask_small=cv2.resize(mask.astype(int),None,fx=1/compression,fy=1/compression,interpolation=cv2.INTER_NEAREST).astype(bool)
W_source = htk.preprocessing.color_deconvolution.rgb_separate_stains_macenko_pca(img_small, 215)
W_source = htk.preprocessing.color_deconvolution._reorder_stains(W_source)
res=[]
coords=[]
for i in np.arange(0,image.shape[0]-patch_size,patch_size):
for j in np.arange(0,image.shape[1]-patch_size,patch_size):
if mask[i:i+patch_size,j:j+patch_size].mean():
coords.append((i,j))
res.append(dask.delayed(return_norm_image)(image[i:i+patch_size,j:j+patch_size],mask[i:i+patch_size,j:j+patch_size],W_source,W_target))
with ProgressBar():
res_returned=dask.compute(*res,scheduler="processes")
img_new=np.ones(image.shape).astype(np.uint8)*255
for k in tqdm.trange(len(coords)):
i,j=coords[k]
img_new[i:i+patch_size,j:j+patch_size]=res_returned[k]
return img_new
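# Minimal usage sketch (file paths are placeholders):
# normalized = stain_norm("slide.svs", compression=10, patch_size=1024)
# np.save("slide_stain_normalized.npy", normalized)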
def stain_norm_pipeline(image_file="stain_in.svs",
npy_out='stain_out.npy',
compression=10,
patch_size=1024):
np.save(npy_out,stain_norm(image_file,compression,patch_size))
if __name__=="__main__":
fire.Fire(stain_norm_pipeline)
```
#### File: jlevy44/PathFlowAI/setup.py
```python
from setuptools import setup
from setuptools.command.install import install
import subprocess
import os
PACKAGES=[ 'pandas==0.25.0',
'numpy',
'dask[dataframe]',
'distributed',
'nonechucks',
'dask-image',
'opencv-python',
'scikit-learn',
'scipy',
'umap-learn',
'pysnooper',
'tifffile',
'seaborn',
'scikit-image',
'openslide-python',
'Shapely',
'click==6.7',
'torch',
'torchvision',
'albumentations',
'GPUtil',
'beautifulsoup4',
'plotly',
'xarray',
'matplotlib',
'networkx',
'shap',
'pyyaml',
'torch-encoding',
'xmltodict',
#'lightnet',
'brambox',
'blosc',
'numcodecs',
'zarr',
'pytorchcv',
'h5py',
'timm'
]
with open('README.md','r', encoding='utf-8') as f:
long_description = f.read()
class CustomInstallCommand(install):
"""Custom install setup to help run shell commands (outside shell) before installation"""
def run(self):
#for package in PACKAGES:
#os.system('pip install {}'.format(package))#install.do_egg_install(self)
self.do_egg_install()#install.run(self)
subprocess.call('rm -rf apex'.split())
os.system('git clone https://github.com/NVIDIA/apex')
#try:
#os.system('cd apex && pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./')
#except:
os.system('echo pwd && cd apex && (pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ || pip install -v --no-cache-dir ./)')
subprocess.call('rm -rf apex'.split())
setup(name='pathflowai',
version='0.1.1',
description='A modular approach for preprocessing and deep learning on histopathology images.',
url='https://github.com/jlevy44/PathFlowAI',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
scripts=['bin/install_apex',
'bin/install_lightnet'],
#cmdclass={'install': CustomInstallCommand},
entry_points={
'console_scripts':['pathflowai-preprocess=pathflowai.cli_preprocessing:preprocessing',
'pathflowai-visualize=pathflowai.cli_visualizations:visualize',
'pathflowai-monitor=pathflowai.monitor_memory_usage:monitor',
'pathflowai-train_model=pathflowai.model_training:train']
},
long_description=long_description,
long_description_content_type='text/markdown',
packages=['pathflowai'],
install_requires=PACKAGES)
``` |
{
"source": "jlevy44/PathPretrain",
"score": 2
} |
#### File: PathPretrain/pathpretrain/datasets.py
```python
import torch
import os
import pickle
import tifffile
from PIL import Image
import tqdm
import numpy as np, pandas as pd
from torch.utils.data import Dataset, DataLoader
import glob
from .utils import load_image
class NPYDataset(Dataset):
def __init__(self, patch_info, npy_file, transform, tensor_dataset=False):
self.ID=os.path.basename(npy_file).replace(".npy","").replace(".tiff","").replace(".tif","").replace(".svs","")
self.patch_info=patch_info.loc[patch_info["ID"]==self.ID].reset_index()
self.X=load_image(npy_file)
self.to_pil=lambda x: Image.fromarray(x)
self.transform=transform
self.tensor_dataset=tensor_dataset
def __getitem__(self,i):
x,y,patch_size=self.patch_info.loc[i,["x","y","patch_size"]]
img=self.X[x:x+patch_size,y:y+patch_size]
return self.transform(self.to_pil(img)) if not self.tensor_dataset else torch.tensor(img),torch.tensor([-1])
def __len__(self):
return self.patch_info.shape[0]
def embed(self,model,batch_size,out_dir):
Z=[]
dataloader=DataLoader(self,batch_size=batch_size,shuffle=False)
n_batches=len(self)//batch_size
with torch.no_grad():
for i,(X,y) in tqdm.tqdm(enumerate(dataloader),total=n_batches):
if torch.cuda.is_available(): X=X.cuda()
if self.tensor_dataset: X = self.transform(X)
z=model(X).detach().cpu().numpy()
Z.append(z)
print(f"Processed batch {i}/{n_batches}")
Z=np.vstack(Z)
torch.save(dict(embeddings=Z,patch_info=self.patch_info),os.path.join(out_dir,f"{self.ID}.pkl"))
print("Embeddings saved")
quit()
class PickleDataset(Dataset):
def __init__(self, pkl, transform, label_map):
self.data=pickle.load(open(pkl,'rb'))
self.X,self.targets=self.data['X'],self.data['y']
self.aux_data=self.data.get("z",None)
self.has_aux=(self.aux_data is not None)
if self.has_aux and isinstance(self.aux_data,pd.DataFrame): self.aux_data=self.aux_data.values
if self.has_aux: self.n_aux_features=self.aux_data.shape[1]
self.transform=transform
self.to_pil=lambda x: Image.fromarray(x)
self.label_map=label_map
if self.label_map:
self.targets=pd.Series(self.targets).map(lambda x: self.label_map.get(x,-1)).values
if -1 in self.targets:
remove_bool=(self.targets!=-1)
self.targets=self.targets[remove_bool]
self.X=pd.Series(self.X).iloc[remove_bool].tolist()
if self.has_aux: self.aux_data=self.aux_data[remove_bool]
self.length=len(self.X)
def __getitem__(self,idx):
items=(self.transform(self.to_pil(self.X[idx])), torch.tensor(self.targets[idx]).long())
if self.has_aux: items+=(torch.tensor(self.aux_data[idx]).float(),)
return items
def __len__(self):
return self.length
class NPYRotatingStack(Dataset):
def __init__(self, patch_dir, transform, sample_frac=1., sample_every=0, target_col={'old_y_true':'y_true'}):
self.patch_npy=np.array(glob.glob(os.path.join(patch_dir,"*.npy")))
self.patch_pkl=np.vectorize(lambda x: x.replace(".npy",".pkl"))(self.patch_npy)
self.sample_every=sample_every
self.sample_frac=sample_frac
if self.sample_frac==1: self.sample_every=0
self.target_col=list(target_col.items())[0]
self.ref_index=None # dictionary
self.data={}
self.cache_npy=None # dictionary keys
self.to_pil=lambda x: Image.fromarray(x)
self.transform=transform
assert self.target_col[1]=='y_true'
self.targets=np.hstack([pd.read_pickle(pkl)[self.target_col[0]].values for pkl in self.patch_pkl])
self.load_image_annot()
def load_image_annot(self):
if self.sample_frac<1:
idx=np.arange(len(self.patch_npy))
idx=np.random.choice(idx,int(self.sample_frac*len(idx)),replace=False) # sample a fraction of the slides without replacement
patch_npy=self.patch_npy[idx]
patch_pkl=self.patch_pkl[idx]
remove_npy=np.setdiff1d(self.patch_npy,patch_npy)
# drop cached slides that were not selected in this sampling round
for npy in remove_npy:
if self.cache_npy is not None and npy in self.cache_npy:
del self.data[npy]
# reuse slides that are already cached; load patches/patch_info only for newly sampled slides
new_data={npy:(dict(patches=load_image(npy),
patch_info=pd.read_pickle(pkl)) if (self.cache_npy is None or npy not in self.cache_npy) else self.data[npy]) for npy,pkl in zip(patch_npy,patch_pkl)}
self.data.clear()
self.data=new_data
self.cache_npy=sorted(list(self.data.keys()))
self.ref_index=np.vstack([np.array(([i]*self.data[npy]['patch_info'].shape[0],list(range(self.data[npy]['patch_info'].shape[0])))).T for i,npy in enumerate(self.cache_npy)])
else:
self.data={npy:dict(patches=load_image(npy),
patch_info=pd.read_pickle(pkl)) for npy,pkl in zip(self.patch_npy,self.patch_pkl)}
self.cache_npy=sorted(self.patch_npy)
self.ref_index=np.vstack([np.array(([i]*self.data[npy]['patch_info'].shape[0],list(range(self.data[npy]['patch_info'].shape[0])))).T for i,npy in enumerate(self.cache_npy)])
for npy in self.data: self.data[npy]['patch_info'][self.target_col[1]]=self.data[npy]['patch_info'][self.target_col[0]]
self.length=self.ref_index.shape[0]
def __getitem__(self,idx):
i,j=self.ref_index[idx]
npy=self.cache_npy[i]
X=self.data[npy]['patches'][j]
y=torch.LongTensor([self.data[npy]['patch_info'].iloc[j][self.target_col[1]]])
X=self.transform(self.to_pil(X))
return X, y
def __len__(self):
return self.length
``` |
{
"source": "jlevy44/PolyCRACKER-Unofficial-Mirror",
"score": 2
} |
#### File: PolyCRACKER-Unofficial-Mirror/polycracker/format.py
```python
from collections import Counter, defaultdict, OrderedDict
import cPickle as pickle
import errno
from itertools import combinations, permutations
import itertools
import os
import shutil
import subprocess
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import click
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as py
import pybedtools
from Bio import Phylo
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor, _DistanceMatrix
import hdbscan
import networkx as nx
from pybedtools import BedTool
from pyfaidx import Fasta
import scipy.sparse as sps
from scipy.stats import pearsonr, chi2_contingency
import seaborn as sns
from sklearn.cluster import MiniBatchKMeans
from sklearn.manifold import SpectralEmbedding
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import FeatureAgglomeration
from sklearn.decomposition import FactorAnalysis, LatentDirichletAllocation, NMF
from sklearn.decomposition import KernelPCA, TruncatedSVD
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import SpectralClustering
from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
from sklearn.pipeline import Pipeline
from sklearn.metrics import *
from sklearn.metrics import calinski_harabaz_score, silhouette_score
# from evolutionary_search import maximize
RANDOM_STATE=42
CONTEXT_SETTINGS = dict(help_option_names=['-h','--help'], max_content_width=90)
@click.group(context_settings= CONTEXT_SETTINGS)
@click.version_option(version='1.1.3')
def format():
pass
def create_path(path):
"""Create a path if directory does not exist, raise exception for other errors"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
@format.command(name='maf2bed')
@click.option('-maf', '--last', default='fasta1.fasta2.last,fasta2.fasta1.last', show_default=True, help='Maf output of last alignment. Comma delimited list if multiple maf files.', type=click.Path(exists=False))
@click.option('-w', '--work_dir', default='./', show_default=True, help='Work directory for final outputs.', type=click.Path(exists=False))
def maf2bed(last, work_dir): # FIXME what I can do instead is say that if > 57.5% of the sequence is covered, then that CDS is 1 and the others are 0; count 1-CDS vs all-0-CDS for identity; it does not have to be a pure alignment, and this should increase similarity scores
"""Convert maf file to bed and perform stats on sequence alignment."""
from Bio import AlignIO
#from BCBio import GFF
import glob
work_dir += '/'
last_files = last.split(',')
final_output = []
for last in last_files:
gff_files, bed_files_final = [] , []
heads = last.split('/')[-1].split('.')[::-1][1:]
for f_name in heads:
open(work_dir + f_name+'.gff','w').close()
gff_files.append(open(work_dir + f_name+'.gff','w'))
bed_files_final.append(work_dir + f_name+'.bed')
seqrecs = [[] for i in heads]
for multiple_alignment in AlignIO.parse(last,'maf'):
for i,seqrec in enumerate(multiple_alignment): # FIXME
seqrecs[i].append((seqrec.name,seqrec.annotations['start'] if seqrec.annotations['strand'] == 1 else seqrec.annotations['srcSize'] - seqrec.annotations['start'] - seqrec.annotations['size'], seqrec.annotations['start'] + seqrec.annotations['size'] if seqrec.annotations['strand'] == 1 else seqrec.annotations['srcSize'] - seqrec.annotations['start']))
#for i, gff_file in enumerate(gff_files):
# GFF.write(seqrecs[i],gff_file)
# subprocess.call('grep -v "##sequence-region" %s > %s && mv %s %s'%(gff_files_final[i],'temp.gff','temp.gff',gff_files_final[i]),shell=True)
for i, bed_file in enumerate(bed_files_final):
pd.DataFrame(seqrecs[i]).to_csv(bed_file, sep='\t',header=None,index=None)
# FIXME
fasta_files = []
last_path = last[:last.rfind('/')+1]
for f in heads:
fasta_files.extend(glob.glob(last_path+f+'.fasta') + glob.glob(last_path+f+'.fa'))
for i,fasta in enumerate(fasta_files):
Fasta(fasta)
subprocess.call("awk -v OFS='\\t' '{print $1, 0, $2}' %s > %s"%(fasta+'.fai',fasta+'.bed'),shell=True)
a = BedTool(fasta+'.bed').sort()
df = a.intersect(BedTool(bed_files_final[i]).sort().merge()).to_dataframe()
df2 = a.to_dataframe()
intersect_sum = (df['end'] - df['start']).sum()
genome_size = (df2['end'] - df2['start']).sum()
final_output.append((heads[i],genome_size,intersect_sum,float(intersect_sum)/genome_size))
df_final = pd.DataFrame(final_output,columns = ['fasta_head','genome_size','length_aligned','percent_aligned'])
df_final.to_csv(work_dir+'sequence_similarity.csv')
with open(work_dir+'weighted_sum.txt','w') as f:
f.write(str((df_final['percent_aligned']*df_final['genome_size']).sum()/float(df_final['genome_size'].sum())))
@format.command(name='convert_mat2R')
@click.option('-npz','--input_matrix',default='clusteringMatrix.npz',help='Input sparse matrix, scipy sparse npz format.',show_default=True, type=click.Path(exists=False))
def convert_mat2R(input_matrix):
"""Convert any sparse matrix into a format to be read by R. Can import matrix into R metagenomics clustering programs."""
from scipy.io import mmwrite
mmwrite(input_matrix.replace('.npz','.mtx'),sps.load_npz(input_matrix))
@format.command()
@click.option('-i', '--hipmer_input', default='test.txt', help = 'Input file or directory from hipmer kmer counting run.', show_default=True, type=click.Path(exists=False))
@click.option('-o', '--kcount_output', default='test.final.kcount', help = 'Output kmer count file.', show_default=True, type=click.Path(exists=False))
@click.option('-d', '--run_on_dir', is_flag=True, help='Choose to run on all files in hipmer_input if you have specified a directory for the hipmer input. Directory can only contain hipmer files.')
def hipmer_output_to_kcount(hipmer_input, kcount_output, run_on_dir):
"""Converts hipmer kmer count output into a kmer count, kcount, file."""
if run_on_dir:
hipmer_path = hipmer_input + '/'
subprocess.call("cat %s | awk '{OFS = \"\\t\"; sum=0; for (i=2; i<=7; i++) { sum+= $i }; if (sum >= 3) print $1, sum}' > %s"%(' '.join([hipmer_path+hipmer_input for hipmer_input in os.listdir(hipmer_path)]),kcount_output),shell=True)
else:
subprocess.call("cat %s | awk '{OFS = \"\\t\"; sum=0; for (i=2; i<=7; i++) { sum+= $i }; if (sum >= 3) print $1, sum}' > %s"%(hipmer_input,kcount_output),shell=True)
@format.command()
@click.option('-a', '--anchor_file', help = 'Lifted anchor file generated from basic synteny run using jcvi tools.', type=click.Path(exists=False))
@click.option('-q', '--qbed', help='First bed file.', type=click.Path(exists=False))
@click.option('-s', '--sbed', help='Second bed file.', type=click.Path(exists=False))
def anchor2bed(anchor_file, qbed, sbed):
"""Convert syntenic blocks of genes to bed coordinates between the two genomes being compared."""
with open(anchor_file,'r') as f:
anchors = f.read().split('###')
with open(qbed,'r') as f:
qbed = {}
for line in f:
if line:
lineL = line.split()
qbed[lineL[3]] = [lineL[0]] + map(int,lineL[1:3])
#print qbed
with open(sbed,'r') as f:
sbed = {}
for line in f:
if line:
lineL = line.split()
sbed[lineL[3]] = [lineL[0]] + map(int,lineL[1:3])
with open(anchor_file.replace('.lifted.anchors','.bed'),'w') as f:
for anchor in anchors:
if anchor:
#print anchor
q_coords = []
s_coords = []
for line in anchor.splitlines():
if line:
genes = line.split()[:2]
#print genes
q_coords.append(qbed[genes[0]])
s_coords.append(sbed[genes[1]])
#print q_coords
q_coords = pd.DataFrame(np.array(q_coords)).sort_values([0,1]).as_matrix()
s_coords = pd.DataFrame(np.array(s_coords)).sort_values([0,1]).as_matrix()
f.write('\t'.join(map(str,[q_coords[0,0],q_coords[:,1:].min(),q_coords[:,1:].max(), s_coords[0,0],s_coords[:,1:].min(),s_coords[:,1:].max()]))+'\n')
with open(anchor_file.replace('.lifted.anchors','.bed'),'r') as f:
links = np.array([line.split() for line in f.read().splitlines()])
colors_set = {color:i+1 for i, color in enumerate(set(links[:,0]))}
colors = pd.DataFrame(np.vectorize(lambda color: colors_set[color])(links[:,0]),columns=['Color'])
colors.to_csv('link_colors.csv',index=False)
links = pd.DataFrame(links,columns=['seg1','start1','end1','seg2','start2','end2'])
links.to_csv('links.csv',index=False)
# FIXME, need to grab correct orientation!!!
if __name__ == '__main__':
format()
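# Example invocations (a sketch; file names are placeholders):
# python format.py maf2bed -maf g1.g2.last,g2.g1.last -w ./out/
# python format.py anchor2bed -a blocks.lifted.anchors -q genome1.bed -s genome2.bed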
```
#### File: PolyCRACKER-Unofficial-Mirror/polycracker/helper.py
```python
from itertools import permutations
import pickle
import numpy as np
from pybedtools import BedTool
from scipy.stats import pearsonr
def compute_correlation(mat):
rowShape, columnShape = np.shape(mat)
rowCombos = permutations(range(rowShape),rowShape)
columnCombos = permutations(range(columnShape),columnShape)
print(mat)
maxR = []
for idx,combos in enumerate([columnCombos,rowCombos]):
for combo in combos:
if idx == 0:
matNew = mat[:, combo]
else:
matNew = mat[combo, :]
coords = []
for i in range(rowShape):
for j in range(columnShape):
if matNew[i,j] > 0:
for k in range(matNew[i,j]):
coords.append((i,j))
xy = np.array(coords)
maxR.append(abs(pearsonr(xy[:,0],xy[:,1])[0]))
return max(maxR)
def stats(arr):
return (np.mean(arr), np.std(arr), np.min(arr), np.max(arr))
def label_new_windows(work_dir, windows_bed, original_subgenomes_bed):
windows_bed = BedTool(windows_bed)
scaffolds_subgenome_bed = BedTool(original_subgenomes_bed)
labelled_bed = windows_bed.intersect(scaffolds_subgenome_bed,wa=True,wb=True).sort().merge(d=-1,c=7,o='distinct')
ambiguous_bed = windows_bed.intersect(scaffolds_subgenome_bed,wa=True,v=True)
bed_lines = []
for line in str(ambiguous_bed).splitlines():
if line:
bed_lines.append(line.split()+['ambiguous'])
for line in str(labelled_bed).splitlines():
if line:
if ',' in line:
bed_lines.append(line.split()[:-1]+['ambiguous'])
else:
bed_lines.append(line.split())
a = BedTool(bed_lines).sort()
a.saveas(work_dir+'relabelled_windows.bed')
new_labels = np.array([line.split()[-1] for line in str(a).splitlines()])
pickle.dump(new_labels,open(work_dir+'new_labels.p','wb'))
``` |
{
"source": "jlevy/ghizmo",
"score": 3
} |
#### File: ghizmo/commands/misc.py
```python
from ghizmo.commands import lib
def stale_pr_branches(config, args):
"""
List "stale" PR branches, i.e. those for a closed PR from the same, non-forked repository.
"""
repo = config.repo
for pr in repo.pull_requests(state="closed"):
if pr.head.repo == pr.base.repo and repo.branch(pr.head.ref):
yield {
"html_url": pr.html_url,
"base_branch": pr.base.ref,
"head_branch": pr.head.ref,
}
```
#### File: ghizmo/commands/team.py
```python
from ghizmo.commands import lib
def teams(config, args):
"""
List teams in a given organization.
"""
return config.github.organization(args.org_name).teams()
``` |
{
"source": "jlewi/code-intelligence",
"score": 2
} |
#### File: py/code_intelligence/github_util_test.py
```python
import logging
import pytest
from code_intelligence import github_util
def test_build_issue_doc():
result = github_util.build_issue_doc("someOrg", "someRepo", "issue title",
["line1", "line2"])
expected = """issue title
someorg_somerepo
line1
line2"""
assert result == expected
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
pytest.main()
```
#### File: py/code_intelligence/run_with_auto_restart.py
```python
import argparse
import subprocess
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
class RestartEventHandler(LoggingEventHandler):
def __init__(self, command):
"""Create the handler.
Args:
command: The command to run.
"""
super(RestartEventHandler, self).__init__()
self._command = command
self._p = None
self.restart()
def restart(self):
if self._p:
logging.info("Terminating the current process")
self._p.terminate()
logging.info(f"Starting a proces to run command: {' '.join(self._command)}")
self._p = subprocess.Popen(self._command)
def on_any_event(self, event):
super().on_any_event(event)
self.restart()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
parser = argparse.ArgumentParser(description="Run and auto restart.")
parser.add_argument('--directory', dest="directories",
action="append",
help="A directory to watch for changes.")
args, unparsed = parser.parse_known_args()
# Remove "--" as an argument
while True:
if unparsed[0].strip() == "--":
del unparsed[0]
continue
break
event_handler = RestartEventHandler(unparsed)
observer = Observer()
for d in args.directories:
logging.info(f"Watching {d}")
observer.schedule(event_handler, d, recursive=True)
observer.start()
try:
while True:
if event_handler._p:
if event_handler._p.poll() is not None:
# TODO(jlewi): would it be better to exit to force a container restart
logging.info("Process has terminated restarting it")
event_handler.restart()
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
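# Example invocation (a sketch; the watched directory and wrapped command are placeholders):
# python run_with_auto_restart.py --directory ./src -- python server.py --port 8080
# Everything after "--" is the command that is (re)started whenever a watched file changes.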
```
#### File: py/label_microservice/cli.py
```python
import logging
import json
import fire
from code_intelligence import graphql
from code_intelligence import github_util
from code_intelligence import util
from google.cloud import pubsub
import subprocess
DEFAULT_TOPIC = "projects/issue-label-bot-dev/topics/TEST_event_queue"
class Cli:
@staticmethod
def get_issue(url):
"""Get the data for a specific issue.
Args:
url: URL of the issue
"""
gh_client = graphql.GraphQLClient()
result = github_util.get_issue(url, gh_client)
print(json.dumps(result, indent=4, sort_keys=True))
@staticmethod
def label_issue(issue, pubsub_topic=DEFAULT_TOPIC):
"""Label a specific issue.
Args:
issue: The issue in the form {owner}/{repo}#{issue}
pubsub_topic: (Optional) the pubsub topic to publish to. This should
be in the form projects/{project}/topics/{topic_name}
"""
publisher = pubsub.PublisherClient()
repo_owner, repo_name, issue_num = util.parse_issue_spec(issue)
if not repo_owner:
raise ValueError(f"issue={issue} didn't match regex "
f"{util.ISSUE_RE.pattern}")
# all attributes being published to pubsub must be sent as text strings
publisher.publish(pubsub_topic,
b'New issue.',
# TODO(jlewi): Does the backend depend on the client
# providing the installation id
installation_id="",
repo_owner=repo_owner,
repo_name=repo_name,
issue_num=str(issue_num))
@staticmethod
def pod_logs(pod):
"""Pretty print pod logs
Args:
pod: Name of the pod
"""
output = subprocess.check_output(["kubectl", "logs", pod])
for l in output.splitlines():
try:
entry = json.loads(l)
filename = entry.get("filename")
line = entry.get("line")
message = entry.get("message")
print(f"{filename}:{line}: {message}")
except json.JSONDecodeError:
print(l)
continue
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(message)s|%(pathname)s|%(lineno)d|'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
fire.Fire(Cli)
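# Example invocations (a sketch; fire exposes each static method as a subcommand and the
# org/repo/issue names below are placeholders):
# python cli.py get_issue https://github.com/someOrg/someRepo/issues/123
# python cli.py label_issue 'someOrg/someRepo#123' --pubsub_topic projects/my-project/topics/my_topic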
``` |
{
"source": "jlewi/Issue-Label-Bot",
"score": 2
} |
#### File: argo/src/preprocess.py
```python
import pandas as pd
import dask.dataframe as df
from dask_ml.preprocessing import OneHotEncoder
import numpy as np
from keras.utils.np_utils import to_categorical
import time
from sklearn.model_selection import train_test_split
from typing import Callable, List
from keras.preprocessing.text import text_to_word_sequence
from keras.preprocessing.sequence import pad_sequences
from dask import array as da
from textacy.preprocess import preprocess_text
import dask.multiprocessing
from pathos.multiprocessing import cpu_count
from collections import Counter
from collections import defaultdict
import h5py
start_time = time.time()
dask.config.set(scheduler='processes')
output_dir = "/data/"
base_url = 'https://storage.googleapis.com/codenet/issue_labels/'
dd = df.from_pandas(pd.concat([pd.read_csv(base_url+f'00000000000{i}.csv.gz') for i in range(10)]), npartitions=128)
print(dd.head())
def textacy_cleaner(text: str) -> str:
"""
Defines the default function for cleaning text.
This function operates over a list.
"""
return preprocess_text(text,
fix_unicode=True,
lowercase=True,
transliterate=True,
no_urls=True,
no_emails=True,
no_phone_numbers=True,
no_numbers=True,
no_currency_symbols=True,
no_punct=True,
no_contractions=False,
no_accents=True)
def process_document(doc: str) -> List[str]:
doc = text_to_word_sequence(textacy_cleaner(doc))
return ["_start_"] + doc + ["_end_"]
test_data = 'hello world 314-903-3072, <EMAIL> wee woo'
assert process_document(test_data) == ['_start_', 'hello', 'world', 'phone', 'email', 'wee', 'woo', '_end_']
bodies_parsed = dd["body"].apply(process_document)
titles_parsed = dd["title"].apply(process_document)
now = time.time() - start_time
print(f"tokenized {now}")
def to_one_hot(df):
return to_categorical(df.values, num_classes=3)
targets = dd["class_int"].to_frame().map_partitions(to_one_hot)
body_quant = int(bodies_parsed.apply(len).quantile(q=0.75).compute())
title_quant = int(titles_parsed.apply(len).quantile(q=0.75).compute())
def count_words(partition):
c = Counter()
def count(p):
c.update(p)
return c
return partition.apply(count).iloc[0]
body_counts = bodies_parsed.map_partitions(count_words).compute()
body_counts = sum(body_counts.tolist(), Counter())
title_counts = titles_parsed.map_partitions(count_words).compute()
title_counts = sum(title_counts.tolist(), Counter())
words_to_keep_body = body_counts.most_common(n=8000)
body_vocab = defaultdict(lambda: 1)
body_vocab.update({x:i+2 for i, x in enumerate([x[0] for x in words_to_keep_body])})
words_to_keep_title = title_counts.most_common(n=4500)
titles_vocab = defaultdict(lambda: 1)
titles_vocab.update({x:i+2 for i, x in enumerate([x[0] for x in words_to_keep_title])})
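# Note: defaultdict(lambda: 1) maps any out-of-vocabulary token to index 1, while kept words get
# indices 2..N+1; index 0 is presumably reserved for the padding value that pad_sequences inserts below.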
numer_bodies = bodies_parsed.apply(lambda x: [body_vocab[w] for w in x])
numer_titles = titles_parsed.apply(lambda x: [titles_vocab[w] for w in x])
def pad_partition(numerized_doc):
if type(numerized_doc) != list:
return
return pad_sequences([numerized_doc], maxlen=body_quant, truncating='post')[0]
processed_bodies = numer_bodies.apply(pad_partition)
processed_titles = numer_titles.apply(pad_partition)
num_titles = processed_titles.count().compute()
num_bodies = processed_bodies.count().compute()
now = time.time() - start_time
print(f"saving {now}")
processed_titles = da.stack(processed_titles.values.compute())
processed_bodies = da.stack(processed_bodies.values.compute())
f = h5py.File('/data/output.hdf5', 'w')
f.create_dataset('/titles', data=processed_titles.compute())
f.create_dataset('/bodies', data=processed_bodies.compute())
f.create_dataset('/targets', data=targets.compute())
f.close()
now = time.time() - start_time
print(f"saved {now}")
``` |
{
"source": "jlewi/metadata",
"score": 2
} |
#### File: python/tests/test_notebook.py
```python
import tempfile
import logging
import os
import papermill
logger = logging.getLogger(__name__)
FILE_DIR = os.path.dirname(__file__)
NOTEBOOK_REL_PATH = "../sample/demo.ipynb"
NOTEBOOK_ABS_PATH = os.path.normpath(os.path.join(FILE_DIR, NOTEBOOK_REL_PATH))
GRPC_HOST = "127.0.0.1"
GRPC_PORT = 8081
def test_notebook():
temp_dir = tempfile.mkdtemp()
notebook_output_path = os.path.join(temp_dir, "out.ipynb")
parameters = {
"METADATA_STORE_HOST": GRPC_HOST,
"METADATA_STORE_PORT": GRPC_PORT,
}
papermill.execute_notebook(NOTEBOOK_ABS_PATH,
notebook_output_path,
cwd=os.path.dirname(NOTEBOOK_ABS_PATH),
parameters=parameters,
log_output=True)
check_notebook_output(notebook_output_path)
def check_notebook_output(output_path):
num_cells = 0
num_completed_cells = 0
with open(output_path, 'r') as f:
for lines in f:
if lines.find('"status": "completed"') != -1:
num_completed_cells = num_completed_cells + 1
if lines.find('cell_type') != -1:
num_cells = num_cells + 1
with open(output_path, 'r') as f:
assert num_cells == num_completed_cells, "Not all cells succeeded. Notebook output:\n {}".format(
f.read())
``` |
{
"source": "jlewi/testing",
"score": 2
} |
#### File: kubeflow/testing/prow_artifacts.py
```python
import argparse
import logging
import json
import os
import time
from google.cloud import storage # pylint: disable=no-name-in-module
from kubeflow.testing import util
# TODO(jlewi): Replace create_finished in tensorflow/k8s/py/prow.py with this
# version. We should do that when we switch tensorflow/k8s to use Argo instead
# of Airflow.
def create_started():
"""Return a string containing the contents of started.json for gubernator.
"""
# See:
# https://github.com/kubernetes/test-infra/tree/master/gubernator#job-artifact-gcs-layout
# For a list of fields expected by gubernator
started = {
"timestamp": int(time.time()),
"repos": {
},
}
repo_owner = os.getenv("REPO_OWNER", "")
repo_name = os.getenv("REPO_NAME", "")
if repo_owner:
sha = os.getenv("PULL_PULL_SHA", "")
if not sha:
# Its a post submit job.
sha = os.getenv("PULL_BASE_SHA", "")
started["repos"][repo_owner + "/" + repo_name] = sha
PULL_REFS = os.getenv("PULL_REFS", "")
if PULL_REFS:
started["pull"] = PULL_REFS
return json.dumps(started)
# TODO(jlewi): Replace create_finished in tensorflow/k8s/py/prow.py with this
# version. We should do that when we switch tensorflow/k8s to use Argo instead
# of Airflow.
def create_finished(success):
"""Create a string containing the contents for finished.json.
Args:
success: Bool indicating whether the workflow succeeded or not.
"""
if success:
result = "SUCCESS"
else:
result = "FAILED"
finished = {
"timestamp": int(time.time()),
"result": result,
# Dictionary of extra key value pairs to display to the user.
# TODO(jlewi): Perhaps we should add the GCR path of the Docker image
# we are running in. We'd have to plumb this in from bootstrap.
"metadata": {},
}
return json.dumps(finished)
def get_gcs_dir(bucket):
"""Return the GCS directory for this job."""
pull_number = os.getenv("PULL_NUMBER")
repo_owner = os.getenv("REPO_OWNER")
repo_name = os.getenv("REPO_NAME")
job_name = os.getenv("JOB_NAME")
# GCS layout is defined here:
# https://github.com/kubernetes/test-infra/tree/master/gubernator#job-artifact-gcs-layout
pull_number = os.getenv("PULL_NUMBER")
repo_owner = os.getenv("REPO_OWNER")
repo_name = os.getenv("REPO_NAME")
if pull_number:
output = ("gs://{bucket}/pr-logs/pull/{owner}_{repo}/"
"{pull_number}/{job}/{build}").format(
bucket=bucket,
owner=repo_owner, repo=repo_name,
pull_number=pull_number,
job=os.getenv("JOB_NAME"),
build=os.getenv("BUILD_NUMBER"))
elif repo_owner:
# It is a postsubmit job
output = ("gs://{bucket}/logs/{owner}_{repo}/"
"{job}/{build}").format(
bucket=bucket, owner=repo_owner,
repo=repo_name, job=job_name,
build=os.getenv("BUILD_NUMBER"))
else:
# Its a periodic job
output = ("gs://{bucket}/logs/{job}/{build}").format(
bucket=bucket,
job=job_name,
build=os.getenv("BUILD_NUMBER"))
return output
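# Example (a sketch): for a presubmit run with PULL_NUMBER=123, REPO_OWNER=kubeflow,
# REPO_NAME=testing, JOB_NAME=kubeflow-testing-presubmit and BUILD_NUMBER=42 this returns
# gs://<bucket>/pr-logs/pull/kubeflow_testing/123/kubeflow-testing-presubmit/42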
def copy_artifacts(args):
"""Sync artifacts to GCS."""
job_name = os.getenv("JOB_NAME")
# GCS layout is defined here:
# https://github.com/kubernetes/test-infra/tree/master/gubernator#job-artifact-gcs-layout
pull_number = os.getenv("PULL_NUMBER")
repo_owner = os.getenv("REPO_OWNER")
repo_name = os.getenv("REPO_NAME")
output = get_gcs_dir(args.bucket)
if os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
logging.info("GOOGLE_APPLICATION_CREDENTIALS is set; configuring gcloud "
"to use service account.")
# Since a service account is set tell gcloud to use it.
util.run(["gcloud", "auth", "activate-service-account", "--key-file=" +
os.getenv("GOOGLE_APPLICATION_CREDENTIALS")])
util.run(["gsutil", "-m", "rsync", "-r", args.artifacts_dir, output])
def create_pr_symlink(args):
"""Create a 'symlink' in GCS pointing at the results for a PR.
This is a null op if PROW environment variables indicate this is not a PR
job.
"""
gcs_client = storage.Client()
# GCS layout is defined here:
# https://github.com/kubernetes/test-infra/tree/master/gubernator#job-artifact-gcs-layout
pull_number = os.getenv("PULL_NUMBER")
if not pull_number:
# Symlinks are only created for pull requests.
return ""
path = "pr-logs/directory/{job}/{build}.txt".format(
job=os.getenv("JOB_NAME"), build=os.getenv("BUILD_NUMBER"))
pull_number = os.getenv("PULL_NUMBER")
repo_owner = os.getenv("REPO_OWNER")
repo_name = os.getenv("REPO_NAME")
build_dir = ("gs://{bucket}/pr-logs/pull/{owner}_{repo}/"
"{pull_number}/{job}/{build}").format(
bucket=args.bucket,
owner=repo_owner, repo=repo_name,
pull_number=pull_number,
job=os.getenv("JOB_NAME"),
build=os.getenv("BUILD_NUMBER"))
source = util.to_gcs_uri(args.bucket, path)
target = get_gcs_dir(args.bucket)
logging.info("Creating symlink %s pointing to %s", source, target)
bucket = gcs_client.get_bucket(args.bucket)
blob = bucket.blob(path)
blob.upload_from_string(target)
def main(unparsed_args=None): # pylint: disable=too-many-locals
logging.getLogger().setLevel(logging.INFO) # pylint: disable=too-many-locals
# create the top-level parser
parser = argparse.ArgumentParser(
description="Create prow artifacts.")
parser.add_argument(
"--artifacts_dir",
default="",
type=str,
help="Directory to use for all the gubernator artifacts.")
subparsers = parser.add_subparsers()
#############################################################################
# Copy artifacts.
parser_copy = subparsers.add_parser(
"copy_artifacts", help="Copy the artifacts.")
parser_copy.add_argument(
"--bucket",
default="",
type=str,
help="Bucket to copy the artifacts to.")
parser_copy.set_defaults(func=copy_artifacts)
#############################################################################
# Create the pr symlink.
parser_link = subparsers.add_parser(
"create_pr_symlink", help="Create a symlink pointing at PR output dir; null "
"op if prow job is not a presubmit job.")
parser_link.add_argument(
"--bucket",
default="",
type=str,
help="Bucket to copy the artifacts to.")
parser_link.set_defaults(func=create_pr_symlink)
#############################################################################
# Process the command line arguments.
# Parse the args
args = parser.parse_args(args=unparsed_args)
# Setup a logging file handler. This way we can upload the log outputs
# to gubernator.
root_logger = logging.getLogger()
test_log = os.path.join(os.path.join(args.artifacts_dir, "artifacts"),
"logs", "prow_artifacts." + args.func.__name__ +
".log")
if not os.path.exists(os.path.dirname(test_log)):
os.makedirs(os.path.dirname(test_log))
file_handler = logging.FileHandler(test_log)
root_logger.addHandler(file_handler)
# We need to explicitly set the formatter because it will not pick up
# the BasicConfig.
formatter = logging.Formatter(fmt=("%(levelname)s|%(asctime)s"
"|%(pathname)s|%(lineno)d| %(message)s"),
datefmt="%Y-%m-%dT%H:%M:%S")
file_handler.setFormatter(formatter)
logging.info("Logging to %s", test_log)
args.func(args)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
main()
``` |
{
"source": "JlexZhong/ZDEM_View",
"score": 2
} |
#### File: ZDEM_View/model/io_plot.py
```python
import io
import os
import sys
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
import cv2
from PIL import Image
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtWidgets import QApplication, QWidget
import PyQt5.QtWidgets as QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.lines import Line2D
from matplotlib.ticker import FuncFormatter, FormatStrFormatter
import numpy as np
from matplotlib.figure import Figure
from matplotlib.patches import Circle
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import pyqtgraph as pg
from pyqtgraph.functions import mkBrush
class MatplotlibFigure(FigureCanvasQTAgg):
"""
创建一个画布类,并把画布放到FigureCanvasQTAgg
"""
def __init__(self, parent=None):
"""
:param parent:
:param filePrefix:
"""
self.figs = Figure(figsize=(10, 8), dpi=300)
super(MatplotlibFigure, self).__init__(self.figs) # activate self.figs in the parent class
self.setParent(parent)
self.axes = self.figs.add_subplot(111)
filepath = "E:\\Study\\Data_Visualization ui_pyqt5\\Data_Visualization\\V2.0\\example\\easyData\\all_0000003600.dat"
self.mpl_plot(filepath, xmove=-1000, ymove=-1000)
FigureCanvasQTAgg.setSizePolicy(
self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
# Notify the layout containing this widget that its size hint has changed so the layout adjusts automatically.
FigureCanvasQTAgg.updateGeometry(self)
def readData(self,filepath=None, xmove=0, ymove=0):
"""读取.dat格式文件内容
"""
flag = 0
WALL = []
BALL = []
CurrentStep = 0
BallNum = 0
ZDEM_File = open(filepath, 'r')
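# Layout of the .dat file, as assumed by the parser below: a "current_step" line, a "ball num"
# line, a wall section delimited by the " index id P1[0]" and " index id xF" headers, and a
# ball section delimited by the " index id x" and " index id m" headers.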
for line in ZDEM_File: # read the file line by line
if "current_step" in line: # current step number
step = line.split() # split the line on whitespace
CurrentStep = step[-1] # the last element of step is the step count
if "ball num" in line: # number of particles
ball_num = line.split()
BallNum = ball_num[-1]
if " index id P1[0]" in line: # marks the start of the wall data
flag = 1
continue
if " index id xF" in line: # this line ends the wall-coordinate data
flag = 0
if " index id x" in line: # marks the start of the ball data
flag = 2
continue
if " index id m" in line: # this line ends the ball-coordinate data
flag = 0
if flag == 0:
continue
if flag == 1:
wall = line.split() # split the line on whitespace
# take columns 3 to 6 and convert the strings to floats
wall = [float(i) for i in wall[2:6]]
WALL.append(wall) # x, y coordinates of wall points p1 and p2
if flag == 2:
ball = line.split() # split the line on whitespace
ball = [float(i) for i in ball[2:6]]
BALL.append(ball) # ball x, y coordinates plus radius and color
del WALL[-1] # remove the trailing empty entry
del BALL[-1] # remove the trailing empty entry
ZDEM_File.close() # close the file handle to free resources
# self.updata_progressbar_signal.emit(0)
WALL, BALL, CurrentStep = np.array(
WALL), np.array(BALL), np.array(CurrentStep)
# RGB values of the ZDEM color palette
ZDEMColor_num = np.array([[0.85, 0.85, 0.85],
[0.00, 1.00, 0.00],
[1.00, 1.00, 0.00],
[1.00, 0.00, 0.00],
[0.90, 0.90, 0.90],
[0.15, 0.15, 0.15],
[0.50, 0.50, 0.50],
[0.00, 0.00, 1.00],
[0.00, 1.00, 1.00],
[1.00, 0.00, 1.00]])
ZDEMColor_code = ['#D9D9D9',
'#00FF00',
'#FFFF00',
'#FF0000',
'#F5F5F5',
'#262626',
'#808080',
'#0000FF',
'#00FFFF',
'#FF00FF']
# Extract the required columns from the arrays
BALL_x = BALL[:, 0]
BALL_y = BALL[:, 1]
BALL_r = BALL[:, 2]
BALL_c = BALL[:, 3]
WALL_P1_x = WALL[4:7, 0]
WALL_P1_y = WALL[4:7, 1]
WALL_P2_x = WALL[4:7, 2]
WALL_P2_y = WALL[4:7, 3]
# Apply the x/y offsets
for i in range(len(WALL_P1_x)):
WALL_P1_x[i] = WALL_P1_x[i] + xmove
WALL_P1_y[i] = WALL_P1_y[i] + ymove
WALL_P2_x[i] = WALL_P2_x[i] + xmove
WALL_P2_y[i] = WALL_P2_y[i] + ymove
for i in range(len(BALL_x)):
BALL_x[i] = BALL_x[i] + xmove
BALL_y[i] = BALL_y[i] + ymove
# begin_plot_signal.emit(id) # emit signal: start plotting
ball_c = [] # colors in #xxxxxx hex format
for i in range(len(BALL_x)):
color_num = BALL_c[i]
color_num = int(color_num)
ballColor = ZDEMColor_num[color_num]
ball_c.append(ballColor)
return BALL_x, BALL_y, BALL_r, ball_c, WALL_P1_x, WALL_P1_y, WALL_P2_x, WALL_P2_y
def mpl_plot(self,filepath=None, xmove=0, ymove=0):
BALL_x, BALL_y, BALL_r, BALL_c, WALL_P1_x, WALL_P1_y, WALL_P2_x, WALL_P2_y = self.readData(filepath, xmove, ymove)
self.axes.cla()
for i in range(len(BALL_x)):
ball_x = BALL_x[i]
ball_y = BALL_y[i]
ball_r = BALL_r[i]
ball_c = BALL_c[i]
# draw the particle as a circle patch
cir = Circle(xy=(ball_x, ball_y),
radius=ball_r, facecolor=ball_c)
self.axes.add_patch(cir)
print('ball id:', i)
self.draw()
class MplWidget(QWidget):
"""Qt控件,用于嵌入matplotlib画布和工具栏
Args:
QWidget ([type]): [description]
"""
def __init__(self, parent=None):
"""
:param parent:
"""
QWidget.__init__(self, parent)
self.qCanvas = MatplotlibFigure(parent)
self.mpl_toolbar = NavigationToolbar(self.qCanvas, self) # create the toolbar
# Create the layout and add the canvas and toolbar objects to this QWidget
self.vbl = QtWidgets.QVBoxLayout(self)
self.vbl.addWidget(self.qCanvas)
self.vbl.addWidget(self.mpl_toolbar)
if __name__ == '__main__':
app = QApplication(sys.argv)
ui = MplWidget()
ui.show()
sys.exit(app.exec_())
```
#### File: ZDEM_View/model/plot.py
```python
import io
import os
import cv2
from PIL import Image
from PyQt5.QtCore import pyqtSignal, QObject
from matplotlib.lines import Line2D
from matplotlib.ticker import FuncFormatter, FormatStrFormatter
import numpy as np
from matplotlib.patches import Circle
from matplotlib.ticker import MultipleLocator
import pyqtgraph as pg
from pyqtgraph.functions import mkBrush
"""
读取和绘图文件,负责读取文件和图像的绘制
"""
class _plot(object):
updata_progressbar_signal = pyqtSignal(int)
def __init__(self):
# self.wMain = myUi
super().__init__()
self.WALL = []
self.BALL = []
self.CurrentStep = 0
self.BallNum = 0
self.xmove = 0
self.ymove = 0
self.wallShow = "true"
self.units = 1
self.canvasObj = None
self.pagesize = 14
def readData(self, filepath):
filename = os.path.split(filepath)[1]
self.filePrefix = os.path.splitext(filename)[0]
flag = 0
self.WALL = []
self.BALL = []
self.CurrentStep = 0
self.BallNum = 0
ZDEM_File = open(filepath, 'r')
for line in ZDEM_File: # 逐行读取文件
if "current_step" in line: # 当前所在步数
step = line.split() # 将该行(list)以空格“ ”进行切片
self.CurrentStep = step[-1] # 取step的最后一个元素作为步数
if "ball num" in line: # 获取颗粒个数
ball_num = line.split()
self.BallNum = ball_num[-1]
if " index id P1[0]" in line: # 标记wall数据开始
flag = 1
continue
if " index id xF" in line: # 当读取到此行时,含wall坐标的数据结束
flag = 0
if " index id x" in line: # 标记ball数据开始
flag = 2
continue
if " index id m" in line: # 当读取到此行时,含ball坐标的数据结束
flag = 0
if flag == 0:
continue
if flag == 1:
wall = line.split() # 将该行(list)以空格“ ”进行切片
# 读取第3到第6列,并用for循环把字符串转变为浮点型
wall = [float(i) for i in wall[2:6]]
self.WALL.append(wall) # wall两点p1、p2的x、y坐标
if flag == 2:
ball = line.split() # 将该行(list)以空格“ ”进行切片
ball = [float(i) for i in ball[2:6]]
self.BALL.append(ball) # ball的x、y坐标以及半径、颜色
del self.WALL[-1] # 删除最后的空行
del self.BALL[-1] # 删除最后的空行
ZDEM_File.close() # 关闭对象,避免占用过多资源
self.updata_progressbar_signal.emit(0)
def plotJPG(self, canvasObj):
self.canvasObj = canvasObj
self.xmove = 0
self.ymove = 0
self.wallShow = 'true'
self.units = 1
self.canvasObj.axes.cla() # clear the canvas; the plot contents must be re-added afterwards
# Convert to numpy arrays for easier processing
self.WALL, self.BALL, self.CurrentStep = np.array(
self.WALL), np.array(self.BALL), np.array(self.CurrentStep)
# RGB values of the ZDEM color palette
Color = np.array([[0.85, 0.85, 0.85],
[0.00, 1.00, 0.00],
[1.00, 1.00, 0.00],
[1.00, 0.00, 0.00],
[0.90, 0.90, 0.90],
[0.15, 0.15, 0.15],
[0.50, 0.50, 0.50],
[0.00, 0.00, 1.00],
[0.00, 1.00, 1.00],
[1.00, 0.00, 1.00]])
ZDEM_color = ['#D9D9D9',
'#00FF00',
'#FFFF00',
'#FF0000',
'#F5F5F5',
'#262626',
'#808080',
'#0000FF',
'#00FFFF',
'#FF00FF']
# Extract the required columns from the arrays
BALL_x = self.BALL[:, 0]
BALL_y = self.BALL[:, 1]
BALL_r = self.BALL[:, 2]
BALL_c = self.BALL[:, 3]
WALL_P1_x = self.WALL[4:7, 0]
WALL_P1_y = self.WALL[4:7, 1]
WALL_P2_x = self.WALL[4:7, 2]
WALL_P2_y = self.WALL[4:7, 3]
# Get the x/y offsets and adjust the ball and wall coordinates
# xmove = min(min(WALL_P1_x),min(WALL_P2_x))
# ymove = min(min(WALL_P1_y),min(WALL_P2_y))
for i in range(len(WALL_P1_x)):
WALL_P1_x[i] = WALL_P1_x[i] + self.xmove
WALL_P1_y[i] = WALL_P1_y[i] + self.ymove
WALL_P2_x[i] = WALL_P2_x[i] + self.xmove
WALL_P2_y[i] = WALL_P2_y[i] + self.ymove
for i in range(len(BALL_x)):
BALL_x[i] = BALL_x[i] + self.xmove
BALL_y[i] = BALL_y[i] + self.ymove
# Loop over all particles to get their coordinates, radius and color, and draw them with matplotlib Circle patches
# First set the axis max/min
b_xmax = max(BALL_x)
b_ymax = max(BALL_y)
w_p1_xmax = max(WALL_P1_x)
w_p2_xmax = max(WALL_P2_x)
w_p1_ymax = max(WALL_P1_y)
w_p2_ymax = max(WALL_P2_y)
w_xmax = max(w_p1_xmax, w_p2_xmax)
w_ymax = max(w_p1_ymax, w_p2_ymax)
xmax = max(b_xmax, w_xmax)
ymax = max(b_ymax, w_ymax)
b_xmin = min(BALL_x)
b_ymin = min(BALL_y)
w_p1_xmin = min(WALL_P1_x)
w_p2_xmin = min(WALL_P2_x)
w_p1_ymin = min(WALL_P1_y)
w_p2_ymin = min(WALL_P2_y)
w_xmin = min(w_p1_xmin, w_p2_xmin)
w_ymin = min(w_p1_ymin, w_p2_ymin)
xmin = min(b_xmin, w_xmin)
ymin = min(b_ymin, w_ymin)
self.canvasObj.axes.set_xlim(0, xmax)
self.canvasObj.axes.set_ylim(0, ymax)
ball_c = []
for i in range(len(BALL_x)):
color_num = BALL_c[i]
color_num = int(color_num)
ballColor = ZDEM_color[color_num]
ball_c.append(ballColor)
ballNUM = len(BALL_r)
# Draw as a scatter plot; convert radii from data coordinates to point sizes
rr_pix = (self.canvasObj.axes.transData.transform(np.vstack([BALL_r, BALL_r]).T) -
self.canvasObj.axes.transData.transform(np.vstack([np.zeros(ballNUM), np.zeros(ballNUM)]).T))
rpix, _ = rr_pix.T
size_pt = (2 * rpix / self.canvasObj.figs.dpi * 72) ** 2
scat = self.canvasObj.axes.scatter(BALL_x, BALL_y, s=size_pt, c=ball_c)
for i in range(len(BALL_x)):
ball_x = BALL_x[i]
ball_y = BALL_y[i]
ball_r = BALL_r[i]
# colors in the .dat file are 0-7 and must be mapped to the corresponding RGB values
# color_num = BALL_c[i]
# color_num = int(color_num)
# ball_c = Color[color_num]
# 绘图
# cir = Circle(xy=(ball_x, ball_y), radius=ball_r, facecolor=ball_c)
# self.canvasObj.axes.add_patch(cir)
BallNum_45 = (int(self.BallNum) // 45)
for n in range(45):
if i == (BallNum_45 * (n + 1)):
self.updata_progressbar_signal.emit(1)
n = 0
if (self.wallShow == 'true'):
for n in range(len(WALL_P1_x)):
p1x = WALL_P1_x[n]
p1y = WALL_P1_y[n]
p2x = WALL_P2_x[n]
p2y = WALL_P2_y[n]
p12x = [p1x, p2x]
p12y = [p1y, p2y]
self.canvasObj.axes.plot(p12x, p12y, c='k')
line1 = [(p1x, p1y), (p2x, p2y)]
(line1_xs, line1_ys) = zip(*line1)
# 创建两条线,并添加
self.canvasObj.axes.add_line(
Line2D(line1_xs, line1_ys, linewidth=1, color='black'))
self.updata_progressbar_signal.emit(2)
# plt.plot(VBOXx,VBOXy,'.')
"""for i in range(len(wp1x)):
# 两条line的数据
line1 = [(wp1x[i], wp1y[i]), (wp2x[i], wp2y[i])]
(line1_xs, line1_ys) = zip(*line1)
# 创建两条线,并添加
self.canvasObj.axes.add_line(Line2D(line1_xs, line1_ys, linewidth=1, color='black'))"""
self.canvasObj.axes.axis('scaled')
def unitsformat(x, pos):
return '{:n}'.format(x / self.units)
xmajorformatter = FuncFormatter(unitsformat)
self.canvasObj.axes.xaxis.set_major_formatter(xmajorformatter)
ymajorformatter = FuncFormatter(unitsformat)
self.canvasObj.axes.yaxis.set_major_formatter(ymajorformatter)
# Set the minor tick spacing
xminorLocator = MultipleLocator(1000)
yminorLocator = MultipleLocator(1000)
self.canvasObj.axes.xaxis.set_minor_locator(xminorLocator)
self.canvasObj.axes.yaxis.set_minor_locator(yminorLocator)
'''
b_xmax = BALL_x[0]
for i in range(len(BALL_x)):
b_xmax = max(BALL_x[i],b_xmax)
'''
b_xmax = max(BALL_x)
b_ymax = max(BALL_y)
w_p1_xmax = max(WALL_P1_x)
w_p2_xmax = max(WALL_P2_x)
w_p1_ymax = max(WALL_P1_y)
w_p2_ymax = max(WALL_P2_y)
w_xmax = max(w_p1_xmax, w_p2_xmax)
w_ymax = max(w_p1_ymax, w_p2_ymax)
xmax = max(b_xmax, w_xmax)
ymax = max(b_ymax, w_ymax)
b_xmin = min(BALL_x)
b_ymin = min(BALL_y)
w_p1_xmin = min(WALL_P1_x)
w_p2_xmin = min(WALL_P2_x)
w_p1_ymin = min(WALL_P1_y)
w_p2_ymin = min(WALL_P2_y)
w_xmin = min(w_p1_xmin, w_p2_xmin)
w_ymin = min(w_p1_ymin, w_p2_ymin)
xmin = min(b_xmin, w_xmin)
ymin = min(b_ymin, w_ymin)
wi = xmax - xmin
hi = ymax - ymin
wcm = self.pagesize
winch = wcm / 2.54
hinch = winch / wi * hi
self.canvasObj.axes.set_xlim(0, xmax)
self.canvasObj.axes.set_ylim(0, ymax)
self.canvasObj.figs.set_size_inches(w=winch, h=hinch)
        self.canvasObj.figs.canvas.draw()  # note: redraw the canvas itself (figs.canvas)
        self.canvasObj.figs.canvas.flush_events()  # flush pending canvas events
self.canvasObj.figs.savefig(
"./temp save files/" + self.filePrefix + ".jpg", dpi=100, bbox_inches="tight")
self.updata_progressbar_signal.emit(3)
class Plot(QObject):
updata_progressbar_signal = pyqtSignal(int)
updata_canvas_signal = pyqtSignal(int)
begin_plot_signal = pyqtSignal(int)
def __init__(self, param_list, canvasObj, filepath):
super().__init__()
self.canvasObj = canvasObj
self.param_list = param_list
self.filepath = filepath
        # plotting parameters
self.xmove = self.param_list[0]
self.ymove = self.param_list[1]
self.xmin = self.param_list[2]
self.xmax = self.param_list[3]
self.ymin = self.param_list[4]
self.ymax = self.param_list[5]
self.ballStyle = self.param_list[6]
self.wallShow = self.param_list[7]
self.wallLineSize = self.param_list[8]
self.colorStyle = self.param_list[9]
self.titleText = self.param_list[10]
self.titleTextFontSize = self.param_list[11]
self.xText = self.param_list[12]
self.xTextFontSize = self.param_list[13]
self.yText = self.param_list[14]
self.yTextFontSize = self.param_list[15]
self.mainTickInterval = self.param_list[16]
self.minorTickInterval = self.param_list[17]
self.isShowTop = self.param_list[18]
self.isShowBottom = self.param_list[19]
self.isShowLeft = self.param_list[20]
self.isShowRight = self.param_list[21]
self.units = self.param_list[22]
        # figure size
self.pagesize = 14
def readData(self):
filename = os.path.split(self.filepath)[1]
self.filePrefix = os.path.splitext(filename)[0]
flag = 0
self.WALL = []
self.BALL = []
self.CurrentStep = 0
self.BallNum = 0
ZDEM_File = open(self.filepath, 'r')
        for line in ZDEM_File:  # read the file line by line
            if "current_step" in line:  # current step number
                step = line.split()  # split the line on whitespace
                self.CurrentStep = step[-1]  # the last element is the step number
            if "ball num" in line:  # number of particles
                ball_num = line.split()
                self.BallNum = ball_num[-1]
            if " index id P1[0]" in line:  # marks the start of the wall data
                flag = 1
                continue
            if " index id xF" in line:  # this line ends the wall-coordinate data
                flag = 0
            if " index id x" in line:  # marks the start of the ball data
                flag = 2
                continue
            if " index id m" in line:  # this line ends the ball-coordinate data
                flag = 0
            if flag == 0:
                continue
            if flag == 1:
                wall = line.split()  # split the line on whitespace
                # take columns 3 to 6 and convert the strings to floats
                wall = [float(i) for i in wall[2:6]]
                self.WALL.append(wall)  # x and y coordinates of wall endpoints p1 and p2
            if flag == 2:
                ball = line.split()  # split the line on whitespace
                ball = [float(i) for i in ball[2:6]]
                self.BALL.append(ball)  # ball x and y coordinates, radius and colour
        del self.WALL[-1]  # drop the trailing empty row
        del self.BALL[-1]  # drop the trailing empty row
        ZDEM_File.close()  # close the file to free resources
# self.updata_progressbar_signal.emit(0)
def plotJPG(self, id):
        # initialisation
        self.canvasObj.qCanvas.axes.clear()  # clear the canvas
        # convert to numpy arrays
self.WALL, self.BALL, self.CurrentStep = np.array(
self.WALL), np.array(self.BALL), np.array(self.CurrentStep)
        # RGB values of the ZDEM colour palette
ZDEMColor_num = np.array([[0.85, 0.85, 0.85],
[0.00, 1.00, 0.00],
[1.00, 1.00, 0.00],
[1.00, 0.00, 0.00],
[0.90, 0.90, 0.90],
[0.15, 0.15, 0.15],
[0.50, 0.50, 0.50],
[0.00, 0.00, 1.00],
[0.00, 1.00, 1.00],
[1.00, 0.00, 1.00]])
ZDEMColor_code = ['#D9D9D9',
'#00FF00',
'#FFFF00',
'#FF0000',
'#F5F5F5',
'#262626',
'#808080',
'#0000FF',
'#00FFFF',
'#FF00FF']
        # extract the required columns from the arrays
self.BALL_x = self.BALL[:, 0]
self.BALL_y = self.BALL[:, 1]
self.BALL_r = self.BALL[:, 2]
self.BALL_c = self.BALL[:, 3]
self.WALL_P1_x = self.WALL[4:7, 0]
self.WALL_P1_y = self.WALL[4:7, 1]
self.WALL_P2_x = self.WALL[4:7, 2]
self.WALL_P2_y = self.WALL[4:7, 3]
        # apply the coordinate offsets
for i in range(len(self.WALL_P1_x)):
self.WALL_P1_x[i] = self.WALL_P1_x[i] + self.xmove
self.WALL_P1_y[i] = self.WALL_P1_y[i] + self.ymove
self.WALL_P2_x[i] = self.WALL_P2_x[i] + self.xmove
self.WALL_P2_y[i] = self.WALL_P2_y[i] + self.ymove
for i in range(len(self.BALL_x)):
self.BALL_x[i] = self.BALL_x[i] + self.xmove
self.BALL_y[i] = self.BALL_y[i] + self.ymove
self.plot_axis()
        self.begin_plot_signal.emit(id)  # signal that plotting has started
        ball_c = []  # colours in '#xxxxxx' format
for i in range(len(self.BALL_x)):
color_num = self.BALL_c[i]
color_num = int(color_num)
ballColor = ZDEMColor_code[color_num]
ball_c.append(ballColor)
        if self.ballStyle == 'point':  # draw a scatter plot
ballNUM = len(self.BALL_r)
            # scatter plot: convert the radii to pixel coordinates
rr_pix = (self.canvasObj.qCanvas.axes.transData.transform(np.vstack([self.BALL_r, self.BALL_r]).T) -
self.canvasObj.qCanvas.axes.transData.transform(
np.vstack([np.zeros(ballNUM), np.zeros(ballNUM)]).T))
rpix, _ = rr_pix.T
size_pt = (rpix / self.canvasObj.qCanvas.figs.dpi * 72) ** 2
scat = self.canvasObj.qCanvas.axes.scatter(
self.BALL_x, self.BALL_y, s=size_pt, c=ball_c)
        if self.ballStyle == 'circle':  # draw the particles as 2-D circles
for i in range(len(self.BALL_x)):
ball_x = self.BALL_x[i]
ball_y = self.BALL_y[i]
ball_r = self.BALL_r[i]
                # colours in the .dat file are 0-7 and map to the corresponding RGB values
color_num = self.BALL_c[i]
color_num = int(color_num)
ball_c = ZDEMColor_num[color_num]
                # drawing
cir = Circle(xy=(ball_x, ball_y),
radius=ball_r, facecolor=ball_c)
self.canvasObj.qCanvas.axes.add_patch(cir)
if (self.wallShow == True):
for n in range(len(self.WALL_P1_x)):
p1x = self.WALL_P1_x[n]
p1y = self.WALL_P1_y[n]
p2x = self.WALL_P2_x[n]
p2y = self.WALL_P2_y[n]
p12x = [p1x, p2x]
p12y = [p1y, p2y]
line1 = [(p1x, p1y), (p2x, p2y)]
(line1_xs, line1_ys) = zip(*line1)
                # create the wall segment and add it to the axes
self.canvasObj.qCanvas.axes.add_line(
Line2D(line1_xs, line1_ys, linewidth=self.wallLineSize, color='black'))
# self.canvasObj.qCanvas.axes.margins(0,0)
# self.canvasObj.qCanvas.axes.tick_params(which='both', width=0.5, pad=1)
# self.canvasObj.qCanvas.axes.set_xlim(xmin, xmax)
# self.canvasObj.qCanvas.axes.set_ylim(xmin, ymax)
# self.canvasObj.qCanvas.axes.tick_params(bottom=True, top=False, width=1, colors='black')
# self.canvasObj.qCanvas.axes.tick_params(left=True, right=False, width=1, colors='black')
# self.canvasObj.qCanvas.axes.tick_params(top='off',bottom='on',left='on',right='off')
        # the top and right tick labels are hidden by default
# self.canvasObj.qCanvas.axes.tick_params(bottom=False,top = False,left=True, right=False)
self.updata_canvas_signal.emit(id)
self.canvasObj.qCanvas.figs.canvas.draw_idle()
# self.canvasObj.qCanvas.figs.canvas.draw()
        # self.canvasObj.qCanvas.figs.canvas.flush_events()  # refresh the canvas
# self.canvasObj.qCanvas.figs.savefig("./temp save files/"+self.filePrefix+".png",dpi=100,bbox_inches="tight")
def plot_axis(self):
"""
BUG: 绘图数过多时报错Traceback (most recent call last):
File "e:\Study\Data_Visualization ui_pyqt5\Data_Visualization\V2.0\model\Thread.py", line 145, in run
self.plotObj_test.plotJPG(self.id )
File "e:\Study\Data_Visualization ui_pyqt5\Data_Visualization\V2.0\model\plot.py", line 408, in plotJPG
self.plot_axis()
File "e:\Study\Data_Visualization ui_pyqt5\Data_Visualization\V2.0\model\plot.py", line 479, in plot_axis
w_p1_xmax = max(self.WALL_P1_x)
ValueError: max() arg is an empty sequence
"""
        # axis limits
        if (self.xmin == None) & (self.xmax == None) & (self.ymin == None) & (self.ymax == None):  # compute the limits if none were supplied
            # compute xmax and ymax
b_xmax = max(self.BALL_x)
b_ymax = max(self.BALL_y)
w_p1_xmax = max(self.WALL_P1_x)
w_p2_xmax = max(self.WALL_P2_x)
w_p1_ymax = max(self.WALL_P1_y)
w_p2_ymax = max(self.WALL_P2_y)
w_xmax = max(w_p1_xmax, w_p2_xmax)
w_ymax = max(w_p1_ymax, w_p2_ymax)
self.xmax = max(b_xmax, w_xmax)
self.ymax = max(b_ymax, w_ymax)
            # compute xmin and ymin
b_xmin = min(self.BALL_x)
b_ymin = min(self.BALL_y)
w_p1_xmin = min(self.WALL_P1_x)
w_p2_xmin = min(self.WALL_P2_x)
w_p1_ymin = min(self.WALL_P1_y)
w_p2_ymin = min(self.WALL_P2_y)
w_xmin = min(w_p1_xmin, w_p2_xmin)
w_ymin = min(w_p1_ymin, w_p2_ymin)
self.xmin = min(b_xmin, w_xmin)
self.ymin = min(b_ymin, w_ymin)
        # keep the axes equally scaled
        self.canvasObj.qCanvas.axes.axis('scaled')
        # set the x- and y-axis ranges
        self.canvasObj.qCanvas.axes.set_xlim(self.xmin, self.xmax)
        self.canvasObj.qCanvas.axes.set_ylim(self.ymin, self.ymax)
        # units
def unitsformat(x, pos):
return '{:n}'.format(x / self.units)
xmajorformatter = FuncFormatter(unitsformat)
self.canvasObj.qCanvas.axes.xaxis.set_major_formatter(xmajorformatter)
ymajorformatter = FuncFormatter(unitsformat)
self.canvasObj.qCanvas.axes.yaxis.set_major_formatter(ymajorformatter)
        # major ticks
        xmajorLocator = MultipleLocator(
            self.mainTickInterval)  # x-axis major tick interval
        # xmajorFormatter = FormatStrFormatter('%5.1f')  # format of the x tick labels
        ymajorLocator = MultipleLocator(
            self.mainTickInterval)  # y-axis major tick interval
        # ymajorFormatter = FormatStrFormatter('%1.1f')  # format of the y tick labels
        # apply the major tick locators (the formatters are left at their defaults)
self.canvasObj.qCanvas.axes.xaxis.set_major_locator(xmajorLocator)
# self.canvasObj.qCanvas.axes.xaxis.set_major_formatter(xmajorFormatter)
self.canvasObj.qCanvas.axes.yaxis.set_major_locator(ymajorLocator)
# self.canvasObj.qCanvas.axes.yaxis.set_major_formatter(ymajorFormatter)
        # minor ticks
xminorLocator = MultipleLocator(self.minorTickInterval)
yminorLocator = MultipleLocator(self.minorTickInterval)
self.canvasObj.qCanvas.axes.xaxis.set_minor_locator(xminorLocator)
self.canvasObj.qCanvas.axes.yaxis.set_minor_locator(yminorLocator)
        # tick label font sizes
self.canvasObj.qCanvas.axes.tick_params(
axis='x', labelsize=self.xTextFontSize)
self.canvasObj.qCanvas.axes.tick_params(
axis='y', labelsize=self.yTextFontSize)
        # show/hide the axis spines
self.canvasObj.qCanvas.axes.spines['top'].set_visible(self.isShowTop)
self.canvasObj.qCanvas.axes.spines['right'].set_visible(
self.isShowRight)
self.canvasObj.qCanvas.axes.spines['bottom'].set_visible(
self.isShowBottom)
self.canvasObj.qCanvas.axes.spines['left'].set_visible(self.isShowLeft)
        # compute the figure size
wi = self.xmax - self.xmin
hi = self.ymax - self.ymin
wcm = self.pagesize
winch = wcm / 2.54
hinch = (winch) / wi * hi
self.canvasObj.qCanvas.figs.set_size_inches(w=winch, h=hinch)
class pg_plot(QObject):
updata_progressbar_signal = pyqtSignal(int)
updata_canvas_signal = pyqtSignal(int)
begin_plot_signal = pyqtSignal(int)
updata_pg_circle_signal = pyqtSignal(list)
updata_pg_wall_signal = pyqtSignal(list)
def __init__(self, param_list, plot_widget, filepath):
super().__init__()
self.plot_widget = plot_widget
self.param_list = param_list
self.filepath = filepath
        # plotting parameters
self.xmove = self.param_list[0]
self.ymove = self.param_list[1]
self.xmin = self.param_list[2]
self.xmax = self.param_list[3]
self.ymin = self.param_list[4]
self.ymax = self.param_list[5]
self.ballStyle = self.param_list[6]
self.wallShow = self.param_list[7]
self.wallLineSize = self.param_list[8]
self.colorStyle = self.param_list[9]
self.titleText = self.param_list[10]
self.titleTextFontSize = self.param_list[11]
self.xText = self.param_list[12]
self.xTextFontSize = self.param_list[13]
self.yText = self.param_list[14]
self.yTextFontSize = self.param_list[15]
self.mainTickInterval = self.param_list[16]
self.minorTickInterval = self.param_list[17]
self.isShowTop = self.param_list[18]
self.isShowBottom = self.param_list[19]
self.isShowLeft = self.param_list[20]
self.isShowRight = self.param_list[21]
self.units = self.param_list[22]
        # figure size
self.pagesize = 14
def readData(self):
filename = os.path.split(self.filepath)[1]
self.filePrefix = os.path.splitext(filename)[0]
flag = 0
self.WALL = []
self.BALL = []
self.CurrentStep = 0
self.BallNum = 0
""
ZDEM_File = open(self.filepath, 'r')
        for line in ZDEM_File:  # read the file line by line
            if "current_step" in line:  # current step number
                step = line.split()  # split the line on whitespace
                self.CurrentStep = step[-1]  # the last element is the step number
            if "ball num" in line:  # number of particles
                ball_num = line.split()
                self.BallNum = ball_num[-1]
            if " index id P1[0]" in line:  # marks the start of the wall data
                flag = 1
                continue
            if " index id xF" in line:  # this line ends the wall-coordinate data
                flag = 0
            if " index id x" in line:  # marks the start of the ball data
                flag = 2
                continue
            if " index id m" in line:  # this line ends the ball-coordinate data
                flag = 0
            if flag == 0:
                continue
            if flag == 1:
                wall = line.split()  # split the line on whitespace
                # take columns 3 to 6 and convert the strings to floats
                wall = [float(i) for i in wall[2:6]]
                self.WALL.append(wall)  # x and y coordinates of wall endpoints p1 and p2
            if flag == 2:
                ball = line.split()  # split the line on whitespace
                ball = [float(i) for i in ball[2:6]]
                self.BALL.append(ball)  # ball x and y coordinates, radius and colour
        del self.WALL[-1]  # drop the trailing empty row
        del self.BALL[-1]  # drop the trailing empty row
        ZDEM_File.close()  # close the file to free resources
        # convert to numpy arrays
self.WALL, self.BALL, self.CurrentStep = np.array(
self.WALL), np.array(self.BALL), np.array(self.CurrentStep)
        # RGB values of the ZDEM colour palette
ZDEMColor_num = np.array([[0.85, 0.85, 0.85],
[0.00, 1.00, 0.00],
[1.00, 1.00, 0.00],
[1.00, 0.00, 0.00],
[0.90, 0.90, 0.90],
[0.15, 0.15, 0.15],
[0.50, 0.50, 0.50],
[0.00, 0.00, 1.00],
[0.00, 1.00, 1.00],
[1.00, 0.00, 1.00]])
ZDEMColor_code = ['#D9D9D9',
'#00FF00',
'#FFFF00',
'#FF0000',
'#F5F5F5',
'#262626',
'#808080',
'#0000FF',
'#00FFFF',
'#FF00FF']
        # extract the required columns from the arrays
self.BALL_x = self.BALL[:, 0]
self.BALL_y = self.BALL[:, 1]
self.BALL_r = self.BALL[:, 2]
self.BALL_c = self.BALL[:, 3]
self.WALL_P1_x = self.WALL[4:7, 0]
self.WALL_P1_y = self.WALL[4:7, 1]
self.WALL_P2_x = self.WALL[4:7, 2]
self.WALL_P2_y = self.WALL[4:7, 3]
        # apply the coordinate offsets
for i in range(len(self.WALL_P1_x)):
self.WALL_P1_x[i] = self.WALL_P1_x[i] + self.xmove
self.WALL_P1_y[i] = self.WALL_P1_y[i] + self.ymove
self.WALL_P2_x[i] = self.WALL_P2_x[i] + self.xmove
self.WALL_P2_y[i] = self.WALL_P2_y[i] + self.ymove
for i in range(len(self.BALL_x)):
self.BALL_x[i] = self.BALL_x[i] + self.xmove
self.BALL_y[i] = self.BALL_y[i] + self.ymove
        # self.begin_plot_signal.emit(id)  # signal that plotting has started
        self.ball_c = []  # colours in '#xxxxxx' format
for i in range(len(self.BALL_x)):
color_num = self.BALL_c[i]
color_num = int(color_num)
ballColor = ZDEMColor_code[color_num]
self.ball_c.append(ballColor)
def plot_circle(self,id):
"""
使用QGraphicsEllipseItem绘制圆
Args:
id ([type]): [description]
"""
self.plot_axis()
circle_list = []
wall_list = []
        # particles
for i in range(len(self.BALL_x)):
x = self.BALL_x[i]
y = self.BALL_y[i]
r = self.BALL_r[i]
color = self.ball_c[i]
circle = pg.QtGui.QGraphicsEllipseItem(x - r, y - r, 2 * r, 2 * r)
circle.setPen(pg.mkPen(color=color,width=0))
circle.setBrush(pg.mkBrush(color))
circle_list.append(circle)
self.plot_widget.addItem(circle)
# self.updata_pg_circle_signal.emit(circle_list)
        # draw the walls
if (self.wallShow == True):
for n in range(len(self.WALL_P1_x)):
p1x = self.WALL_P1_x[n]
p1y = self.WALL_P1_y[n]
p2x = self.WALL_P2_x[n]
p2y = self.WALL_P2_y[n]
p12x = [p1x, p2x]
p12y = [p1y, p2y]
# plot_wall_item = pg.PlotItem(x=p12x,y=p12y,pen=pg.mkPen(width=3,color='k'))
                self.plot_widget.plot(x=p12x,y=p12y,pen=pg.mkPen(width=3,color='k'))  # draw the wall segment (pen width 3)
# wall_list.append(plot_wall_item)
# self.updata_pg_wall_signal.emit(wall_list)
        # emit the signal: plotting finished
self.updata_progressbar_signal.emit(id)
def plot_scatter(self,id):
self.plot_axis()
self.plot_item = pg.ScatterPlotItem(
size=5,
pen=pg.mkPen(None),
)
self.plot_item.addPoints(
x=self.BALL_x,
y=self.BALL_y,
brush=self.ball_c
)
self.plot_widget.addItem(self.plot_item)
        # draw the walls
if (self.wallShow == True):
for n in range(len(self.WALL_P1_x)):
p1x = self.WALL_P1_x[n]
p1y = self.WALL_P1_y[n]
p2x = self.WALL_P2_x[n]
p2y = self.WALL_P2_y[n]
p12x = [p1x, p2x]
p12y = [p1y, p2y]
                self.plot_widget.plot(x=p12x,y=p12y,pen=pg.mkPen(width=2))  # draw the wall segment (pen width 2)
        # emit the signal: plotting finished
self.updata_progressbar_signal.emit(id)
def plot_axis(self):
        if (self.xmin == None) & (self.xmax == None) & (self.ymin == None) & (self.ymax == None):  # compute the limits if none were supplied
            # compute xmax and ymax
b_xmax = max(self.BALL_x)
b_ymax = max(self.BALL_y)
w_p1_xmax = max(self.WALL_P1_x)
w_p2_xmax = max(self.WALL_P2_x)
w_p1_ymax = max(self.WALL_P1_y)
w_p2_ymax = max(self.WALL_P2_y)
w_xmax = max(w_p1_xmax, w_p2_xmax)
w_ymax = max(w_p1_ymax, w_p2_ymax)
self.xmax = max(b_xmax, w_xmax)
self.ymax = max(b_ymax, w_ymax)
            # compute xmin and ymin
b_xmin = min(self.BALL_x)
b_ymin = min(self.BALL_y)
w_p1_xmin = min(self.WALL_P1_x)
w_p2_xmin = min(self.WALL_P2_x)
w_p1_ymin = min(self.WALL_P1_y)
w_p2_ymin = min(self.WALL_P2_y)
w_xmin = min(w_p1_xmin, w_p2_xmin)
w_ymin = min(w_p1_ymin, w_p2_ymin)
self.xmin = min(b_xmin, w_xmin)
self.ymin = min(b_ymin, w_ymin)
self.plot_widget.setXRange(self.xmin, self.xmax,padding=0)
self.plot_widget.setYRange(self.ymin, self.ymax,padding=0)
self.plot_widget.showAxis('right')
self.plot_widget.showAxis('top')
```
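The scatter branches above convert each particle radius from data units into the point² size that matplotlib's `scatter` expects, and the two copies of that computation differ by a factor of two in the assumed marker diameter (`2 * rpix` in the first routine versus `rpix` in `Plot.plotJPG`). The standalone sketch below is an editorial illustration with made-up example values, not code from ZDEM_View; it shows the diameter-based version of the conversion.

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(6, 6), dpi=100)
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)

x = np.array([20.0, 50.0, 80.0])   # example particle positions (data units)
y = np.array([30.0, 60.0, 40.0])
r = np.array([2.0, 5.0, 8.0])      # example radii (data units)

# Map each radius through the data->pixel transform; subtracting the transform of the
# origin leaves the radius length in pixels along the x direction.
r_pix = (ax.transData.transform(np.column_stack([r, r]))
         - ax.transData.transform(np.zeros((len(r), 2))))[:, 0]

# scatter() takes s in points**2, where sqrt(s) is the marker diameter in points, so
# convert the pixel diameter (2 * r_pix) to points (72 points per inch) and square it.
size_pt2 = (2 * r_pix / fig.dpi * 72) ** 2
ax.scatter(x, y, s=size_pt2)
plt.show()
```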
#### File: ZDEM_View/UI/leftBar.py
```python
import os
from matplotlib.ticker import FuncFormatter, MultipleLocator
from model.Thread import PlotThread
import numpy as np
from PyQt5 import QtGui, QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import QSize, Qt
class leftBar():
toolsBox_page_style = ("QWidget {"
"background-color: #ebf5ff;"
"}")
def __init__(self,leftFrame,myUi):
self.leftFrame = leftFrame
self.myUi = myUi
self.leftFrame.setMaximumWidth(350)
self.leftFrame_HLayout = QtWidgets.QHBoxLayout(self.leftFrame)
self.leftFrame_HLayout.setSpacing(0)
self.leftFrame_HLayout.setContentsMargins(0,0,0,0)
self.leftFrame_HLayout.setAlignment(QtCore.Qt.AlignCenter)
self.left_list_widget = QtWidgets.QListWidget(self.leftFrame)
self.left_stacked_widget = QtWidgets.QStackedWidget(self.leftFrame)
self.left_stacked_widget.setContentsMargins(0,0,0,0)
self.leftFrame_HLayout.addWidget(self.left_list_widget)
self.leftFrame_HLayout.addWidget(self.left_stacked_widget)
QtCore.QMetaObject.connectSlotsByName(self.leftFrame)
# init
self.init_list_widget()
self.init_dataView()
self.init_dataLog()
self.init_paramWidgets()
self.init_export()
self.retranslateUi()
#
self.left_list_widget.setCurrentRow(0)
def init_list_widget(self):
        self.left_list_widget.setFrameShape(QtWidgets.QListWidget.NoFrame)  # remove the frame border
        self.left_list_widget.setVerticalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)  # hide the scroll bars
        self.left_list_widget.setHorizontalScrollBarPolicy(Qt.ScrollBarPolicy.ScrollBarAlwaysOff)
        # font
font_1 = QtGui.QFont()
font_1.setFamily("黑体")
font_1.setPointSize(12)
font_1.setBold(False)
self.left_list_widget.setFont(font_1)
#
dataView_icon = QtGui.QIcon()
dataView_icon.addPixmap(QtGui.QPixmap("./icons/dataView.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.dataView_item = QtWidgets.QListWidgetItem(dataView_icon,'文件管理',self.left_list_widget)
self.dataView_item.setSizeHint(QSize(30,60))
self.dataView_item.setTextAlignment(QtCore.Qt.AlignCenter)
#
dataLog_icon = QtGui.QIcon()
dataLog_icon.addPixmap(QtGui.QPixmap("./icons/dataLog.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.dataLog_item = QtWidgets.QListWidgetItem(dataLog_icon,'数据信息',self.left_list_widget)
self.dataLog_item.setSizeHint(QSize(30,60))
self.dataLog_item.setTextAlignment(QtCore.Qt.AlignCenter)
#
param_icon = QtGui.QIcon()
param_icon.addPixmap(QtGui.QPixmap("./icons/figureParam.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.param_item = QtWidgets.QListWidgetItem(param_icon,'图像参数',self.left_list_widget)
self.param_item.setSizeHint(QSize(30,60))
self.param_item.setTextAlignment(QtCore.Qt.AlignCenter)
#
export_icon = QtGui.QIcon()
export_icon.addPixmap(QtGui.QPixmap("./icons/save.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.export_item = QtWidgets.QListWidgetItem(export_icon,'导出',self.left_list_widget)
self.export_item.setSizeHint(QSize(30,60))
self.export_item.setTextAlignment(QtCore.Qt.AlignCenter)
self.export_item.whatsThis()
        # connect the item click handler
self.left_list_widget.itemClicked.connect(self.item_clicked)
def init_dataView(self):
self.left_stacked_widget.addWidget(self.myUi.dataViewFrame)
self.left_stacked_widget.setMaximumWidth(230)
def init_dataLog(self):
self.export_frame = QtWidgets.QFrame()
self.export_frame.setMaximumWidth(230)
self.export_frame.setMinimumWidth(230)
self.export_frame.setObjectName("export_frame")
self.dataLog_verticalLayout = QtWidgets.QVBoxLayout(self.export_frame)
self.dataLog_verticalLayout.setContentsMargins(1, 1, 1, 1)
self.dataLog_verticalLayout.setObjectName("dataLog_verticalLayout")
self.label_dataText = QtWidgets.QLabel(self.export_frame)
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_dataText.setFont(font)
self.label_dataText.setAlignment(QtCore.Qt.AlignCenter)
self.label_dataText.setObjectName("label_dataText")
self.dataLog_verticalLayout.addWidget(self.label_dataText)
self.textBrowser_data = QtWidgets.QTextBrowser(self.export_frame)
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.textBrowser_data.setFont(font)
self.textBrowser_data.setFrameShape(QtWidgets.QFrame.NoFrame)
self.textBrowser_data.setHorizontalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self.textBrowser_data.setObjectName("textBrowser_data")
self.dataLog_verticalLayout.addWidget(self.textBrowser_data)
spacerItem = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.dataLog_verticalLayout.addItem(spacerItem)
        # add the page to the stacked widget
self.left_stacked_widget.addWidget(self.export_frame)
def init_paramWidgets(self):
self.param_widget = QtWidgets.QWidget()
self.param_widget.setObjectName("param_widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.param_widget)
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.setContentsMargins(1,1,1,1)
self.label_paramTitle = QtWidgets.QLabel(self.param_widget)
self.label_paramTitle.setAlignment(QtCore.Qt.AlignCenter)
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_paramTitle.setFont(font)
self.label_paramTitle.setObjectName("label_paramTitle")
self.verticalLayout.addWidget(self.label_paramTitle)
self.toolBox_figureParam = QtWidgets.QToolBox(self.param_widget)
self.toolBox_figureParam.setFont(font)
# self.toolBox_figureParam.setMaximumWidth(480)
self.toolBox_figureParam.setToolTip("")
self.toolBox_figureParam.setFrameShape(QtWidgets.QFrame.NoFrame)
self.toolBox_figureParam.setObjectName("toolBox_figureParam")
self.tools_move = QtWidgets.QWidget()
self.tools_move.setStyleSheet(self.toolsBox_page_style)
# self.tools_move.setGeometry(QtCore.QRect(0, 0, 301, 372))
self.tools_move.setToolTip("")
self.tools_move.setWhatsThis("")
self.tools_move.setAccessibleDescription("")
self.tools_move.setAutoFillBackground(True)
self.tools_move.setObjectName("tools_move")
self.gridLayout_5 = QtWidgets.QGridLayout(self.tools_move)
self.gridLayout_5.setObjectName("gridLayout_5")
self.lineEdit_xmove = QtWidgets.QLineEdit(self.tools_move)
self.lineEdit_xmove.setObjectName("lineEdit_xmove")
# self.lineEdit_xmove.setText()
self.gridLayout_5.addWidget(self.lineEdit_xmove, 0, 1, 1, 1)
self.label_ymove = QtWidgets.QLabel(self.tools_move)
self.label_ymove.setObjectName("label_ymove")
self.gridLayout_5.addWidget(self.label_ymove, 1, 0, 1, 1)
self.label_xmove = QtWidgets.QLabel(self.tools_move)
self.label_xmove.setToolTip("")
self.label_xmove.setObjectName("label_xmove")
self.gridLayout_5.addWidget(self.label_xmove, 0, 0, 1, 1)
self.lineEdit_ymove = QtWidgets.QLineEdit(self.tools_move)
self.lineEdit_ymove.setObjectName("lineEdit_ymove")
self.gridLayout_5.addWidget(self.lineEdit_ymove, 1, 1, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem1, 2, 1, 1, 1)
self.toolBox_figureParam.addItem(self.tools_move, "")
self.page_axisRange = QtWidgets.QWidget()
self.page_axisRange.setObjectName("page_axisRange")
self.page_axisRange.setStyleSheet(self.toolsBox_page_style)
self.gridLayout_3 = QtWidgets.QGridLayout(self.page_axisRange)
self.gridLayout_3.setObjectName("gridLayout_3")
self.yminLabel = QtWidgets.QLabel(self.page_axisRange)
self.yminLabel.setObjectName("yminLabel")
self.gridLayout_3.addWidget(self.yminLabel, 2, 0, 1, 1)
self.xmax_lineEdit = QtWidgets.QLineEdit(self.page_axisRange)
self.xmax_lineEdit.setObjectName("xmax_lineEdit")
self.gridLayout_3.addWidget(self.xmax_lineEdit, 1, 1, 1, 1)
self.xmaxLabel = QtWidgets.QLabel(self.page_axisRange)
self.xmaxLabel.setObjectName("xmaxLabel")
self.gridLayout_3.addWidget(self.xmaxLabel, 1, 0, 1, 1)
self.xminLabel = QtWidgets.QLabel(self.page_axisRange)
self.xminLabel.setObjectName("xminLabel")
self.gridLayout_3.addWidget(self.xminLabel, 0, 0, 1, 1)
self.xmin_lineEdit = QtWidgets.QLineEdit(self.page_axisRange)
self.xmin_lineEdit.setObjectName("xmin_lineEdit")
self.gridLayout_3.addWidget(self.xmin_lineEdit, 0, 1, 1, 1)
self.ymaxLabel = QtWidgets.QLabel(self.page_axisRange)
self.ymaxLabel.setObjectName("ymaxLabel")
self.gridLayout_3.addWidget(self.ymaxLabel, 3, 0, 1, 1)
self.ymin_lineEdit = QtWidgets.QLineEdit(self.page_axisRange)
self.ymin_lineEdit.setObjectName("ymin_lineEdit")
self.gridLayout_3.addWidget(self.ymin_lineEdit, 2, 1, 1, 1)
self.ymax_lineEdit = QtWidgets.QLineEdit(self.page_axisRange)
self.ymax_lineEdit.setObjectName("ymax_lineEdit")
self.gridLayout_3.addWidget(self.ymax_lineEdit, 3, 1, 1, 1)
spacerItem2 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem2, 4, 0, 1, 1)
self.toolBox_figureParam.addItem(self.page_axisRange, "")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.page.setStyleSheet(self.toolsBox_page_style)
self.gridLayout_10 = QtWidgets.QGridLayout(self.page)
self.gridLayout_10.setObjectName("gridLayout_10")
self.Button_plotPoint = QtWidgets.QRadioButton(self.page)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("./icons/plotPoint.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_plotPoint.setIcon(icon1)
self.Button_plotPoint.setObjectName("Button_plotPoint")
self.gridLayout_10.addWidget(self.Button_plotPoint, 1, 0, 1, 1)
self.Button_plotCircle = QtWidgets.QRadioButton(self.page)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("./icons/plot.ico"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.Button_plotCircle.setIcon(icon2)
self.Button_plotCircle.setObjectName("Button_plotCircle")
self.gridLayout_10.addWidget(self.Button_plotCircle, 0, 0, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_10.addItem(spacerItem3, 2, 0, 1, 1)
self.toolBox_figureParam.addItem(self.page, "")
self.tools_plotWall = QtWidgets.QWidget()
self.tools_plotWall.setGeometry(QtCore.QRect(0, 0, 301, 372))
self.tools_plotWall.setObjectName("tools_plotWall")
self.tools_plotWall.setStyleSheet(self.toolsBox_page_style)
self.gridLayout_9 = QtWidgets.QGridLayout(self.tools_plotWall)
self.gridLayout_9.setObjectName("gridLayout_9")
self.SpinBox_lineSize = QtWidgets.QDoubleSpinBox(self.tools_plotWall)
self.SpinBox_lineSize.setFrame(True)
self.SpinBox_lineSize.setButtonSymbols(
QtWidgets.QAbstractSpinBox.UpDownArrows)
self.SpinBox_lineSize.setSingleStep(0.1)
self.SpinBox_lineSize.setProperty("value", 0.8)
self.SpinBox_lineSize.setObjectName("SpinBox_lineSize")
self.gridLayout_9.addWidget(self.SpinBox_lineSize, 1, 1, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_9.addItem(spacerItem4, 5, 1, 1, 1)
self.label_lineSize = QtWidgets.QLabel(self.tools_plotWall)
self.label_lineSize.setObjectName("label_lineSize")
self.gridLayout_9.addWidget(self.label_lineSize, 1, 0, 1, 1)
self.checkBox_plotWall = QtWidgets.QCheckBox(self.tools_plotWall)
self.checkBox_plotWall.setMaximumSize(QtCore.QSize(100, 16777215))
self.checkBox_plotWall.setChecked(True)
self.checkBox_plotWall.setObjectName("checkBox_plotWall")
self.gridLayout_9.addWidget(self.checkBox_plotWall, 0, 0, 1, 1)
self.toolBox_figureParam.addItem(self.tools_plotWall, "")
self.page_color = QtWidgets.QWidget()
self.page_color.setStyleSheet(self.toolsBox_page_style)
self.page_color.setGeometry(QtCore.QRect(0, 0, 301, 372))
self.page_color.setObjectName("page_color")
self.gridLayout = QtWidgets.QGridLayout(self.page_color)
self.gridLayout.setObjectName("gridLayout")
self.label_color = QtWidgets.QLabel(self.page_color)
self.label_color.setObjectName("label_color")
self.gridLayout.addWidget(self.label_color, 0, 0, 1, 1)
self.comboBox_color = QtWidgets.QComboBox(self.page_color)
self.comboBox_color.setObjectName("comboBox_color")
self.comboBox_color.addItem("")
self.gridLayout.addWidget(self.comboBox_color, 0, 1, 1, 1)
self.Button_importColor = QtWidgets.QPushButton(self.page_color)
self.Button_importColor.setObjectName("Button_importColor")
self.gridLayout.addWidget(self.Button_importColor, 1, 1, 1, 1)
spacerItem5 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem5, 2, 1, 1, 1)
self.toolBox_figureParam.addItem(self.page_color, "")
self.page_figureTitle = QtWidgets.QWidget()
self.page_figureTitle.setStyleSheet(self.toolsBox_page_style)
self.page_figureTitle.setObjectName("page_figureTitle")
self.gridLayout_2 = QtWidgets.QGridLayout(self.page_figureTitle)
self.gridLayout_2.setObjectName("gridLayout_2")
self.lineEdit_title = QtWidgets.QLineEdit(self.page_figureTitle)
self.lineEdit_title.setObjectName("lineEdit_title")
self.gridLayout_2.addWidget(self.lineEdit_title, 0, 1, 1, 1)
self.fontComboBox_titleFont = QtWidgets.QFontComboBox(
self.page_figureTitle)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.fontComboBox_titleFont.sizePolicy().hasHeightForWidth())
self.fontComboBox_titleFont.setSizePolicy(sizePolicy)
self.fontComboBox_titleFont.setMaximumSize(QtCore.QSize(200, 16777215))
self.fontComboBox_titleFont.setObjectName("fontComboBox_titleFont")
self.gridLayout_2.addWidget(self.fontComboBox_titleFont, 1, 1, 1, 1)
self.label_titleFont = QtWidgets.QLabel(self.page_figureTitle)
self.label_titleFont.setObjectName("label_titleFont")
self.gridLayout_2.addWidget(self.label_titleFont, 1, 0, 1, 1)
self.label_titleFontSize = QtWidgets.QLabel(self.page_figureTitle)
self.label_titleFontSize.setObjectName("label_titleFontSize")
self.gridLayout_2.addWidget(self.label_titleFontSize, 2, 0, 1, 1)
self.label_title = QtWidgets.QLabel(self.page_figureTitle)
self.label_title.setObjectName("label_title")
self.gridLayout_2.addWidget(self.label_title, 0, 0, 1, 1)
self.spinBox_title = QtWidgets.QSpinBox(self.page_figureTitle)
self.spinBox_title.setPrefix("")
self.spinBox_title.setSingleStep(1)
self.spinBox_title.setProperty("value", 12)
self.spinBox_title.setObjectName("spinBox_title")
self.gridLayout_2.addWidget(self.spinBox_title, 2, 1, 1, 1)
spacerItem6 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem6, 3, 1, 1, 1)
self.toolBox_figureParam.addItem(self.page_figureTitle, "")
self.page_axisTick = QtWidgets.QWidget()
self.page_axisTick.setStyleSheet(self.toolsBox_page_style)
self.page_axisTick.setObjectName("page_axisTick")
self.gridLayout_6 = QtWidgets.QGridLayout(self.page_axisTick)
self.gridLayout_6.setObjectName("gridLayout_6")
self.label_yName = QtWidgets.QLabel(self.page_axisTick)
self.label_yName.setObjectName("label_yName")
self.gridLayout_6.addWidget(self.label_yName, 4, 0, 1, 1)
self.spinBox_xTickSize = QtWidgets.QSpinBox(self.page_axisTick)
self.spinBox_xTickSize.setPrefix("")
self.spinBox_xTickSize.setSingleStep(1)
self.spinBox_xTickSize.setProperty("value", 9)
self.spinBox_xTickSize.setObjectName("spinBox_xTickSize")
self.gridLayout_6.addWidget(self.spinBox_xTickSize, 2, 1, 1, 1)
self.label_xName = QtWidgets.QLabel(self.page_axisTick)
self.label_xName.setObjectName("label_xName")
self.gridLayout_6.addWidget(self.label_xName, 0, 0, 1, 1)
self.label_xTickFont = QtWidgets.QLabel(self.page_axisTick)
self.label_xTickFont.setObjectName("label_xTickFont")
self.gridLayout_6.addWidget(self.label_xTickFont, 1, 0, 1, 1)
self.label_yTickSize = QtWidgets.QLabel(self.page_axisTick)
self.label_yTickSize.setObjectName("label_yTickSize")
self.gridLayout_6.addWidget(self.label_yTickSize, 6, 0, 1, 1)
self.label_yTickFont = QtWidgets.QLabel(self.page_axisTick)
self.label_yTickFont.setObjectName("label_yTickFont")
self.gridLayout_6.addWidget(self.label_yTickFont, 5, 0, 1, 1)
self.label_minorTickInterval = QtWidgets.QLabel(self.page_axisTick)
self.label_minorTickInterval.setObjectName("label_minorTickInterval")
self.gridLayout_6.addWidget(self.label_minorTickInterval, 10, 0, 1, 2)
self.line_2 = QtWidgets.QFrame(self.page_axisTick)
self.line_2.setFrameShadow(QtWidgets.QFrame.Plain)
self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
self.line_2.setObjectName("line_2")
self.gridLayout_6.addWidget(self.line_2, 7, 0, 1, 2)
self.line = QtWidgets.QFrame(self.page_axisTick)
self.line.setFrameShadow(QtWidgets.QFrame.Plain)
self.line.setLineWidth(1)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setObjectName("line")
self.gridLayout_6.addWidget(self.line, 3, 0, 1, 2)
self.lineEdit_xName = QtWidgets.QLineEdit(self.page_axisTick)
self.lineEdit_xName.setObjectName("lineEdit_xName")
self.gridLayout_6.addWidget(self.lineEdit_xName, 0, 1, 1, 1)
self.label_mainTickInterval = QtWidgets.QLabel(self.page_axisTick)
self.label_mainTickInterval.setObjectName("label_mainTickInterval")
self.gridLayout_6.addWidget(self.label_mainTickInterval, 8, 0, 1, 2)
self.lineEdit_yName = QtWidgets.QLineEdit(self.page_axisTick)
self.lineEdit_yName.setObjectName("lineEdit_yName")
self.gridLayout_6.addWidget(self.lineEdit_yName, 4, 1, 1, 1)
self.label_xTickSzie = QtWidgets.QLabel(self.page_axisTick)
self.label_xTickSzie.setObjectName("label_xTickSzie")
self.gridLayout_6.addWidget(self.label_xTickSzie, 2, 0, 1, 1)
self.spinBox_yTickSize = QtWidgets.QSpinBox(self.page_axisTick)
self.spinBox_yTickSize.setPrefix("")
self.spinBox_yTickSize.setSingleStep(1)
self.spinBox_yTickSize.setProperty("value", 9)
self.spinBox_yTickSize.setObjectName("spinBox_yTickSize")
self.gridLayout_6.addWidget(self.spinBox_yTickSize, 6, 1, 1, 1)
self.fontComboBox_yTick = QtWidgets.QFontComboBox(self.page_axisTick)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.fontComboBox_yTick.sizePolicy().hasHeightForWidth())
self.fontComboBox_yTick.setSizePolicy(sizePolicy)
self.fontComboBox_yTick.setMaximumSize(QtCore.QSize(220, 16777215))
self.fontComboBox_yTick.setObjectName("fontComboBox_yTick")
self.gridLayout_6.addWidget(self.fontComboBox_yTick, 5, 1, 1, 1)
self.fontComboBox_xTick = QtWidgets.QFontComboBox(self.page_axisTick)
sizePolicy = QtWidgets.QSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.fontComboBox_xTick.sizePolicy().hasHeightForWidth())
self.fontComboBox_xTick.setSizePolicy(sizePolicy)
self.fontComboBox_xTick.setMaximumSize(QtCore.QSize(220, 16777215))
self.fontComboBox_xTick.setObjectName("fontComboBox_xTick")
self.gridLayout_6.addWidget(self.fontComboBox_xTick, 1, 1, 1, 2)
spacerItem7 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_6.addItem(spacerItem7, 12, 0, 1, 1)
self.lineEdit_mainTickInterval = QtWidgets.QLineEdit(
self.page_axisTick)
self.lineEdit_mainTickInterval.setMaximumSize(
QtCore.QSize(200, 16777215))
self.lineEdit_mainTickInterval.setObjectName(
"lineEdit_mainTickInterval")
self.gridLayout_6.addWidget(self.lineEdit_mainTickInterval, 9, 0, 1, 2)
self.lineEdit_minorTickInterval = QtWidgets.QLineEdit(
self.page_axisTick)
self.lineEdit_minorTickInterval.setMaximumSize(
QtCore.QSize(200, 16777215))
self.lineEdit_minorTickInterval.setObjectName(
"lineEdit_minorTickInterval")
self.gridLayout_6.addWidget(
self.lineEdit_minorTickInterval, 11, 0, 1, 2)
self.toolBox_figureParam.addItem(self.page_axisTick, "")
self.page_showAxis = QtWidgets.QWidget()
self.page_showAxis.setStyleSheet(self.toolsBox_page_style)
self.page_showAxis.setObjectName("page_showAxis")
self.gridLayout_7 = QtWidgets.QGridLayout(self.page_showAxis)
self.gridLayout_7.setObjectName("gridLayout_7")
self.checkBox_top = QtWidgets.QCheckBox(self.page_showAxis)
self.checkBox_top.setChecked(True)
self.checkBox_top.setObjectName("checkBox_top")
self.gridLayout_7.addWidget(self.checkBox_top, 0, 0, 1, 1)
self.checkBox_bottom = QtWidgets.QCheckBox(self.page_showAxis)
self.checkBox_bottom.setChecked(True)
self.checkBox_bottom.setObjectName("checkBox_bottom")
self.gridLayout_7.addWidget(self.checkBox_bottom, 0, 1, 1, 1)
self.checkBox_left = QtWidgets.QCheckBox(self.page_showAxis)
self.checkBox_left.setChecked(True)
self.checkBox_left.setObjectName("checkBox_left")
self.gridLayout_7.addWidget(self.checkBox_left, 1, 0, 1, 1)
self.checkBox_right = QtWidgets.QCheckBox(self.page_showAxis)
self.checkBox_right.setChecked(True)
self.checkBox_right.setObjectName("checkBox_right")
self.gridLayout_7.addWidget(self.checkBox_right, 1, 1, 1, 1)
spacerItem8 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_7.addItem(spacerItem8, 2, 0, 1, 1)
self.toolBox_figureParam.addItem(self.page_showAxis, "")
self.page_units = QtWidgets.QWidget()
self.page_units.setStyleSheet(self.toolsBox_page_style)
self.page_units.setObjectName("page_units")
self.gridLayout_8 = QtWidgets.QGridLayout(self.page_units)
self.gridLayout_8.setObjectName("gridLayout_8")
self.comboBox_units = QtWidgets.QComboBox(self.page_units)
self.comboBox_units.setObjectName("comboBox_units")
self.comboBox_units.addItem("")
self.comboBox_units.addItem("")
self.gridLayout_8.addWidget(self.comboBox_units, 0, 1, 1, 1)
self.label_units = QtWidgets.QLabel(self.page_units)
self.label_units.setObjectName("label_units")
self.gridLayout_8.addWidget(self.label_units, 0, 0, 1, 1)
spacerItem9 = QtWidgets.QSpacerItem(
20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_8.addItem(spacerItem9, 1, 0, 1, 1)
self.toolBox_figureParam.addItem(self.page_units, "")
self.verticalLayout.addWidget(self.toolBox_figureParam)
        # redraw button
self.reDraw_buttton = QtWidgets.QPushButton(
QtGui.QIcon("./icons/reDraw.png"), '重新绘图', self.param_widget)
self.reDraw_action = QtWidgets.QAction(self.param_widget)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("./icons/reDraw.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.reDraw_action.setIcon(icon)
self.reDraw_action.setText('重新绘图')
self.reDraw_action.setObjectName("reDraw_action")
self.reDraw_buttton.addAction(self.reDraw_action)
self.reDraw_buttton.setFont(font)
# self.reDraw_buttton.setMaximumWidth(200)
        # reset-parameters button
self.reSet_button = QtWidgets.QPushButton(
QtGui.QIcon("./icons/reSet.png"), '重置参数', self.param_widget)
self.reSet_action = QtWidgets.QAction(self.param_widget)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("./icons/reSet.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.reSet_action.setIcon(icon2)
        self.reSet_action.setText('重置参数')
self.reSet_action.setObjectName("reSet_action")
self.reSet_button.addAction(self.reSet_action)
self.reSet_button.setFont(font)
self.verticalLayout.addWidget(self.reSet_button)
self.verticalLayout.addWidget(self.reDraw_buttton)
spacerItem10 = QtWidgets.QSpacerItem(
20, 150, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
self.verticalLayout.addItem(spacerItem10)
self.toolBox_figureParam.layout().setSpacing(1)
self.left_stacked_widget.addWidget(self.param_widget)
def init_export(self):
self.export_widget = QtWidgets.QWidget()
self.export_widget.setMaximumWidth(230)
self.export_widget.setObjectName("export_widget")
icon4 = QtGui.QIcon()
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
icon4.addPixmap(QtGui.QPixmap("./icons/save.png"),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
        # add widgets to the export page
self.vLayout_export = QtWidgets.QVBoxLayout(self.export_widget)
self.vLayout_export.setObjectName("vLayout_export")
self.saveAll_button = QtWidgets.QPushButton(
QtGui.QIcon("./icons/save.ico"), '保存全部图片', self.export_widget)
self.saveAll_button.setFont(font)
self.vLayout_export.addWidget(self.saveAll_button)
self.left_stacked_widget.addWidget(self.export_widget)
def item_clicked(self):
item = self.left_list_widget.selectedItems()[0]
if item.text() == '文件管理':
self.switch_dataView()
elif (item.text() == '数据信息'):
self.switch_dataLog()
elif item.text() == '图像参数':
self.switch_paramWidget()
else:
self.switch_export()
def switch_dataView(self):
self.left_stacked_widget.setCurrentWidget(self.myUi.dataViewFrame)
self.leftFrame.setMaximumWidth(350)
self.leftFrame.setMinimumWidth(350)
def switch_dataLog(self):
self.left_stacked_widget.setCurrentWidget(self.export_frame)
self.leftFrame.setMaximumWidth(350)
self.leftFrame.setMinimumWidth(350)
def switch_paramWidget(self):
self.left_stacked_widget.setCurrentWidget(self.param_widget)
self.left_stacked_widget.setMinimumWidth(380)
self.left_stacked_widget.setMaximumWidth(380)
self.leftFrame.setMaximumWidth(500)
self.leftFrame.setMinimumWidth(500)
def switch_export(self):
self.left_stacked_widget.setCurrentWidget(self.export_widget)
self.leftFrame.setMaximumWidth(350)
self.leftFrame.setMinimumWidth(350)
def getParam(self):
        # default parameter values
paramList = []
xMove = None
yMove = None
xMin = 0.0
xMax = 0.0
yMin = 0.0
yMax = 0.0
ballStyle = ''
isPlotWall = True
wallLineSize = 0.8
colorStyle = 'ZDEMColor'
titleText = ''
titleTextFontSize = 12
xText = ''
xTextFontSize = 9
yText = ''
yTextFontSize = 9
mainTickInterval = 10000.0
minorTickInterval = 1000.0
isShowTop = True
isShowBottom = True
isShowLeft = True
isShowRight = True
unit = 1000
        # coordinate offsets
if self.lineEdit_xmove.text() != '':
xMove = float(self.lineEdit_xmove.text())
if self.lineEdit_ymove.text() != '':
yMove = float(self.lineEdit_ymove.text())
        # axis ranges
xMin = float(self.xmin_lineEdit.text())
xMax = float(self.xmax_lineEdit.text())
yMin = float(self.ymin_lineEdit.text())
yMax = float(self.ymax_lineEdit.text())
        # particle style
if (self.Button_plotCircle.isChecked() == True) & (self.Button_plotPoint.isChecked() == False):
ballStyle = 'circle'
if (self.Button_plotCircle.isChecked() == False) & (self.Button_plotPoint.isChecked() == True):
ballStyle = 'point'
if (self.Button_plotCircle.isChecked() == True) & (self.Button_plotPoint.isChecked() == True):
msg_box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, '警告', '请选择颗粒样式')
msg_box.exec_()
if (self.Button_plotCircle.isChecked() == False) & (self.Button_plotPoint.isChecked() == False):
msg_box1 = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, '警告', '请选择颗粒样式')
msg_box1.exec_()
        # walls
        isPlotWall = self.checkBox_plotWall.isChecked()  # returns a bool
        wallLineSize = self.SpinBox_lineSize.value()
        # colour settings
if (self.comboBox_color.currentText() == 'ZDEM默认颜色'):
colorStyle = 'ZDEMColor'
        # figure title (custom font styles are not implemented yet, likewise for the x and y axes)
titleText = self.lineEdit_title.text()
titleTextFontSize = self.spinBox_title.value()
        # axis labels
xText = self.lineEdit_xName.text()
xTextFontSize = self.spinBox_xTickSize.value()
yText = self.lineEdit_yName.text()
yTextFontSize = self.spinBox_yTickSize.value()
if self.lineEdit_mainTickInterval.text() != '':
mainTickInterval = float(self.lineEdit_mainTickInterval.text())
if self.lineEdit_minorTickInterval.text() != '':
minorTickInterval = float(self.lineEdit_minorTickInterval.text())
        # whether to show the axis spines
isShowTop = self.checkBox_top.isChecked() # bool
isShowBottom = self.checkBox_bottom.isChecked()
isShowLeft = self.checkBox_left.isChecked()
isShowRight = self.checkBox_right.isChecked()
        # units
if self.comboBox_units.currentText() == 'km':
unit = 1000
if self.comboBox_units.currentText() == 'm':
unit = 1
        # collect the parameters into the list
paramList.append(xMove)
paramList.append(yMove)
paramList.append(xMin)
paramList.append(xMax)
paramList.append(yMin)
paramList.append(yMax)
paramList.append(ballStyle)
paramList.append(isPlotWall)
paramList.append(wallLineSize)
paramList.append(colorStyle)
paramList.append(titleText)
paramList.append(titleTextFontSize)
paramList.append(xText)
paramList.append(xTextFontSize)
paramList.append(yText)
paramList.append(yTextFontSize)
paramList.append(mainTickInterval)
paramList.append(minorTickInterval)
paramList.append(isShowTop)
paramList.append(isShowBottom)
paramList.append(isShowLeft)
paramList.append(isShowRight)
paramList.append(unit)
return paramList
def reDraw(self):
self.paramList = self.getParam()
if self.paramList[6] == '':
return False
self.mplWidgetList = self.myUi.dataView.mplWidget_list
self.list_select_files = self.myUi.dataView.list_select_files
self.absulotePathList = self.myUi.dataView.absulotePathList
if (self.paramList[0] is None) and (self.paramList[1] is None) and (self.paramList[7] == True) and (self.paramList[8] == 0.8):
self.reDraw_axis()
else:
            for i in range(len(self.list_select_files)):  # create one plotting thread per file and add it to the pool
PlotALLThread = PlotThread(
self.mplWidgetList[i], self.absulotePathList[i], self.myUi, i, paramList=self.paramList)
PlotALLThread.plotObj_test.updata_canvas_signal.connect(
self.myUi.dataView.updataCanvas)
PlotALLThread.plotObj_test.begin_plot_signal.connect(
self.myUi.dataView.beginPlot_labelupdata)
                self.myUi.dataView.poolManager.addThread(
                    PlotALLThread)  # add to the thread pool
                PlotALLThread.autoDelete()  # auto-delete the thread when it finishes
            self.myUi.dataView.poolManager.start()  # all plotting threads are queued; start them
self.myUi.ProgressBar.statusLabel.setText("绘图中,请稍后...")
def reDraw_axis(self):
for i in range(len(self.list_select_files)):
            # set the x- and y-axis ranges
self.mplWidgetList[i].qCanvas.axes.set_xlim(
self.paramList[2], self.paramList[3])
self.mplWidgetList[i].qCanvas.axes.set_ylim(
self.paramList[4], self.paramList[5])
            # units
def unitsformat(x, pos):
return '{:n}'.format(x / self.paramList[22])
xmajorformatter = FuncFormatter(unitsformat)
self.mplWidgetList[i].qCanvas.axes.xaxis.set_major_formatter(
xmajorformatter)
ymajorformatter = FuncFormatter(unitsformat)
self.mplWidgetList[i].qCanvas.axes.yaxis.set_major_formatter(
ymajorformatter)
            # major ticks
            xmajorLocator = MultipleLocator(
                self.paramList[16])  # x-axis major tick interval
            # xmajorFormatter = FormatStrFormatter('%5.1f')  # format of the x tick labels
            ymajorLocator = MultipleLocator(
                self.paramList[16])  # y-axis major tick interval
            # ymajorFormatter = FormatStrFormatter('%1.1f')  # format of the y tick labels
            # apply the major tick locators (the formatters are left at their defaults)
self.mplWidgetList[i].qCanvas.axes.xaxis.set_major_locator(
xmajorLocator)
# self.canvasObj.qCanvas.axes.xaxis.set_major_formatter(xmajorFormatter)
self.mplWidgetList[i].qCanvas.axes.yaxis.set_major_locator(
ymajorLocator)
# self.canvasObj.qCanvas.axes.yaxis.set_major_formatter(ymajorFormatter)
            # minor ticks
xminorLocator = MultipleLocator(self.paramList[17])
yminorLocator = MultipleLocator(self.paramList[17])
self.mplWidgetList[i].qCanvas.axes.xaxis.set_minor_locator(
xminorLocator)
self.mplWidgetList[i].qCanvas.axes.yaxis.set_minor_locator(
yminorLocator)
            # tick label font sizes
self.mplWidgetList[i].qCanvas.axes.tick_params(
axis='x', labelsize=self.paramList[13])
self.mplWidgetList[i].qCanvas.axes.tick_params(
axis='y', labelsize=self.paramList[15])
            # show/hide the axis spines
self.mplWidgetList[i].qCanvas.axes.spines['top'].set_visible(
self.paramList[18])
self.mplWidgetList[i].qCanvas.axes.spines['right'].set_visible(
self.paramList[21])
self.mplWidgetList[i].qCanvas.axes.spines['bottom'].set_visible(
self.paramList[19])
self.mplWidgetList[i].qCanvas.axes.spines['left'].set_visible(
self.paramList[20])
            # compute the figure size
wi = self.paramList[3] - self.paramList[2]
hi = self.paramList[5] - self.paramList[4]
wcm = 14
winch = wcm/2.54
hinch = (winch+0.1)/wi*hi
self.mplWidgetList[i].qCanvas.figs.set_size_inches(
w=winch, h=hinch)
self.mplWidgetList[i].qCanvas.figs.canvas.draw()
            # refresh the canvas
self.mplWidgetList[i].qCanvas.figs.canvas.flush_events()
def saveAll(self):
"""
:return:
"""
QCanvas_list = []
QCanvas_list = self.myUi.dataView.QCanvas_list
num = len(self.myUi.dataView.list_select_files)
for i in range(num):
QCanvas_list[i].saveFig()
def retranslateUi(self):
_translate = QtCore.QCoreApplication.translate
self.label_dataText.setText(_translate("self.siderBarWidget", "数据信息"))
self.label_paramTitle.setText(
_translate("self.siderBarWidget", "图像参数"))
self.tools_move.setStatusTip(_translate("self.siderBarWidget", ")"))
self.tools_move.setAccessibleName(
_translate("self.siderBarWidget", ")"))
self.lineEdit_xmove.setToolTip(_translate(
"self.siderBarWidget", "设置坐标沿X轴的偏移量,单位(m)"))
self.label_ymove.setText(_translate("self.siderBarWidget", "Y轴"))
self.label_xmove.setText(_translate("self.siderBarWidget", "X轴"))
self.lineEdit_ymove.setToolTip(_translate(
"self.siderBarWidget", "设置坐标沿Y轴的偏移量,单位(m)"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.tools_move), _translate("self.siderBarWidget", "坐标偏移量"))
self.yminLabel.setText(_translate("self.siderBarWidget", "Y轴最小值"))
self.xmax_lineEdit.setToolTip(
_translate("self.siderBarWidget", "单位(m)"))
self.xmaxLabel.setText(_translate("self.siderBarWidget", "X轴最大值"))
self.xminLabel.setText(_translate("self.siderBarWidget", "X轴最小值"))
self.xmin_lineEdit.setToolTip(
_translate("self.siderBarWidget", "单位(m)"))
self.ymaxLabel.setText(_translate("self.siderBarWidget", "Y轴最大值"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page_axisRange), _translate("self.siderBarWidget", "坐标轴范围"))
self.Button_plotPoint.setText(_translate("self.siderBarWidget", "散点"))
self.Button_plotCircle.setText(
_translate("self.siderBarWidget", "二维圆"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page), _translate("self.siderBarWidget", "颗粒"))
self.label_lineSize.setText(_translate("self.siderBarWidget", "线条粗细"))
self.checkBox_plotWall.setText(
_translate("self.siderBarWidget", "绘制墙体"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.tools_plotWall), _translate("self.siderBarWidget", "墙"))
self.label_color.setText(_translate("self.siderBarWidget", "颜色设置"))
self.comboBox_color.setItemText(
0, _translate("self.siderBarWidget", "ZDEM默认颜色"))
self.Button_importColor.setText(
_translate("self.siderBarWidget", "导入其他"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page_color), _translate("self.siderBarWidget", "颜色设置"))
self.label_titleFont.setText(_translate("self.siderBarWidget", "字体"))
self.label_titleFontSize.setText(
_translate("self.siderBarWidget", "大小"))
self.label_title.setText(_translate("self.siderBarWidget", "图名"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page_figureTitle), _translate("self.siderBarWidget", "图名"))
self.label_yName.setText(_translate("self.siderBarWidget", "Y轴名"))
self.label_xName.setText(_translate("self.siderBarWidget", "X轴名"))
self.label_xTickFont.setText(_translate("self.siderBarWidget", "字体"))
self.label_yTickSize.setText(_translate("self.siderBarWidget", "大小"))
self.label_yTickFont.setText(_translate("self.siderBarWidget", "字体"))
self.label_minorTickInterval.setText(
_translate("self.siderBarWidget", "次坐标刻度间隔"))
self.label_mainTickInterval.setText(
_translate("self.siderBarWidget", "主坐标刻度间隔"))
self.label_xTickSzie.setText(_translate("self.siderBarWidget", "大小"))
self.lineEdit_mainTickInterval.setText(
_translate("self.siderBarWidget", "10000.0"))
self.lineEdit_minorTickInterval.setText(
_translate("self.siderBarWidget", "1000.0"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page_axisTick), _translate("self.siderBarWidget", "轴标签"))
self.checkBox_top.setText(_translate("self.siderBarWidget", "Top"))
self.checkBox_bottom.setText(
_translate("self.siderBarWidget", "Bottom"))
self.checkBox_left.setText(_translate("self.siderBarWidget", "Left"))
self.checkBox_right.setText(_translate("self.siderBarWidget", "Right"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page_showAxis), _translate("self.siderBarWidget", "显示/隐藏坐标轴"))
self.comboBox_units.setItemText(
0, _translate("self.siderBarWidget", "km"))
self.comboBox_units.setItemText(
1, _translate("self.siderBarWidget", "m"))
self.label_units.setText(_translate("self.siderBarWidget", "单位"))
self.toolBox_figureParam.setItemText(self.toolBox_figureParam.indexOf(
self.page_units), _translate("self.siderBarWidget", "单位"))
        # connect the buttons to their slots
self.reDraw_buttton.clicked.connect(self.reDraw)
self.saveAll_button.clicked.connect(self.saveAll)
```
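`leftBar.getParam` returns a 23-element list that `Plot`, `pg_plot` and `reDraw_axis` read positionally (`param_list[0]` through `param_list[22]`). As a readability aid only (the field names below are inferred from the assignments in those classes and are not part of the original code), the same ordering could be captured in a `NamedTuple`:

```python
from typing import NamedTuple, Optional

class PlotParams(NamedTuple):
    xmove: Optional[float]        # param_list[0]
    ymove: Optional[float]        # param_list[1]
    xmin: float                   # param_list[2]
    xmax: float                   # param_list[3]
    ymin: float                   # param_list[4]
    ymax: float                   # param_list[5]
    ball_style: str               # param_list[6]
    wall_show: bool               # param_list[7]
    wall_line_size: float         # param_list[8]
    color_style: str              # param_list[9]
    title_text: str               # param_list[10]
    title_font_size: int          # param_list[11]
    x_text: str                   # param_list[12]
    x_font_size: int              # param_list[13]
    y_text: str                   # param_list[14]
    y_font_size: int              # param_list[15]
    main_tick_interval: float     # param_list[16]
    minor_tick_interval: float    # param_list[17]
    show_top: bool                # param_list[18]
    show_bottom: bool             # param_list[19]
    show_left: bool               # param_list[20]
    show_right: bool              # param_list[21]
    units: float                  # param_list[22]

# Hypothetical usage: params = PlotParams(*left_bar.getParam())
# params.ball_style is then equivalent to param_list[6].
```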
#### File: ZDEM_View/UI/wMain.py
```python
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QRectF, QSize, Qt, pyqtSignal
from PyQt5.QtGui import QBrush, QColor, QIcon, QPainter, QPainterPath, QPalette, QPixmap
from PyQt5.QtWidgets import QFileDialog, QFrame, QGraphicsDropShadowEffect, QGroupBox, QMessageBox, QWidget
from UI.parametersFrame import Ui_parametersFrame
from model import canvas, plot, progressbar
from model.dataView import FileSystemTableView
from UI.mainWidgets import Ui_mainWidgets
from UI.siderBar import siderBar
from UI.leftBar import leftBar
import shutil
class Ui_MainWindow(object):
"""
主窗口UI类
"""
log_signal = pyqtSignal(str)
def setupUi(self, MainWindow):
"""
:param MainWindow:
:return:
"""
self.MainWindow = MainWindow
self.MainWindow.setObjectName("MainWindow")
self.MainWindow.resize(1600, 900)
self.MainWindow.setContentsMargins(0, 0, 0, 0)
        # frameless rounded window with drop shadow (currently disabled)
        # self.MainWindow.setWindowFlag(QtCore.Qt.FramelessWindowHint)  # frameless
        # =============== transparent shadow ====================
        # self.MainWindow.setAutoFillBackground(True)  # must be set for the effect
        # self.MainWindow.setAttribute(QtCore.Qt.WA_TranslucentBackground)  # transparent window
        # shadow = QGraphicsDropShadowEffect()  # create the shadow
        # shadow.setBlurRadius(20)  # shadow blur radius in px
        # shadow.setColor(QColor("#444444"))  # shadow colour
        # shadow.setOffset(0, 0)  # shadow offset in px
        # self.MainWindow.setGraphicsEffect(shadow)  # apply the shadow
        # maximise the window on start-up
        # self.MainWindow.showMaximized()
self.MainWindow.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.MainLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.MainLayout.setObjectName("MainLayout")
self.MainLayout.setContentsMargins(1, 1, 1, 1)
        self.MainLayout.setSpacing(0)  # no spacing between widgets
        # frame for the plotting area
self.mainWorkFrame = QtWidgets.QFrame(self.centralwidget)
self.mainWorkFrame.setObjectName("mainWorkFrame")
        # instantiate the plotting-area widgets
self.mainWidgetsObj = Ui_mainWidgets(self.mainWorkFrame, self)
        # file tree
self.dataViewFrame = QtWidgets.QFrame()
self.dataViewFrame.setObjectName("dataViewFrame")
self.dataViewFrame.setStyleSheet(("QFrame{\n"
" border-radius: 9px;\n"
"}"))
        # create the dataView object
self.dataView = FileSystemTableView(self.dataViewFrame, self)
        # size policy: fixed horizontally, expanding vertically
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.dataView.sizePolicy().hasHeightForWidth())
self.dataView.setSizePolicy(sizePolicy)
self.dataView.setObjectName("dataView")
self.dataviewLayout = QtWidgets.QVBoxLayout(self.dataViewFrame)
self.dataviewLayout.setContentsMargins(1, 0, 0, 0)
self.dataviewLayout.setSpacing(0)
self.dataviewLabel = QtWidgets.QLabel()
self.dataviewLabel.setText('文件管理器')
self.dataviewLabel.setStyleSheet(("QLabel{\n"
" background-color: #c7e0ff;\n"
"border-radius: 9px;"
"}"))
        # font
font_1 = QtGui.QFont()
font_1.setFamily("黑体")
font_1.setPointSize(12)
font_1.setBold(False)
self.dataviewLabel.setFont(font_1)
self.dataviewLabel.setMaximumWidth(230)
self.dataviewLabel.setMinimumHeight(35)
self.dataviewLabel.setMaximumHeight(35)
self.dataviewLabel.setAlignment(QtCore.Qt.AlignCenter)
self.dataviewLayout.addWidget(self.dataviewLabel)
self.dataviewLayout.addWidget(self.dataView)
#
MainWindow.setCentralWidget(self.centralwidget)
        # menu bar
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 886, 24))
self.menuBar.setObjectName("menuBar")
self.file_menu = QtWidgets.QMenu(self.menuBar)
self.file_menu.setObjectName("file_menu")
self.edit_menu = QtWidgets.QMenu(self.menuBar)
self.edit_menu.setObjectName("edit_menu")
self.view_menu = QtWidgets.QMenu(self.menuBar)
self.view_menu.setObjectName("view_menu")
self.help_menu = QtWidgets.QMenu(self.menuBar)
self.help_menu.setObjectName("help_menu")
MainWindow.setMenuBar(self.menuBar)
        # toolbar
self.toolBar = QtWidgets.QToolBar(MainWindow)
self.toolBar.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.toolBar.setMouseTracking(False)
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # status bar
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
#self.statusBar.setMaximumHeight(25)
self.statusBar.setStyleSheet(("QStatusBar{\n"
"background:#0F6BAE;\n"
"}\n"))
        # instantiate the progress bar
self.ProgressBar = progressbar.ProgressBar(self)
# self.plotObj.updata_progressbar_signal.connect(self.ProgressBar.updata_PBar)
        # actions for the menus and the toolbar
self.openFile_action = QtWidgets.QAction(MainWindow)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("./icons/open.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.openFile_action.setIcon(icon)
self.openFile_action.setObjectName("openFile_action")
self.saveFile_action = QtWidgets.QAction(MainWindow)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("./icons/save.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.saveFile_action.setIcon(icon1)
self.saveFile_action.setObjectName("saveFile_action")
self.cleanALl_action = QtWidgets.QAction(MainWindow)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("./icons/clear.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.cleanALl_action.setIcon(icon3)
self.cleanALl_action.setObjectName("cleanALl_action")
        self.plotCircle_action = QtWidgets.QAction(MainWindow)  # plot 2D circles
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("./icons/plot.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.plotCircle_action.setIcon(icon4)
self.plotCircle_action.setObjectName("plotCircle_action")
        self.plotPoint_action = QtWidgets.QAction(MainWindow)  # plot scatter points
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("./icons/plotPoint.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.plotPoint_action.setIcon(icon5)
self.plotPoint_action.setObjectName("plotPoint_action")
self.file_menu.addAction(self.openFile_action)
self.file_menu.addAction(self.saveFile_action)
self.file_menu.addSeparator()
self.edit_menu.addAction(self.plotCircle_action)
self.edit_menu.addAction(self.plotPoint_action)
self.edit_menu.addAction(self.cleanALl_action)
self.menuBar.addAction(self.file_menu.menuAction())
self.menuBar.addAction(self.edit_menu.menuAction())
self.menuBar.addAction(self.view_menu.menuAction())
self.menuBar.addAction(self.help_menu.menuAction())
self.toolBar.addAction(self.openFile_action)
# self.toolBar.addAction(self.saveFile_action)
self.toolBar.addSeparator()
self.toolBar.addAction(self.cleanALl_action)
self.toolBar.addSeparator()
self.toolBar.addAction(self.plotPoint_action)
self.toolBar.addAction(self.plotCircle_action)
self.toolBar.addSeparator()
        self.previousPage_action = self.toolBar.addAction(QIcon("./icons/previous.png"), u'Previous')
        self.nextPage_action = self.toolBar.addAction(QIcon("./icons/next.png"), u'Next')
        self.toolBar.addSeparator()
        self.composeGIF_action = self.toolBar.addAction(QIcon("./icons/GIF.png"), u'Compose GIF')
        self.playGIF_action = self.toolBar.addAction(QIcon("./icons/interface.png"), u'Play GIF')
        self.pauseGIF_action = self.toolBar.addAction(QIcon("./icons/pause.png"), u'Pause GIF')
self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
        # Create the sidebar widget objects
        # Left sidebar list widget
self.leftFrame = QtWidgets.QFrame(self.centralwidget)
self.leftFrame.setObjectName('leftFrame')
self.leftFrame.setContentsMargins(0,0,0,0)
self.leftBar = leftBar(self.leftFrame,self)
#self.siderBar = siderBar(self.leftFrame, self)
        # Add widgets to the central widget
self.MainLayout.addWidget(self.leftFrame)
self.MainLayout.addWidget(self.mainWorkFrame)
#
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
"""
:param MainWindow:
:return:
"""
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.file_menu.setTitle(_translate("MainWindow", "File"))
        self.edit_menu.setTitle(_translate("MainWindow", "Edit"))
        self.view_menu.setTitle(_translate("MainWindow", "Visualization"))
        self.help_menu.setTitle(_translate("MainWindow", "Help"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.openFile_action.setText(_translate("MainWindow", "Open"))
        self.saveFile_action.setText(_translate("MainWindow", "Save As"))
        self.saveFile_action.setShortcut(_translate("MainWindow", "Ctrl+S"))
        self.cleanALl_action.setText(_translate("MainWindow", "Clear All"))
        self.plotCircle_action.setText(_translate("MainWindow", "Plot 2D Circles"))
        self.plotPoint_action.setText(_translate("MainWindow", "Plot Scatter"))
        # Connect the action signals to their handlers
self.openFile_action.triggered.connect(self.open_FileDir)
self.plotCircle_action.triggered.connect(self.dataView.plotAllSlot_circle)
self.plotPoint_action.triggered.connect(self.dataView.plotAllSlot_point)
self.nextPage_action.triggered.connect(self.mainWidgetsObj.nextPage)
self.previousPage_action.triggered.connect(self.mainWidgetsObj.previousPage)
self.composeGIF_action.triggered.connect(self.mainWidgetsObj.compose_gif)
self.playGIF_action.triggered.connect(self.mainWidgetsObj.playGif)
self.pauseGIF_action.triggered.connect(self.mainWidgetsObj.pauseGif)
self.cleanALl_action.triggered.connect(self.clear_all)
def open_FileDir(self):
"""
打开文件对话框,读取文件夹下.dat格式的文件列表,储存在列表中。并调用dataview对象的addData方法,初始化文件管理器和画布
:return:
"""
        self.dataView.myModel.removeRows(0, self.dataView.myModel.rowCount())  # remove all rows
        self.folderDir = None
        self.folderDir = QFileDialog.getExistingDirectory(self.MainWindow, 'Select folder', "./")  # open a folder-selection dialog
        if self.folderDir == "":
            pass  # avoid a crash when no folder is selected or the dialog is closed
        else:
            AllFileList = os.listdir(self.folderDir)  # list every entry in the chosen folder
            self.prefixList = []
            self.absolute_FileList = []
            self.FileNameList = []
            for FileName in AllFileList:
                absolutePath = os.path.join(self.folderDir, FileName)  # build the absolute path
                if os.path.isfile(absolutePath):  # os.path.isfile needs an absolute path, while os.listdir only returns file names, hence the join above
                    self.prefixList.append(os.path.splitext(FileName)[0])  # list of file-name prefixes
                    if os.path.splitext(FileName)[1] == '.dat':  # os.path.splitext splits the name and extension into a tuple
                        self.FileNameList.append(FileName)  # list of file names
                        self.absolute_FileList.append(absolutePath)  # list of absolute file paths
            # drop init_xyr.dat from the file name list
            if self.FileNameList and self.FileNameList[-1] == 'init_xyr.dat':
                del self.FileNameList[-1]
            self.dataView.addData(self.dataView.myModel, self.FileNameList, self.folderDir)  # initialize the file manager and the matplotlib canvas
def clear_all(self):
"""清空所有
TODO:实现清空“图像参数”模块的功能
"""
self.dataView.myModel.removeRows(0,self.dataView.myModel.rowCount()) # 删除所有行
self.mainWidgetsObj.tabWidget.clear() # 输出所有的可视化图像
self.mainWidgetsObj.select_comBox.clear() # 输出文件选择框
self.ProgressBar.statusLabel.clear() # 状态栏信息
self.ProgressBar.plotAllLabel.clear()
self.ProgressBar.beginPlotLabel.clear()
self.ProgressBar.pBar.setValue(0)
self.ProgressBar.pBar.close()
# leftbar
self.leftBar.textBrowser_data.clear() # 清空数据记录信息
class RoundShadow(QWidget):
"""
圆角边框类
"""
def __init__(self, parent=None):
super(RoundShadow, self).__init__(parent)
self.border_width = 8
        # make the window frameless with a translucent background (required for the rounded corners)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlags(Qt.FramelessWindowHint | Qt.Window)
def paintEvent(self, paintEvent):
        # drop shadow
path = QPainterPath()
path.setFillRule(Qt.WindingFill)
pat = QPainter(self)
pat.setRenderHint(pat.Antialiasing)
pat.fillPath(path, QBrush(Qt.white))
color = QColor(192, 192, 192, 50)
for i in range(10):
i_path = QPainterPath()
i_path.setFillRule(Qt.WindingFill)
ref = QRectF(10-i, 10-i, self.width()-(10-i)*2, self.height()-(10-i)*2)
# i_path.addRect(ref)
i_path.addRoundedRect(ref, self.border_width, self.border_width)
            color.setAlpha(int(150 - i**0.5*50))  # setAlpha expects an int
pat.setPen(color)
pat.drawPath(i_path)
        # rounded corners
        pat2 = QPainter(self)
        pat2.setRenderHint(pat2.Antialiasing)  # anti-aliasing
pat2.setBrush(Qt.white)
pat2.setPen(Qt.transparent)
rect = self.rect()
rect.setLeft(9)
rect.setTop(9)
rect.setWidth(rect.width()-9)
rect.setHeight(rect.height()-9)
pat2.drawRoundedRect(rect, 4, 4)
class myMainWindow(QtWidgets.QMainWindow):
"""对QMainWindow类重写,实现一些功能"""
def closeEvent(self, event):
"""
重写closeEvent方法,实现窗体关闭时执行一些代码
:param event: close()触发的事件
:return: None
"""
        reply = QtWidgets.QMessageBox.question(self,
                                               'This program',
                                               "Do you want to exit the program?",
                                               QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
                                               QtWidgets.QMessageBox.No)
if reply == QtWidgets.QMessageBox.Yes:
event.accept()
            # FIXME: raises an error, the gif files cannot be deleted
shutil.rmtree('./temp save files')
os.mkdir('./temp save files')
else:
event.ignore()
def paintEvent(self, event):
        # drop shadow
self.border_width = 8
path = QPainterPath()
path.setFillRule(Qt.WindingFill)
pat = QPainter(self)
pat.setRenderHint(pat.Antialiasing)
pat.fillPath(path, QBrush(Qt.white))
color = QColor(192, 192, 192, 50)
for i in range(10):
i_path = QPainterPath()
i_path.setFillRule(Qt.WindingFill)
ref = QRectF(10-i, 10-i, self.width()-(10-i)*2, self.height()-(10-i)*2)
# i_path.addRect(ref)
i_path.addRoundedRect(ref, self.border_width, self.border_width)
            color.setAlpha(int(150 - i**0.5*50))  # setAlpha expects an int
pat.setPen(color)
pat.drawPath(i_path)
        # rounded corners
        pat2 = QPainter(self)
        pat2.setRenderHint(pat2.Antialiasing)  # anti-aliasing
pat2.setBrush(Qt.white)
pat2.setPen(Qt.transparent)
rect = self.rect()
rect.setLeft(9)
rect.setTop(9)
rect.setWidth(rect.width()-9)
rect.setHeight(rect.height()-9)
pat2.drawRoundedRect(rect, 4, 4)
``` |
{
"source": "jlezama/disentangling-jacobian",
"score": 3
} |
#### File: conditional_image_manipulation/web_demo/aux.py
```python
attr_dict = {
0: '5 o Clock shadow',
1: 'Arched Eyebrows',
2: 'Attractive',
3: 'Bags Under Eyes',
4: 'Bald',
5: 'Bangs',
6: 'Big Lips',
7: 'Big Nose',
8: 'Black Hair',
9: 'Blond Hair',
10: 'Blurry',
11: 'Brown hair',
12: 'Bushy Eyebrows',
13: 'Chubby',
14: 'Double Chin',
15: 'Eyeglasses',
16: 'Goatee',
17: 'Gray Hair',
18: 'Heavy Makeup',
19: 'High Cheekbones',
20: 'Male',
21: 'Mouth Slightly Open',
22: 'Mustache',
23: 'Narrow Eyes',
24: 'No Beard',
25: 'Oval Face',
26: 'Pale Skin',
27: 'Pointy nose',
28: 'Receding Hairline',
29: 'Rosy Cheeks',
30: 'Sideburns',
31: 'Smiling',
32: 'Straight Hair',
33: 'Wavy Hair',
34: 'Wearing Earrings',
35: 'Wearing Hat',
36: 'Wearing Lipstick',
37: 'Wearing Necklace',
38: 'Wearing Necktie',
39: 'Young',
}
################################################################################
def create_html_result(outfname, y_pred, params):
# create html table
rows1 = ''
rows2 = ''
for i in range(40):
row = '<tr><td style="font-size:14"> %s </td><td > <div class="slidecontainer"> <input type="range" min="-40" max="40" step="0.1" class="slider" id="myRange_%i" name="attr_%i" value="%2.2f"> </div> </td> <td id="value_%i" style="width:40"> %2.2f</td></tr>' % (attr_dict[i], i,i, y_pred[i], i, y_pred[i])
if i<20:
rows1 += row
else:
rows2 += row
model_description = params.model_path
table_txt = """
<div style="margin-left:2cm;margin-top:3cm;font-family:'Trebuchet MS'">
<p style="text-align:center;font-size:28px">%s</p>
<table>
<tr>
<td>
<table border=0 >%s</table>
</td>
<td>
<table border=0 >%s</table>
</td>
<td style="text-align:center;font-size:28px">
Original / Reconstruction / Manipulation <br><br>
<img src="%s" id="result_img">
</td>
</tr>
</table>
</div>
""" % (model_description, rows1, rows2, outfname)
javascript = '<script>'
for i in range(40):
javascript += """
var slider{i} = document.getElementById("myRange_{i}");
var output{i} = document.getElementById("value_{i}");
output{i}.innerHTML = slider{i}.value;
slider{i}.oninput = function() {{ output{i}.innerHTML = this.value;}}
slider{i}.onmouseup = function(){{document.getElementById("sliders_form").submit();}};
""".format(i=i)
javascript += '</script>'
html = """
<html><body> <form action='/get_image' method=GET id="img_id_form"> Image ID: <input type="text" name="fname" value="%i">
<input type="submit" value="Submit">
</form>
<br> <hr> <br>
<form action='.' method=POST id="sliders_form">
%s
</form>
%s
</body></html>
""" % (params.offset, table_txt, javascript)
return html
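# Hedged usage sketch (not part of the original demo): render the slider page for a
# dummy prediction vector. argparse.Namespace stands in for the server's params
# object, which create_html_result only reads model_path and offset from; the image
# path below is hypothetical.
if __name__ == '__main__':
    import argparse
    demo_params = argparse.Namespace(model_path='model/example.pth', offset=0)
    demo_html = create_html_result('imgs/tmp.png', [0.0] * 40, demo_params)
    print(len(demo_html))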
```
#### File: conditional_image_manipulation/web_demo/web_server.py
```python
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
# for interpolation
import os
import argparse
import numpy as np
import torch
from torch.autograd import Variable
from torchvision.utils import make_grid
import matplotlib.image
import sys
sys.path.append('../')
from src.logger import create_logger
from src.loader import load_images, DataSampler
from src.utils import bool_flag
from os import curdir, sep
from aux import *
class S(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
if self.path == "/":
self._set_headers()
params.offset = 1106
interpolations, y_pred = run_interpolations(params, test_data)
outfname = compute_grid(interpolations, params)
html = create_html_result(outfname, y_pred, params)
self.wfile.write(html)
elif self.path.startswith('/get_image'):
self._set_headers()
print self.path
params.offset = int(self.path.split('fname=')[1])
interpolations, y_pred = run_interpolations(params, test_data)
outfname = compute_grid(interpolations, params)
html = create_html_result(outfname, y_pred, params)
self.wfile.write(html)
sendReply = False
if self.path.endswith(".png"):
mimetype='image/png'
sendReply = True
if sendReply == True:
#Open the static file requested and send it
                f = open(curdir + sep + self.path, 'rb')  # open the requested static file in binary mode
self.send_response(200)
self.send_header('Content-type',mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
def do_HEAD(self):
self._set_headers()
def do_POST(self):
        # Parse the posted slider values and re-render the page
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
self._set_headers()
# parse post data
alphas = np.zeros(40)
for i in range(40):
tmp = post_data.split('attr_%i=' % i)[1].split('&')[0]
alphas[i] = float(tmp)
interpolations, y_pred = run_interpolations(params, test_data, alphas=alphas)
outfname = compute_grid(interpolations, params)
html = create_html_result(outfname, alphas, params)
self.wfile.write(html)
def run(server_class=HTTPServer, handler_class=S, port=80):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print 'Starting httpd...'
httpd.serve_forever()
####################################################################
# INTERPOLATION
# parse parameters
parser = argparse.ArgumentParser(description='Attributes swapping')
parser.add_argument("--model_path", type=str, default="",
help="Trained model path")
parser.add_argument("--outdir", type=str, default="",
help="out dir suffix")
parser.add_argument("--dataset", type=str, default="test",
help="dataset type: train, val, test")
parser.add_argument("--port", type=str, default="9999",
help="http server port")
parser.add_argument("--mode", type=str, default="grid",
help="alpha mode, mult or grid")
parser.add_argument("--n_images", type=int, default=1,
help="Number of images to modify")
parser.add_argument("--offset", type=int, default=6,
help="First image index")
parser.add_argument("--n_interpolations", type=int, default=10,
help="Number of interpolations per image")
parser.add_argument("--alpha_mult", type=float, default=100,
help="How much multiply alpha by")
parser.add_argument("--alpha_min", type=float, default=1,
help="Min interpolation value")
parser.add_argument("--alpha_max", type=float, default=1,
help="Max interpolation value")
parser.add_argument("--plot_size", type=int, default=5,
help="Size of images in the grid")
parser.add_argument("--selected_attr", type=str, default="0",
help="selected attribute")
parser.add_argument("--row_wise", type=bool_flag, default=True,
help="Represent image interpolations horizontally")
parser.add_argument("--output_path", type=str, default="output.png",
help="Output path")
params = parser.parse_args()
# check parameters
assert os.path.isfile(params.model_path), params.model_path
assert params.n_images >= 1 and params.n_interpolations >= 2
# patch to load model trained with newer pytorch version
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
logger = create_logger(None)
ae = torch.load(params.model_path).eval()
# restore main parameters
params.debug = False
params.batch_size = 32
params.v_flip = False
params.h_flip = False
params.img_sz = ae.img_sz
params.attr = ae.attr
params.n_attr = ae.n_attr
# load dataset
data, attributes = load_images(params)
#test_data = DataSampler(data[2], attributes[2], params)
if params.dataset == 'train':
data_ix = 0
elif params.dataset == 'val':
data_ix = 1
elif params.dataset == 'test':
data_ix = 2
test_data = DataSampler(data[data_ix], attributes[data_ix], params)
def get_interpolations(ae, images, attributes, params, alphas):
"""
Reconstruct images / create interpolations
"""
ae.eval()
assert len(images) == len(attributes)
enc_outputs = ae.encode(images)
# separate latent code and attribute prediction
bs = enc_outputs[0].size(0)
z_all = enc_outputs[-1] # full latent code
n_pred = params.n_attr
y_pred = z_all[:,-n_pred:,:,:]
z_latent = z_all[:,:-n_pred,:,:]
enc_outputs[-1] = z_latent.contiguous()
y_pred = torch.mean(y_pred.contiguous().view(bs, params.n_attr, -1), dim=2)
outputs = []
# original image / reconstructed image / interpolations
new_image = ae.decode(enc_outputs, y_pred)[-1]
outputs.append(images)
outputs.append(new_image)
y_pred_tmp = y_pred.clone()
if alphas is not None:
print 'fixing alphas:', alphas
for attr in range(40):
y_pred_tmp[:,attr] = alphas[attr]
outputs.append(ae.decode(enc_outputs, y_pred_tmp)[-1])
# return stacked images
return torch.cat([x.unsqueeze(1) for x in outputs], 1).data.cpu(), y_pred.data.cpu().numpy().tolist()[0]
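# Hedged reading of get_interpolations above: the encoder's last feature map carries
# the n_attr attribute predictions as its final channels; y_pred is their spatial
# mean per attribute, and the remaining channels form the latent code handed back to
# the decoder. Overwriting y_pred with the slider values (alphas) before the second
# decode is what produces the manipulated image in the third column of the grid.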
def run_interpolations(params, test_data, alphas=None):
interpolations = []
for k in range(0, params.n_images, 100):
i = params.offset + k
j = params.offset + min(params.n_images, k + 100)
images, attributes = test_data.eval_batch(i, j)
generated_images, y_pred = get_interpolations(ae, images, attributes, params, alphas)
interpolations.append(generated_images)
interpolations = torch.cat(interpolations, 0)
return interpolations, y_pred
def get_grid(images, row_wise, plot_size=5):
"""
Create a grid with all images.
"""
n_images, n_columns, img_fm, img_sz, _ = images.size()
if not row_wise:
images = images.transpose(0, 1).contiguous()
images = images.view(n_images * n_columns, img_fm, img_sz, img_sz)
images.add_(1).div_(2.0)
return make_grid(images, nrow=(n_columns if row_wise else n_images))
def compute_grid(interpolations, params):
# generate the grid / save it to a PNG file
grid = get_grid(interpolations, params.row_wise, params.plot_size)
attrs = [int(x) for x in params.selected_attr.split(',')]
outdir = 'imgs'
os.system('mkdir -p %s' % outdir)
outfname = '%s/tmp.png' % (outdir)
matplotlib.image.imsave(outfname, grid.numpy().transpose((1, 2, 0)))
print 'saved', outfname
return outfname
#################################################################
# MAIN
if __name__ == "__main__":
run(port=int(params.port))
``` |
{
"source": "jlfilho/CISRDCNN-keras",
"score": 2
} |
#### File: CISRDCNN-keras/libs/cisrdcnn.py
```python
import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ReLU, BatchNormalization, Add
from tensorflow.keras.layers import UpSampling2D,Conv2DTranspose
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, LambdaCallback
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.initializers import RandomNormal
import restore
from util import DataLoader, plot_test_images
from losses import psnr3 as psnr
from losses import euclidean, cosine, charbonnier
class CISRDCNN():
def __init__(self,
height_lr=16, width_lr=16, channels=3,
upscaling_factor=4, lr = 1e-4,
stage=None,
colorspace = 'RGB',
fulltrain = False
):
# Low-resolution image dimensions
self.height_lr = height_lr
self.width_lr = width_lr
# High-resolution image dimensions
if upscaling_factor not in [1, 2, 4, 8]:
raise ValueError(
                'Upscaling factor must be 1, 2, 4, or 8. You chose {}'.format(upscaling_factor))
self.upscaling_factor = upscaling_factor
self.height_hr = int(self.height_lr * self.upscaling_factor)
self.width_hr = int(self.width_lr * self.upscaling_factor)
# Low-resolution and high-resolution shapes
self.channels = channels
self.colorspace = colorspace
self.stage = stage
self.shape_lr = (self.height_lr, self.width_lr, self.channels)
self.shape_hr = (self.height_hr, self.width_hr, self.channels)
self.loss = "mse"
self.lr = lr
if (stage=='dbcnn'):
print("Compiling DBCNN")
self.dbcnn = self.build_dbcnn()
self.compile_model(self.dbcnn)
if (stage=='uscnn'):
print("Compiling USCNN")
self.dbcnn = self.build_dbcnn()
self.dbcnn.trainable = False
self.compile_model(self.dbcnn)
self.uscnn = self.build_uscnn()
self.compile_model(self.uscnn)
if (stage=='qecnn'):
print("Compiling QECNN")
self.dbcnn = self.build_dbcnn()
self.dbcnn.trainable = False
self.compile_model(self.dbcnn)
self.uscnn = self.build_uscnn()
self.uscnn.trainable = False
self.compile_model(self.uscnn)
self.qecnn = self.build_qecnn()
self.compile_model(self.qecnn)
if (stage=='cisrdcnn'):
print("Compiling CISRDCNN")
self.dbcnn = self.build_dbcnn()
self.dbcnn.trainable = True
self.compile_model(self.dbcnn)
self.uscnn = self.build_uscnn()
self.uscnn.trainable = True
self.compile_model(self.uscnn)
self.qecnn = self.build_qecnn()
self.qecnn.trainable = True
self.compile_model(self.qecnn)
self.cisrdcnn = self.build_cisrdcnn()
self.cisrdcnn.trainable = True
self.compile_model(self.cisrdcnn)
def compile_model(self, model):
"""Compile the DBCNN with appropriate optimizer"""
model.compile(
loss=self.loss,
optimizer= SGD(lr=self.lr, momentum=0.9, decay=1e-6, nesterov=True),# Adam(lr=self.lr,beta_1=0.9, beta_2=0.999),
metrics=[psnr]
)
def build_dbcnn(self,k1=20):
def DBCNN(input):
x=input
for i in range(k1-1):
x = Conv2D(filters= 64, kernel_size = (3,3), strides=1,padding='same')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(filters= self.channels, kernel_size = (3,3), strides=1, padding='same', name='K1')(x)
x = ReLU()(x)
x = Add()([x, input])
return x
inputs = Input(shape=(None, None, self.channels))
x = DBCNN(inputs)
model = Model(inputs=inputs, outputs=x,name="DBCNN")
#logging.debug(model.summary())
return model
def build_uscnn(self,k2=10):
def USCNN(input):
x = input
for i in range(k2-1):
x = Conv2D(filters= 64, kernel_size = (3,3), strides=1,padding='same')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = UpSampling2D(size=(self.upscaling_factor, self.upscaling_factor),interpolation="nearest")(x)
x = Conv2D(filters= self.channels, kernel_size = (9,9), strides=1,padding='same')(x)
x = ReLU()(x)
return x
inputs = Input(shape=(None, None, self.channels))
x = self.dbcnn(inputs)
x = USCNN(x)
model = Model(inputs=inputs, outputs=x, name="USCNN")
#logging.debug(model.summary())
return model
def build_qecnn(self,k3=20):
def QECNN(input):
x=input
for i in range(k3-1):
x = Conv2D(filters= 64, kernel_size = (3,3), strides=1,padding='same')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(filters= self.channels, kernel_size = (3,3), strides=1, padding='same', name='K3')(x)
x = ReLU()(x)
x = Add()([x, input])
return x
z = Input(shape=(None, None, self.channels))
x = self.uscnn(z)
hr = QECNN(x)
model = Model(inputs=z, outputs=hr,name="QECNN")
#logging.debug(model.summary())
return model
def build_cisrdcnn(self):
z = Input(shape=(None, None, self.channels))
hr = self.qecnn(z)
model = Model(inputs=z, outputs=hr,name="CISRDCNN")
#logging.debug(model.summary())
return model
def train_dbcnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='DBCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.dbcnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.dbcnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def train_uscnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='CISRDCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.uscnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.uscnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def train_qecnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='CISRDCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.qecnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.qecnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def train_cisrdcnn(self,
epochs=50,
batch_size=8,
steps_per_epoch=5,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=5,
model_name='CISRDCNN',
media_type='i',
datapath_train='../../../videos_harmonic/MYANMAR_2160p/train/',
datapath_validation='../../../videos_harmonic/MYANMAR_2160p/validation/',
datapath_test='../../../videos_harmonic/MYANMAR_2160p/test/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/',
qf=30
):
# Create data loaders
train_loader = DataLoader(
datapath_train, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
validation_loader = None
if datapath_validation is not None:
validation_loader = DataLoader(
datapath_validation, batch_size,
self.height_hr, self.width_hr,
self.upscaling_factor,
crops_per_image,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
test_loader = None
if datapath_test is not None:
test_loader = DataLoader(
datapath_test, 1,
self.height_hr, self.width_hr,
self.upscaling_factor,
1,
media_type,
self.channels,
self.colorspace,
self.stage,
qf
)
# Callback: tensorboard
callbacks = []
if log_tensorboard_path:
tensorboard = TensorBoard(
log_dir=os.path.join(log_tensorboard_path, model_name),
histogram_freq=0,
write_graph=True,
update_freq=log_tensorboard_update_freq
)
callbacks.append(tensorboard)
else:
print(">> Not logging to tensorboard since no log_tensorboard_path is set")
# Callback: Stop training when a monitored quantity has stopped improving
earlystopping = EarlyStopping(
monitor='val_loss',
patience=30, verbose=1,
restore_best_weights=True )
callbacks.append(earlystopping)
# Callback: Reduce lr when a monitored quantity has stopped improving
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
patience=10, min_lr=1e-6,verbose=1)
callbacks.append(reduce_lr)
# Callback: save weights after each epoch
modelcheckpoint = ModelCheckpoint(
os.path.join(log_weight_path, model_name + '_{}X.tf'.format(self.upscaling_factor)),
monitor='val_loss',
save_best_only=True,
save_weights_only=True)
callbacks.append(modelcheckpoint)
# Callback: test images plotting
if datapath_test is not None:
testplotting = LambdaCallback(
on_epoch_end=lambda epoch, logs: None if ((epoch+1) % print_frequency != 0 ) else plot_test_images(
self.cisrdcnn,
test_loader,
datapath_test,
log_test_path,
epoch+1,
name=model_name,
channels=self.channels,
colorspace=self.colorspace))
callbacks.append(testplotting)
#callbacks.append(TQDMCallback())
self.cisrdcnn.fit(
train_loader,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_loader,
validation_steps=steps_per_validation,
callbacks=callbacks,
shuffle=True,
use_multiprocessing=False,
workers=workers
)
def predict_dbcnn(self,
lr_path = None,
sr_path = None,
print_frequency = False,
qp = 8,
fps = None,
media_type = None,
gpu = False
):
""" lr_videopath: path of video in low resoluiton
sr_videopath: path to output video
print_frequency: print frequncy the time per frame and estimated time, if False no print
crf: [0,51] QP parameter 0 is the best quality and 51 is the worst one
fps: framerate if None is use the same framerate of the LR video
media_type: type of media 'v' to video and 'i' to image
"""
if(media_type == 'v'):
time_elapsed = restore.write_srvideo(self.dbcnn,lr_path,sr_path,self.upscaling_factor,print_frequency=print_frequency,crf=qp,fps=fps,gpu=gpu)
elif(media_type == 'i'):
time_elapsed = restore.write_sr_images(self.dbcnn, lr_imagepath=lr_path, sr_imagepath=sr_path,scale=self.upscaling_factor)
else:
print(">> Media type not defined or not suported!")
return 0
return time_elapsed
def predict_uscnn(self,
lr_path = None,
sr_path = None,
print_frequency = False,
qp = 8,
fps = None,
media_type = None,
gpu = False
):
""" lr_videopath: path of video in low resoluiton
sr_videopath: path to output video
print_frequency: print frequncy the time per frame and estimated time, if False no print
crf: [0,51] QP parameter 0 is the best quality and 51 is the worst one
fps: framerate if None is use the same framerate of the LR video
media_type: type of media 'v' to video and 'i' to image
"""
if(media_type == 'v'):
time_elapsed = restore.write_srvideo(self.uscnn,lr_path,sr_path,self.upscaling_factor,print_frequency=print_frequency,crf=qp,fps=fps,gpu=gpu)
elif(media_type == 'i'):
time_elapsed = restore.write_sr_images(self.uscnn, lr_imagepath=lr_path, sr_imagepath=sr_path,scale=self.upscaling_factor)
else:
print(">> Media type not defined or not suported!")
return 0
return time_elapsed
def predict_cisrdcnn(self,
lr_path = None,
sr_path = None,
print_frequency = False,
qp = 8,
fps = None,
media_type = None,
gpu = False
):
""" lr_videopath: path of video in low resoluiton
sr_videopath: path to output video
print_frequency: print frequncy the time per frame and estimated time, if False no print
crf: [0,51] QP parameter 0 is the best quality and 51 is the worst one
fps: framerate if None is use the same framerate of the LR video
media_type: type of media 'v' to video and 'i' to image
"""
if(media_type == 'v'):
time_elapsed = restore.write_srvideo(self.cisrdcnn,lr_path,sr_path,self.upscaling_factor,print_frequency=print_frequency,crf=qp,fps=fps,gpu=gpu)
elif(media_type == 'i'):
time_elapsed = restore.write_sr_images(self.cisrdcnn, lr_imagepath=lr_path, sr_imagepath=sr_path,scale=self.upscaling_factor)
else:
print(">> Media type not defined or not suported!")
return 0
return time_elapsed
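# Hedged shape sketch (illustration only, not part of the training script): DBCNN
# deblocks the compressed LR input at its native size, USCNN upsamples it by
# upscaling_factor, and QECNN refines the upscaled result, so a 16x16 LR patch
# becomes 32x32 for a 2x model.
def _example_forward():
    import numpy as np
    net = CISRDCNN(height_lr=16, width_lr=16, upscaling_factor=2, channels=3,
                   colorspace='RGB', stage='cisrdcnn')
    lr = np.random.rand(1, 16, 16, 3).astype('float32')
    sr = net.cisrdcnn.predict(lr)
    print(lr.shape, '->', sr.shape)  # expected: (1, 16, 16, 3) -> (1, 32, 32, 3)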
def main():
logging.basicConfig(filename='../logs/cisrdcnn.log', level=logging.INFO)
logging.info('Started')
#------------------------------------------------------
    # Instantiate the CISRDCNN object
logging.info(">> Creating the CISRDCNN network")
    cisrdcnn = CISRDCNN(height_lr=16, width_lr=16, lr=1e-3, upscaling_factor=2, channels=3, colorspace='RGB', stage='cisrdcnn', fulltrain=True)  # stage must be set, otherwise no sub-network is built and train_cisrdcnn() fails
#cisrdcnn.load_weights(weights='../model/CISRDCNN_v1_2X.tf')
""" datapath = '../../data/videoset/540p/'
outpath = '../out/540p_2X/'
for dirpath, _, filenames in os.walk(datapath):
for filename in [f for f in sorted(filenames) if any(filetype in f.lower() for filetype in ['jpeg', 'png', 'jpg','mp4','264','webm','wma'])]:
print(os.path.join(dirpath, filename),outpath+filename.split('.')[0]+'.mp4')
t = cisrdcnn.predict(
lr_path=os.path.join(dirpath, filename),
sr_path=outpath+filename.split('.')[0]+'.mp4',
qp=0,
media_type='v',
gpu=False
) """
""" datapath = '../../data/videoset/360p/'
outpath = '../out/360p_2X/'
i=1
for dirpath, _, filenames in os.walk(datapath):
for filename in [f for f in sorted(filenames) if any(filetype in f.lower() for filetype in ['jpeg', 'png', 'jpg','mp4','264','webm','wma'])]:
if(i==17):
print(os.path.join(dirpath, filename),outpath+filename.split('.')[0]+'.mp4')
t = cisrdcnn.predict(
lr_path=os.path.join(dirpath, filename),
sr_path=outpath+filename.split('.')[0]+'.mp4',
qp=0,
media_type='v',
gpu=False
)
i+=1 """
cisrdcnn.train_cisrdcnn(
epochs=10000,
batch_size=32,
steps_per_epoch=10,
steps_per_validation=5,
crops_per_image=4,
print_frequency=5,
log_tensorboard_update_freq=10,
workers=1,
max_queue_size=10,
model_name='CISRDCNN',
datapath_train='../../../Documents/data/train_large/data_large/',
datapath_validation='../../data/val_large/',
datapath_test='../../data/benchmarks/Set5/',
log_weight_path='../model/',
log_tensorboard_path='../logs/',
log_test_path='../test/'
)
#------------------------------------------------------
logging.info('Finished')
# Run the CISRDCNN network
if __name__ == "__main__":
main()
``` |
{
"source": "jlfilho/SR-LiveS",
"score": 2
} |
#### File: collector_server/api/collector.py
```python
import json
from flask_restful import Resource, Api
from flask import make_response
from flask_cors import CORS
import logging
def output_json(obj, code, headers=None):
resp = make_response(json.dumps(obj), code)
resp.headers.extend(headers or {})
return resp
def create_api(app):
DEFAULT_REPRESENTATIONS = {'application/json': output_json}
app.config['CORS_HEADERS'] = "Content-Type"
CORS(app)
api = Api(app)
api.representations = DEFAULT_REPRESENTATIONS
logging.getLogger('flask_cors').level = logging.DEBUG
    # Register the blueprints
from api.resources.logs import metrics_blue, LogsResource, Index, GeneralResource
app.register_blueprint(metrics_blue)
    # Register the resources
api.add_resource(Index, '/')
api.add_resource(LogsResource, '/metrics')
api.add_resource(GeneralResource, '/general')
return app
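# Hedged usage sketch (entry point assumed, not part of this module):
#   from flask import Flask
#   app = create_api(Flask(__name__))
#   app.run(host='0.0.0.0', port=8080)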
```
#### File: share/tools/metrics.py
```python
import os
import ast
import pandas as pd
import numpy as np
from datetime import datetime
import time
import logging
level_config = {'debug': logging.DEBUG, 'info': logging.INFO}
FILE_SIZE = 500
BYTES_PER_PKT = 1500.0*8
MILLISEC_IN_SEC = 1000.0
EXP_LEN = 1000 # millisecond
class Metric:
def __init__(self,name,mi=1., lbd=1., mi_s=1.,log_level='debug'):
self.name = name
self.mi = mi
self.lbd = lbd
self.mi_s = mi_s
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
self.logger = logging.getLogger(__name__)
def calc(self,listRate,listRebuffer):
pass
def tabulation(self,listQoE,scores = pd.DataFrame(),abrRule = 'abr Rule',prefix=''):
scores_tmp = pd.DataFrame()
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores_tmp['Average value'] = np.asarray([i[0] for i in listQoE])
scores_tmp['Metrics'] = [ self.name for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = pd.DataFrame()
scores_tmp['Average value'] = np.asarray([i[1] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Bitrate Utility' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp['Average value'] = np.asarray([i[2] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Smoothness Penalty' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = pd.DataFrame()
scores_tmp['Average value'] = np.asarray([i[3] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Rebuffering Penalty' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
scores_tmp = pd.DataFrame()
scores_tmp['Average value'] = np.asarray([i[4] for i in listQoE])
scores_tmp['Metrics'] = [ prefix+'_'+'Startup Delay' for i in listQoE]
scores_tmp['abr Rule'] = [ abrRule for i in listQoE]
scores = scores.append(scores_tmp)
return scores
class MetricQoElin(Metric):
def __init__(self,name='',mi=1., lbd=1., mi_s=1.,log_level='debug'):
super().__init__(name,mi, lbd, mi_s,log_level)
def calc(self,listRate,listRebuffer):
bitrateUtility = np.asarray(listRate).sum()
startupDelay = self.mi_s*np.asarray(listRebuffer[0])
rebufferingPenalty = self.mi*np.asarray(listRebuffer[1:]).sum()
smoothnessPenalty = self.lbd*np.abs(np.asarray(listRate[1:])-np.asarray(listRate[:-1])).sum()
qoe = bitrateUtility - (smoothnessPenalty + rebufferingPenalty + startupDelay)
# print(qoe)
return qoe,bitrateUtility,smoothnessPenalty,rebufferingPenalty,startupDelay
class MetricQoEMean(Metric):
def __init__(self,name='',mi=1., lbd=1., mi_s=1.,log_level='debug'):
super().__init__(name,mi, lbd, mi_s,log_level)
def calc(self,listRate,listRebuffer):
bitrateUtility = np.asarray(listRate[1:])
startupDelay = self.mi_s*np.asarray(listRebuffer[0])
rebufferingPenalty = self.mi*np.asarray(listRebuffer[1:])
smoothnessPenalty = self.lbd*np.abs(np.asarray(listRate[1:])-np.asarray(listRate[:-1]))
qoe = bitrateUtility - (smoothnessPenalty + rebufferingPenalty + startupDelay)
# print(qoe.sum())
return qoe.sum(),bitrateUtility.sum(),smoothnessPenalty.sum(),rebufferingPenalty.sum(),startupDelay.sum()
class MetricQoElog(Metric):
def __init__(self,name='',mi=1., lbd=1., mi_s=1.,log_level='debug'):
super().__init__(name+'_'+'QoE_log',mi, lbd, mi_s,log_level)
def calc(self,listRate,listRebuffer):
bitrateUtility = np.log(np.asarray(listRate)/np.asarray(listRate).min()).sum()
startupDelay = self.mi_s*np.asarray(listRebuffer[0])
rebufferingPenalty = self.mi*np.asarray(listRebuffer[1:]).sum()
smoothnessPenalty = self.lbd*np.abs(np.log(np.asarray(listRate[1:])/np.asarray(listRate[1:]).min()) \
- np.log(np.asarray(listRate[:-1])/np.asarray(listRate[1:]).min())).sum()
qoe=bitrateUtility - (smoothnessPenalty + rebufferingPenalty + startupDelay)
return qoe,bitrateUtility,smoothnessPenalty,rebufferingPenalty,startupDelay
class MetricQoEhd(Metric):
def __init__(self,name='',mi=1., lbd=1., mi_s=1.,log_level='debug'):
super().__init__(name+'_'+'QoE_hd',mi, lbd, mi_s,log_level)
def calc(self,listRate,listRebuffer):
bitrateUtility = (np.asarray(listRate)*100).mean()
        rebufferingPenalty = self.mi*(np.asarray(listRebuffer)).mean()  # NOTE: the original referenced an undefined self.rebufferPenalty; self.mi is the rebuffering coefficient used by the other QoE classes
        smoothnessPenalty = np.abs((np.asarray(listRate[1:])*100)-(np.asarray(listRate[:-1])*100)).mean()
        qoe=(np.asarray(listRate)*100).sum()-self.mi*(np.asarray(listRebuffer)).sum()-np.abs((np.asarray(listRate[1:])*100)-(np.asarray(listRate[:-1])*100)).sum()
return qoe,bitrateUtility,rebufferingPenalty,smoothnessPenalty
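# Hedged reading of the linear QoE model implemented by MetricQoElin above:
#   QoE_lin = sum_n R_n - mi * sum_{n>0} T_n - mi_s * T_0 - lbd * sum_n |R_{n+1} - R_n|
# where R_n is the bitrate of segment n (after the div_by scaling in parseLogs),
# T_0 the startup delay and T_n the rebuffering time of segment n.
def _qoe_lin_example():
    # tiny numeric check with mi = lbd = mi_s = 1:
    # rates [1, 2, 2] Mbps, startup 0.5 s, rebuffering [0.0, 0.1] s
    # -> 5 - 0.1 - 0.5 - (|2-1| + |2-2|) = 3.4
    qoe, util, smooth, rebuf, startup = MetricQoElin(name='QoE_lin').calc(
        [1.0, 2.0, 2.0], [0.5, 0.0, 0.1])
    print(qoe, util, smooth, rebuf, startup)  # expect 3.4 5.0 1.0 0.1 0.5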
def parseLogs(metricQoE,path = '../results-collector/abrBola/',log_level='info',div_by=1e3):
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
files = os.listdir(path)
listQoE = []
for file in files:
# print(path+file)
f = open(path+file, 'r')
lines = f.readlines()
logs = []
i=0
for line in lines:
logs.append(ast.literal_eval(line.strip()))
#print("bitrate: {} rebufferTime: {}".format(logs[i]['bitrate'],logs[i]['rebufferTime']))
i+=1
# print("Count segments: {}".format(i))
df = pd.DataFrame(logs)
#print(df['bitrate']/1e6,df['rebufferTime'])
mt = metricQoE.calc(df['bitrate']/div_by,df['rebufferTime'])
logger.debug(mt)
listQoE.append(mt)
return listQoE
def parseLogsBy(path = '../results-collector/abrBola',file_type='json',log_level='debug'):
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
frames = []
for dirpath, subdirpath, filenames in os.walk(path):
client = 0
for filename in [f for f in filenames if any(filetype in f.lower() for filetype in [file_type])]:
current_file = os.path.join(dirpath, filename)
logger.debug(current_file)
f = open(current_file, 'r')
lines = f.readlines()
logs = []
for line in lines:
logs.append(ast.literal_eval(line.strip()))
df = pd.DataFrame(logs)
df['scenario'] = dirpath.split('/')[-2]
df['abr Rule'] = filename.split('_')[1]
df['client'] = client
df['calc_bitrate'] = (((df['totalBytesLength']*8)/1000)/df['mediaduration'])
frames.append(df)
client +=1
result = pd.concat(frames)
return result
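# Hedged note on calc_bitrate above: totalBytesLength * 8 converts bytes to bits,
# /1000 gives kilobits, and dividing by mediaduration (assumed to be in seconds)
# yields the per-segment throughput in kbit/s, e.g. 1,500,000 bytes over a 4 s
# segment -> 12,000,000 bits / 1000 / 4 = 3000 kbit/s.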
def writeTrace(output=None,df=None):
t = df.Timestamp.apply(lambda x: time.mktime(datetime.strptime(x, "%Y.%m.%d_%H.%M.%S").timetuple()))
bw = df['DL_bitrate']
dfbw = pd.DataFrame()
dfbw['time']=range(0,len(t))
dfbw['DL_bitrate']=bw.reset_index(drop=True)
dfbw.to_csv(output,index=False)
def parseTraces(input_path = '../../traces/5G-production-dataset/5G-production-dataset/Amazon_Prime/Driving/',
output_path=None,minimum=0,maximum=1e15,file_type='csv',parameter='DL_bitrate',log_level='info'):
log_level = level_config[log_level.lower()]
logging.basicConfig(level=log_level)
logger = logging.getLogger(__name__)
frames = []
for dirpath, subdirpath, filenames in os.walk(input_path):
for filename in [f for f in filenames if any(filetype in f.lower() for filetype in [file_type])]:
current_file = os.path.join(dirpath, filename)
logger.debug("input file: {}".format(current_file))
df = pd.read_csv(current_file)
if output_path is not None:
df = df[(df[parameter] >= minimum) & (df[parameter] <= maximum) ]
logger.debug("output file: {}".format(output_path+filename))
writeTrace(output=output_path+filename,df=df)
frames.append(df)
result = pd.concat(frames)
return result
def maker_mahimahi_trace(IN_FILE = None,OUT_FILE = None):
files = os.listdir(IN_FILE)
for trace_file in files:
if os.stat(IN_FILE + trace_file).st_size >= FILE_SIZE:
df = pd.read_csv(IN_FILE + trace_file)
with open(OUT_FILE + trace_file, 'w') as mf:
millisec_time = 0
mf.write(str(millisec_time) + '\n')
for i in range(1,len(df.DL_bitrate)):
throughput = (float(df.DL_bitrate[i])*1000)
pkt_per_millisec = throughput / BYTES_PER_PKT / MILLISEC_IN_SEC
#print("pkt_per_millisec: {}".format(pkt_per_millisec))
millisec_count = 0
pkt_count = 0
while True:
millisec_count += 1
millisec_time += 1
to_send = (millisec_count * pkt_per_millisec) - pkt_count
to_send = np.floor(to_send)
#print("to_send: {}".format(to_send))
                        for _ in range(int(to_send)):
mf.write(str(millisec_time) + '\n')
# print(millisec_time)
pkt_count += to_send
if millisec_count >= EXP_LEN:
break
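# Hedged worked example of the packet-rate arithmetic above, assuming DL_bitrate is
# reported in kbit/s: a sample of 12000 kbit/s gives
#   throughput       = 12000 * 1000            = 12,000,000 bit/s
#   pkt_per_millisec = 12e6 / (1500 * 8) / 1e3 = 1.0 packet per millisecond,
# so roughly one packet timestamp is written per millisecond over the EXP_LEN
# (1000 ms) window that each trace sample covers.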
``` |
{
"source": "jlfilho/sr-on-fog",
"score": 2
} |
#### File: sr-on-fog/real_exp/run_video.py
```python
import os
import sys
import signal
import subprocess
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from pyvirtualdisplay import Display
from time import sleep
# TO RUN: download https://pypi.python.org/packages/source/s/selenium/selenium-2.39.0.tar.gz
# run sudo apt-get install python-setuptools
# run sudo apt-get install xvfb
# after untar, run sudo python setup.py install
# follow directions here: https://pypi.python.org/pypi/PyVirtualDisplay to install pyvirtualdisplay
# For chrome, need chrome driver: https://code.google.com/p/selenium/wiki/ChromeDriver
# chromedriver variable should be path to the chromedriver
# the default location for firefox is /usr/bin/firefox and chrome binary is /usr/bin/google-chrome
# if they are at those locations, don't need to specify
def timeout_handler(signum, frame):
raise Exception("Timeout")
abr_algo = sys.argv[1]
run_time = int(sys.argv[2])
exp_id = sys.argv[3]
# ---------------------------------------------------
# ---- change localhost in url to server address ----
# ---------------------------------------------------
# |
# v
url = 'localhost/' + 'myindex_' + abr_algo + '.html'
# timeout signal
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(run_time + 30)
try:
# copy over the chrome user dir
#default_chrome_user_dir = '../abr_browser_dir/chrome_data_dir'
default_chrome_user_dir = '/home/joao/.config/google-chrome'
chrome_user_dir = '/tmp/chrome_user_dir_real_exp_' + abr_algo
os.system('rm -r ' + chrome_user_dir)
os.system('cp -r ' + default_chrome_user_dir + ' ' + chrome_user_dir)
# start abr algorithm server
if abr_algo == 'RL':
command = 'exec /usr/bin/python ../rl_server/rl_server_no_training.py ' + exp_id
elif abr_algo == 'fastMPC':
command = 'exec /usr/bin/python ../rl_server/mpc_server.py ' + exp_id
elif abr_algo == 'robustMPC':
command = 'exec /usr/bin/python ../rl_server/robust_mpc_server.py ' + exp_id
else:
command = 'exec /usr/bin/python ../rl_server/simple_server.py ' + abr_algo + ' ' + exp_id
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
sleep(2)
# to not display the page in browser
display = Display(visible=1, size=(800,600))
display.start()
# initialize chrome driver
options=Options()
chrome_driver = '../abr_browser_dir/chromedriver'
options.add_argument('--no-sandbox')
options.add_argument('--user-data-dir=' + chrome_user_dir)
options.add_argument('--ignore-certificate-errors')
options.add_argument('--disable-dev-shm-usage')
driver=webdriver.Chrome(chrome_driver, chrome_options=options)
# run chrome
print(url)
driver.set_page_load_timeout(60)
driver.get(url)
sleep(run_time)
driver.quit()
display.stop()
# kill abr algorithm server
proc.send_signal(signal.SIGINT)
# proc.kill()
print('done')
except Exception as e:
try:
display.stop()
except:
pass
try:
driver.quit()
except:
pass
try:
proc.send_signal(signal.SIGINT)
except:
pass
print(e)
``` |
{
"source": "jlfilho/sr",
"score": 2
} |
#### File: jlfilho/sr/teste.py
```python
import tensorflow as tf
import json
import argparse
from models.dataset import Dataset
from models.model import Model
class RTVSRGAN(Model):
def __init__(self, args):
super().__init__(args)
self._prediction_offset = self._scale_factor * 4
def get_data(self):
data_batch, initializer = self.dataset.get_data()
lr_batch = tf.cast(data_batch['lr1'], tf.float32) / 255.0
hr_batch = tf.cast(data_batch['hr'], tf.float32) / 255.0
return [lr_batch, hr_batch], initializer
def get_placeholder(self):
input_ph = tf.placeholder(tf.float32, shape=[1, None, None, 1], name="x")
return [input_ph]
def load_model(self, data_batch):
lr_batch = data_batch[0]
with tf.variable_scope('espcn'):
if not self._using_dataset:
lr_batch = tf.pad(lr_batch, [[0, 0], [4, 4], [4, 4], [0, 0]], 'SYMMETRIC')
net = tf.layers.conv2d(lr_batch, 64, 5, activation=tf.nn.tanh, padding='valid', name='conv1',
kernel_initializer=tf.keras.initializers.he_normal())
net = tf.layers.conv2d(net, 32, 3, activation=tf.nn.tanh, padding='valid', name='conv2',
kernel_initializer=tf.keras.initializers.he_normal())
net = tf.layers.conv2d(net, self._scale_factor ** 2, 3, activation=tf.nn.sigmoid, padding='valid',
name='conv3', kernel_initializer=tf.keras.initializers.he_normal())
predicted_batch = tf.depth_to_space(net, self._scale_factor, name='prediction')
espcn_variables = tf.trainable_variables(scope='espcn')
for variable in espcn_variables:
if 'conv3' in variable.name:
self.lr_multipliers[variable.name] = 0.1
else:
self.lr_multipliers[variable.name] = 1.0
if self._using_dataset:
tf.summary.image('Low_resolution', data_batch[0][:, 4:-4, 4:-4], max_outputs=self._save_num)
tf.summary.image('High_resolution',
data_batch[1][:, self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
max_outputs=self._save_num)
tf.summary.image('High_resolution_prediction', predicted_batch, max_outputs=self._save_num)
return predicted_batch
def get_loss(self, data_batch, predicted_batch):
loss = tf.losses.mean_squared_error(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch)
tf.summary.scalar('MSE', loss)
tf.summary.scalar('PSNR', tf.reduce_mean(tf.image.psnr(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
tf.summary.scalar('SSIM', tf.reduce_mean(tf.image.ssim(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
return loss
def calculate_metrics(self, data_batch, predicted_batch):
diff = data_batch[1][:, self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset] - predicted_batch
diff_sqr = tf.square(diff)
mse = ('MSE', tf.reduce_mean(diff_sqr, axis=[1, 2, 3]))
psnr = ('PSNR', tf.squeeze(tf.image.psnr(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
ssim = ('SSIM', tf.squeeze(tf.image.ssim(
data_batch[1][:,
self._prediction_offset:-self._prediction_offset,
self._prediction_offset:-self._prediction_offset],
predicted_batch,
max_val=1.0)))
return [mse, psnr, ssim]
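# Hedged note on the ESPCN-style head above: the final conv emits scale_factor**2
# channels and tf.depth_to_space rearranges them into the spatial dimensions, so a
# valid-padded H x W x (r*r) feature map becomes an (H*r) x (W*r) x 1 prediction,
# e.g. 60 x 60 x 4 -> 120 x 120 x 1 for r = 2.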
TRAINING_LOGDIR='logdir/espcn_batch_32_lr_1e-3_decay_adam/train'
EVAL_LOGDIR='logdir/espcn_batch_32_lr_1e-3_decay_adam/test'
TRAINING_DATASET_PATH='datasets/train_div2k/dataset.tfrecords'
TRAINING_DATASET_INFO_PATH='datasets/train_div2k/dataset_info.txt'
TESTING_DATASET_PATH='datasets/test_div2k/dataset.tfrecords'
TESTING_DATASET_INFO_PATH='datasets/test_div2k/dataset_info.txt'
MODEL='rtvsrgan'
BATCH_SIZE=32
OPTIMIZER='adam'
LEARNING_RATE=1e-3
USE_LR_DECAY_FLAG='--use_lr_decay'
LEARNING_DECAY_RATE=0.1
LEARNING_DECAY_EPOCHS=30
STAIRCASE_LR_DECAY_FLAG='--staircase_lr_decay'
STEPS_PER_LOG=1000
NUM_EPOCHS=100
EPOCHS_PER_EVAL=1
EPOCHS_PER_SAVE=1
SHUFFLE_BUFFER_SIZE=100000
def get_arguments():
parser = argparse.ArgumentParser(description='train one of the models for image and video super-resolution')
parser.add_argument('--model', type=str, default=MODEL, choices=['rtvsrgan','srcnn', 'espcn', 'vespcn', 'vsrnet'],
help='What model to train')
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='Number of images in batch')
parser.add_argument('--dataset_path', type=str, default=TRAINING_DATASET_PATH,
help='Path to the dataset')
parser.add_argument('--dataset_info_path', type=str, default=TRAINING_DATASET_INFO_PATH,
help='Path to the dataset info')
parser.add_argument('--ckpt_path', default=None,
help='Path to the model checkpoint to evaluate')
parser.add_argument('--shuffle_buffer_size', type=int, default=SHUFFLE_BUFFER_SIZE,
help='Buffer size used for shuffling examples in dataset')
parser.add_argument('--optimizer', type=str, default=OPTIMIZER, choices=['adam', 'momentum', 'sgd'],
help='What optimizer to use for training')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate used for training')
parser.add_argument('--use_lr_decay', action='store_true',
help='Whether to apply exponential decay to the learning rate')
parser.add_argument('--lr_decay_rate', type=float, default=LEARNING_DECAY_RATE,
help='Learning rate decay rate used in exponential decay')
parser.add_argument('--lr_decay_epochs', type=int, default=LEARNING_DECAY_EPOCHS,
help='Number of epochs before full decay rate tick used in exponential decay')
parser.add_argument('--staircase_lr_decay', action='store_true',
help='Whether to decay the learning rate at discrete intervals')
parser.add_argument('--num_epochs', type=int, default=NUM_EPOCHS,
help='Number of training epochs')
parser.add_argument('--save_num', type=int, default=NUM_EPOCHS,
help='How many images to write to summary')
parser.add_argument('--steps_per_log', type=int, default=STEPS_PER_LOG,
help='How often to save summaries')
parser.add_argument('--epochs_per_save', type=int, default=EPOCHS_PER_SAVE,
help='How often to save checkpoints')
parser.add_argument('--use_mc', action='store_true',
help='Whether to use motion compensation in video super resolution model')
parser.add_argument('--mc_independent', action='store_true',
help='Whether to train motion compensation network independent from super resolution network')
parser.add_argument('--logdir', type=str, default=TRAINING_LOGDIR,
help='Where to save checkpoints and summaries')
return parser.parse_args()
def main():
args = get_arguments()
print(args)
if args.model == 'rtvsrgan':
model = RTVSRGAN(args)
data_batch, data_initializer = model.get_data()
print(data_batch)
if __name__ == '__main__':
    main()
``` |
{
"source": "jlfilho/sr-tf2",
"score": 2
} |
#### File: models/evsrnet/block.py
```python
import tensorflow as tf
class RB(tf.keras.Model):
def __init__(self,filters=64):
super(RB, self).__init__()
self.conv1 = tf.keras.layers.Conv2D(filters, 3,padding='same',strides=(1, 1),
kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1., mode='fan_in',
distribution='truncated_normal', seed=None))
self.act = tf.keras.layers.ReLU()
self.conv2 = tf.keras.layers.Conv2D(filters, 3,padding='same',strides=(1, 1),
kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1., mode='fan_in',
distribution='truncated_normal', seed=None))
def call(self, inputs):
identity = inputs
out = self.act(self.conv1(inputs))
out = self.conv2(out)
out_fused = tf.keras.layers.add([identity, out])
return out_fused
class Upsample(tf.keras.Model):
def __init__(self,channels=1,scale_factor=2):
super(Upsample, self).__init__()
self.conv = tf.keras.layers.Conv2D(channels*(scale_factor ** 2), 3,
padding='same',strides=(1, 1), kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1.,
mode='fan_in', distribution='truncated_normal', seed=None))
self.upsample = tf.keras.layers.Lambda(lambda x:tf.nn.depth_to_space(x,scale_factor))
def call(self, inputs):
x = self.conv(inputs)
return self.upsample(x)
```
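The two layers above are enough to assemble a toy upscaler: stack a few `RB` blocks and finish with the depth-to-space `Upsample`. The sketch below is only an illustrative shape check, not the repository's EVSRNet definition (that lives in `models/evsrnet/model_evsrnet.py`); it assumes the classes above are importable from `models.evsrnet.block` and that TensorFlow 2 is installed.
```python
import tensorflow as tf
from models.evsrnet.block import RB, Upsample  # classes defined above

class TinySR(tf.keras.Model):
    """Illustrative sketch: conv head, two residual blocks, pixel-shuffle tail."""
    def __init__(self, channels=1, scale_factor=2, filters=64):
        super(TinySR, self).__init__()
        self.head = tf.keras.layers.Conv2D(filters, 3, padding='same')
        self.rb1 = RB(filters=filters)
        self.rb2 = RB(filters=filters)
        self.tail = Upsample(channels=channels, scale_factor=scale_factor)

    def call(self, inputs):
        x = self.head(inputs)            # lift to `filters` feature maps
        x = self.rb2(self.rb1(x))        # two residual blocks
        return self.tail(x)              # depth_to_space upscaling

lr_patch = tf.random.uniform((1, 36, 36, 1))   # dummy low-resolution patch
print(TinySR(scale_factor=2)(lr_patch).shape)  # expected: (1, 72, 72, 1)
```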
#### File: sr-tf2/models/metrics.py
```python
import tensorflow as tf
from lpips_tf import lpips_tf
def psnr(y, y_pred,max_val=1.0):
y = tf.image.convert_image_dtype(y, tf.float32)
y_pred = tf.image.convert_image_dtype(y_pred, tf.float32)
if(len(y.shape)==4):
values = tf.image.psnr(y[:, 4:-4, 4:-4], y_pred[:, 4:-4, 4:-4], max_val=max_val)
if (len(y.shape)==3):
values = tf.image.psnr(y[4:-4, 4:-4], y_pred[4:-4, 4:-4], max_val=max_val)
return tf.reduce_mean(values)
def ssim(y, y_pred,max_val=1.0):
y = tf.image.convert_image_dtype(y, tf.float32)
y_pred = tf.image.convert_image_dtype(y_pred, tf.float32)
if(len(y.shape)==4):
values = tf.image.ssim(y[:, 4:-4, 4:-4], y_pred[:, 4:-4, 4:-4], max_val=max_val, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03)
if (len(y.shape)==3):
values = tf.image.ssim(y[4:-4, 4:-4], y_pred[4:-4, 4:-4], max_val=max_val, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03)
return tf.reduce_mean(values)
rmse = tf.keras.metrics.RootMeanSquaredError(name='rmse')
def psnr_loss(y, y_pred,max_val=1.0):
y = tf.image.convert_image_dtype(y, tf.float32)
y_pred = tf.image.convert_image_dtype(y_pred, tf.float32)
if(len(y.shape)==4):
values = tf.image.psnr(y[:, 4:-4, 4:-4], y_pred[:, 4:-4, 4:-4], max_val=max_val)
if (len(y.shape)==3):
values = tf.image.psnr(y[4:-4, 4:-4], y_pred[4:-4, 4:-4], max_val=max_val)
return values
def ssim_loss(y, y_pred,max_val=1.0):
y = tf.image.convert_image_dtype(y, tf.float32)
y_pred = tf.image.convert_image_dtype(y_pred, tf.float32)
if(len(y.shape)==4):
values = tf.image.ssim(y[:, 4:-4, 4:-4], y_pred[:, 4:-4, 4:-4], max_val=max_val, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03)
if (len(y.shape)==3):
values = tf.image.ssim(y[4:-4, 4:-4], y_pred[4:-4, 4:-4], max_val=max_val, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03)
return values
@tf.function
def lpips(y, y_pred):
y = (y*255.)
if(y.shape[-1]==1):
y = tf.keras.layers.Concatenate()([y, y, y])
y_pred = (y_pred*255.)
if(y_pred.shape[-1]==1):
y_pred = tf.keras.layers.Concatenate()([y_pred, y_pred, y_pred])
if(len(y.shape)==4):
values = lpips_tf.lpips(y[:, 4:-4, 4:-4], y_pred[:, 4:-4, 4:-4], model='net-lin', net='alex')
if (len(y.shape)==3):
values = lpips_tf.lpips(y[4:-4, 4:-4], y_pred[4:-4, 4:-4], model='net-lin', net='alex')
return tf.reduce_mean(values)
```
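All of the metrics above crop a 4-pixel border before comparison, so inputs must be larger than 8×8 pixels per side (and at least 11×11 after cropping for SSIM's default filter). A minimal smoke test, assuming the repository's dependencies, including `lpips_tf`, are installed (the LPIPS metric itself is skipped here because it fetches pretrained weights):
```python
import tensorflow as tf
from models.metrics import psnr, ssim, rmse  # defined above

y_true = tf.random.uniform((2, 36, 36, 1))  # fake HR batch in [0, 1]
noise = tf.random.normal(tf.shape(y_true), stddev=0.05)
y_pred = tf.clip_by_value(y_true + noise, 0.0, 1.0)

print('PSNR:', float(psnr(y_true, y_pred)))  # computed on the 28x28 central crop
print('SSIM:', float(ssim(y_true, y_pred)))
rmse.update_state(y_true, y_pred)
print('RMSE:', float(rmse.result()))         # Keras metric, no border crop
```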
#### File: models/rtsrgan/block.py
```python
import tensorflow as tf
class RRDB(tf.keras.Model):
def __init__(self,filters=32,kernel_size=3,name=None):
super(RRDB, self).__init__()
self.c1 = tf.keras.layers.Conv2D(filters, kernel_size,padding='valid',strides=(1, 1), name=name,kernel_initializer=tf.keras.initializers.he_normal())
self.bn = tf.keras.layers.BatchNormalization()
self.act = tf.keras.layers.ReLU()
def call(self, inputs):
x = self.c1(inputs)
x = self.bn(x)
return self.act(x)
```
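Despite the RRDB name, the block above is a single Conv-BN-ReLU unit with 'valid' padding and no residual connection, so each application shrinks the feature map by `kernel_size - 1` pixels per side. A quick shape check, assuming the class is importable from `models.rtsrgan.block`:
```python
import tensorflow as tf
from models.rtsrgan.block import RRDB  # defined above

x = tf.random.uniform((1, 36, 36, 1))
y = RRDB(filters=32, kernel_size=3)(x)
print(y.shape)  # (1, 34, 34, 32): 'valid' padding trims one pixel on each side
```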
#### File: models/rtsrgan/model_gan.py
```python
import tensorflow as tf
from functools import reduce
class GAN(tf.keras.Model):
def __init__(self, discriminator, generator):
super(GAN, self).__init__()
self.discriminator = discriminator
self.generator = generator
def compile(self, d_optimizer, g_optimizer, d_loss, g_loss, metrics):
super(GAN, self).compile(metrics = metrics)
self.d_optimizer = d_optimizer
self.d_loss = d_loss
self.g_optimizer = g_optimizer
self.g_loss = g_loss
def load_weights_gen(self,checkpoint_filepath):
self.generator.load_weights(checkpoint_filepath)
def load_weights_dis(self,checkpoint_filepath):
self.discriminator.load_weights(checkpoint_filepath)
def save_weights_gen(self,checkpoint_filepath):
# Save the weights
self.generator.save_weights(checkpoint_filepath)
def train_step(self, data):
if isinstance(data, tuple):
img_lr, img_hr = data
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
img_sr = self.generator(img_lr, training=True)
real_output = self.discriminator(img_hr, training=True)
fake_output = self.discriminator(img_sr, training=True)
g_loss,c_loss, a_loss, p_loss = self.g_loss(fake_output,img_hr,img_sr)
d_loss = self.d_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(g_loss, self.generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(d_loss, self.discriminator.trainable_variables)
self.g_optimizer.apply_gradients(zip(gradients_of_generator, self.generator.trainable_variables))
self.d_optimizer.apply_gradients(zip(gradients_of_discriminator, self.discriminator.trainable_variables))
self.compiled_metrics.update_state(img_hr, img_sr)
return reduce(lambda x,y: dict(x, **y),
({"d_loss": d_loss, "g_loss": g_loss,"a_loss": a_loss, "c_loss": c_loss, "p_loss": p_loss },
{m.name: m.result() for m in self.metrics}))
```
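The wrapper above expects `g_loss` to return four values (total, content, adversarial, perceptual) and `d_loss` to take `(real_output, fake_output)`. A minimal compile sketch, assuming the repository's `g_rtsrgan`/`d_rtsrgan` models and metrics are importable; the closures below are illustrative stand-ins for the `GANLoss` class used in `train.py`, not its actual definition:
```python
import tensorflow as tf
from models.rtsrgan.model_generator import G_RTSRGAN as g_rtsrgan
from models.rtsrgan.model_discriminator import d_rtsrgan
from models.rtsrgan.model_gan import GAN
from models.metrics import psnr, ssim

scale = 2
bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)
mse = tf.keras.losses.MeanSquaredError()

def g_loss(fake_output, img_hr, img_sr):
    c_loss = mse(img_hr, img_sr)                           # content term
    a_loss = bce(tf.ones_like(fake_output), fake_output)   # adversarial term
    p_loss = tf.constant(0.0)                              # perceptual term omitted in this sketch
    return c_loss + 1e-3 * a_loss + p_loss, c_loss, a_loss, p_loss

def d_loss(real_output, fake_output):
    return (bce(tf.ones_like(real_output), real_output) +
            bce(tf.zeros_like(fake_output), fake_output))

gan = GAN(discriminator=d_rtsrgan(input_shape=(36 * scale, 36 * scale, 1)),
          generator=g_rtsrgan(scale_factor=scale))
gan.compile(d_optimizer=tf.keras.optimizers.Adam(1e-4),
            g_optimizer=tf.keras.optimizers.Adam(1e-4),
            d_loss=d_loss, g_loss=g_loss, metrics=[psnr, ssim])
# gan.fit(train_batch, epochs=..., steps_per_epoch=...)  # train_batch yields (lr, hr) pairs
```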
#### File: models/rtvsrgan/KnowledgeDistillation.py
```python
import tensorflow as tf
from models.metrics import ssim_loss
class Distiller(tf.keras.Model):
def __init__(self, student, teacher):
super(Distiller, self).__init__()
self.teacher = teacher
self.student = student
self.time = []
def get_run_time(self):
if(len(self.time)>0):
return sum(self.time)/len(self.time)
else:
return -1
def compile(
self,
optimizer,
metrics,
student_loss_fn,
distillation_loss_fn,
perc_loss_fn,
alpha=0.1,
beta=0.2,
):
""" Configure the distiller.
        Args:
            optimizer: Keras optimizer for the student weights
            metrics: Keras metrics for evaluation
            student_loss_fn: Loss function of difference between student
                predictions and ground-truth
            distillation_loss_fn: Loss function of difference between student
                predictions and teacher predictions
            perc_loss_fn: Perceptual loss function between student predictions
                and ground-truth (both expanded to 3 channels)
            alpha: weight of distillation_loss_fn in the total loss
            beta: weight of perc_loss_fn in the total loss; student_loss_fn
                is weighted by 1 - alpha - beta
        """
super(Distiller, self).compile(optimizer=optimizer, metrics=metrics)
self.student_loss_fn = student_loss_fn
self.distillation_loss_fn = distillation_loss_fn
self.perc_loss_fn = perc_loss_fn
self.alpha = alpha
self.beta = beta
@tf.function
def train_step(self, data):
# Unpack data
x, y = data
# Forward pass of teacher
teacher_predictions = self.teacher(x, training=False)
with tf.GradientTape() as tape:
# Forward pass of student
student_predictions = self.student(x, training=True)
# Compute losses
student_loss = self.student_loss_fn(y, student_predictions)
distillation_loss = self.distillation_loss_fn(teacher_predictions,student_predictions)
#distillation_loss = self.distillation_loss_fn(ssim_loss(y,y),ssim_loss(y,student_predictions))
teacher_predictions = tf.keras.layers.Concatenate()([teacher_predictions, teacher_predictions, teacher_predictions])
student_predictions = tf.keras.layers.Concatenate()([student_predictions, student_predictions, student_predictions])
y = tf.keras.layers.Concatenate()([y, y, y])
perc_loss = self.perc_loss_fn(y, student_predictions)
loss = (1 - (self.alpha + self.beta)) * student_loss + self.alpha * distillation_loss + self.beta * perc_loss
# Compute gradients
trainable_vars = self.student.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update the metrics configured in `compile()`.
self.compiled_metrics.update_state(y, student_predictions)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
results.update(
{"student_loss": student_loss, "distillation_loss": distillation_loss, "perceptual_loss": perc_loss}
)
return results
@tf.function
def test_step(self, data):
# Unpack the data
x, y = data
# Compute predictions
y_prediction = self.student(x, training=False)
# Calculate the loss
#student_loss = self.student_loss_fn(y, y_prediction)
# Update the metrics.
self.compiled_metrics.update_state(y, y_prediction)
# Return a dict of performance
results = {m.name: m.result() for m in self.metrics}
#results.update({"student_loss": student_loss})
return results
```
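In `train_step` above the total loss is `(1 - alpha - beta) * student_loss + alpha * distillation_loss + beta * perc_loss`, and the perceptual term receives 3-channel tensors (single-channel outputs are concatenated three times first). A minimal wiring sketch, assuming the repository's dependencies are installed; in `train.py` the teacher weights are restored from a checkpoint and the perceptual loss is VGG-based, both of which are replaced by placeholders here:
```python
import tensorflow as tf
from models.rtvsrgan.KnowledgeDistillation import Distiller
from models.rtvsrgan.model_generator import G_RTVSRGAN as g_rtvsrgan
from models.metrics import psnr, ssim

teacher = g_rtvsrgan(channels=1, scale_factor=2)   # normally: teacher.load_weights(<checkpoint>)
student = g_rtvsrgan(channels=1, scale_factor=2)

distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
    optimizer=tf.keras.optimizers.Adam(1e-4),
    metrics=[psnr, ssim],
    student_loss_fn=tf.keras.losses.MeanSquaredError(),
    distillation_loss_fn=tf.keras.losses.MeanAbsoluteError(),
    perc_loss_fn=tf.keras.losses.MeanAbsoluteError(),  # placeholder for the VGG perceptual loss
    alpha=0.3,   # weight of the teacher-matching term
    beta=0.65,   # weight of the perceptual term
)
# distiller.fit(train_batch, validation_data=val_batch, epochs=...)  # (lr, hr) pairs
```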
#### File: models/rtvsrgan/model_generator.py
```python
import tensorflow as tf
from models.rtvsrgan.blocks import RB,DRB, Upsample
class G_RTVSRGAN(tf.keras.Model):
def __init__(self,channels=1,scale_factor=2,file_writer_cm=None,method=None):
super(G_RTVSRGAN, self).__init__()
self.method = method
self.scale_factor = scale_factor
self.RB1 = RB(filters=32,kernel_size=3)
self.RB2 = RB(filters=32,kernel_size=3)
self.RB3 = RB(filters=32,kernel_size=3)
self.upsample = Upsample(channels=channels,scale_factor=scale_factor)
self.time = []
def get_run_time(self):
if(len(self.time)>0):
return sum(self.time)/len(self.time)
else:
return -1
def call(self, inputs):
x = tf.pad(inputs, [[0, 0], [0, 0], [0, 0], [0, 0]], 'SYMMETRIC')
rb1 = self.RB1(x)
rb2 = self.RB2(rb1)
x = tf.keras.layers.add([rb1, rb2])
rb3 = self.RB3(x)
x = tf.keras.layers.concatenate([rb1, rb2, rb3],axis=3)
x = self.upsample(x)
if self.method != None:
input_resized = tf.image.resize(inputs, [inputs.shape[1]*self.scale_factor,inputs.shape[2]*self.scale_factor],method=self.method)
x = tf.keras.layers.add([x,input_resized])
return x
# class G_RTVSRGAN_2(tf.keras.Model):
# def __init__(self,channels=1,scale_factor=2,file_writer_cm=None,distillation_rate=0.8):
# super(G_RTVSRGAN_2, self).__init__()
# self.RB1 = RB(filters=72,kernel_size=3)
# self.RB2 = RB(filters=72,kernel_size=3)
# self.RB3 = RB(filters=72,kernel_size=3)
# self.upsample = Upsample(channels=channels,scale_factor=scale_factor)
# self.time = []
# def get_run_time(self):
# if(len(self.time)>0):
# return sum(self.time)/len(self.time)
# else:
# return -1
# def call(self, inputs):
# x = tf.pad(inputs, [[0, 0], [0, 0], [0, 0], [0, 0]], 'SYMMETRIC')
# rb1 = self.RB1(x)
# rb2 = self.RB2(rb1)
# x = tf.keras.layers.add([rb1, rb2])
# rb3 = self.RB3(x)
# x = tf.keras.layers.concatenate([rb1, rb2, rb3],axis=3)
# return self.upsample(x)
# class G_RTVSRGAN2(tf.keras.Model):
# def __init__(self,channels=1,scale_factor=2,file_writer_cm=None,distillation_rate=0.8):
# super(G_RTVSRGAN2, self).__init__()
# self.c1 = tf.keras.layers.Conv2D(32, 3,padding='same',strides=(1, 1),
# kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1., mode='fan_in',
# distribution='truncated_normal', seed=None))
# self.RB1 = DRB(filters=32,kernel_size=3,distillation_rate=distillation_rate)
# self.RB2 = DRB(filters=32,kernel_size=3,distillation_rate=distillation_rate)
# self.RB3 = DRB(filters=32,kernel_size=3,distillation_rate=distillation_rate)
# self.RB4 = DRB(filters=32,kernel_size=3,distillation_rate=distillation_rate)
# self.RB5 = DRB(filters=32,kernel_size=3,distillation_rate=distillation_rate)
# self.c2 = tf.keras.layers.Conv2D(32, 1,padding='same',strides=(1, 1),
# kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1., mode='fan_in',
# distribution='truncated_normal', seed=None))
# self.c3 = tf.keras.layers.Conv2D(32, 3,padding='same',strides=(1, 1),
# kernel_initializer=tf.keras.initializers.VarianceScaling(scale=1., mode='fan_in',
# distribution='truncated_normal', seed=None))
# self.lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
# self.upsample = Upsample(channels=channels,scale_factor=scale_factor)
# self.time = []
# def get_run_time(self):
# if(len(self.time)>0):
# return sum(self.time)/len(self.time)
# else:
# return -1
# def call(self, inputs):
# x = tf.pad(inputs, [[0, 0], [0, 0], [0, 0], [0, 0]], 'SYMMETRIC')
# x1 = self.lrelu(self.c1(x))
# rb1 = self.RB1(x1)
# rb2 = self.RB2(rb1)
# rb3 = self.RB3(rb2)
# rb4 = self.RB4(rb3)
# rb5 = self.RB4(rb4)
# x = tf.keras.layers.concatenate([rb1, rb2, rb3, rb4, rb5],axis=3)
# x = self.lrelu(self.c2(x))
# x = self.lrelu(self.c3(x))
# x_fused = tf.keras.layers.add([x, x1])
# return self.upsample(x_fused)
# class G_RTVSRGAN(tf.keras.Model):
# def __init__(self,channels=1,scale_factor=2,file_writer_cm=None,distillation_rate=0.8):
# super(G_RTVSRGAN, self).__init__()
# self.RB1 = RB(filters=64,kernel_size=3)
# self.RB2 = RB(filters=64,kernel_size=3)
# self.RB3 = RB(filters=64,kernel_size=3)
# self.RB4 = RB(filters=64,kernel_size=3)
# self.RB5 = RB(filters=64,kernel_size=3)
# self.lrelu = tf.keras.layers.LeakyReLU(alpha=0.2)
# self.upsample = Upsample(channels=channels,scale_factor=scale_factor)
# self.time = []
# def get_run_time(self):
# if(len(self.time)>0):
# return sum(self.time)/len(self.time)
# else:
# return -1
# def call(self, inputs):
# x = tf.pad(inputs, [[0, 0], [0, 0], [0, 0], [0, 0]], 'SYMMETRIC')
# rb1 = self.RB1(x)
# rb2 = self.RB2(rb1)
# x = tf.keras.layers.add([rb1, rb2])
# rb3 = self.RB3(x)
# x = tf.keras.layers.add([rb2, rb3])
# rb4 = self.RB4(x)
# x = tf.keras.layers.add([rb3, rb4])
# return self.upsample(x)
```
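The active `G_RTVSRGAN` above chains three residual blocks, concatenates their outputs, and pixel-shuffles to the target resolution; when `method` is set, a resized copy of the input is added as a global skip connection. A forward-pass shape check, assuming the `RB`/`Upsample` blocks in `models/rtvsrgan/blocks.py` (not shown here) accept single-channel input as they do during training:
```python
import tensorflow as tf
from models.rtvsrgan.model_generator import G_RTVSRGAN

lr_patch = tf.random.uniform((1, 36, 36, 1))            # dummy 1-channel LR patch

model = G_RTVSRGAN(channels=1, scale_factor=2)          # learned path only
print(model(lr_patch).shape)                            # expected: (1, 72, 72, 1)

model_skip = G_RTVSRGAN(channels=1, scale_factor=2, method='bilinear')
print(model_skip(lr_patch).shape)                       # same shape, plus a bilinear input skip
```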
#### File: jlfilho/sr-tf2/train.py
```python
import tensorflow as tf
import argparse
import os
import statistics as stat
from models.utils import plot_test_images, plot_images, print_metrics
from models.espcn.model_espcn import ESPCN as espcn
from models.evsrnet.model_evsrnet import EVSRNet
from models.rtsrgan.model_generator import G_RTSRGAN as g_rtsrgan
from models.rtsrgan.model_discriminator import d_rtsrgan
from models.rtsrgan.model_gan import GAN
from models.rtvsrgan.model_generator import G_RTVSRGAN as g_rtvsrgan
from models.rtvsrgan.KnowledgeDistillation import Distiller
from models.rtvsrgan.model_discriminator import d_rtvsrgan, rad_rtvsrgan
from models.rtvsrgan.model_ragan import RaGAN
from models.percsr.model_discriminator import d_percsr, rad_percsr
from models.percsr.model_percsr import PercSR
from models.percsr.model_teacher import Teacher
from models.imdn.model_imdn import IMDN
from models.dataset import Dataset
from models.metrics import psnr, ssim, rmse, lpips
from models.losses import VGGLossNoActivation as VGGLoss, GANLoss
from models.save_img_callback import SaveImageCallback
from models.utils import scale_1 as scale
hot_test= {'hot_test_generic': {
'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/generic/lr/270p_qp17/",
'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/generic/hr/1080p/"
},
'hot_test_game': {
'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/game/lr/270p_qp17/",
'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/game/hr/1080p/"
},
'hot_test_sport': {
'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/sport/lr/270p_qp17/",
'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/sport/hr/1080p/"
},
'hot_test_podcast': {
'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/podcast/lr/270p_qp17/",
'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/podcast/hr/1080p/"
}}
test= {
'test_generic': {
'lr_test_path': "/home/joao/Documentos/projetos/sr-tf2/datasets/loaded_harmonic/img_test/lr/270p_qp17/",
'hr_test_path': "/home/joao/Documentos/projetos/sr-tf2/datasets/loaded_harmonic/img_test/hr/1080p/",
'logdir': "test_logdir/test/generic/"
},
'test_game': {
'lr_test_path': "/media/joao/SAMSUNG/Youtube/game/img_test/lr/270p_qp17/",
'hr_test_path': "/media/joao/SAMSUNG/Youtube/game/img_test/hr/1080p/",
'logdir': "test_logdir/test/game/"
},
'test_sport': {
'lr_test_path': "/media/joao/SAMSUNG/Youtube/sport/img_test/lr/270p_qp17/",
'hr_test_path': "/media/joao/SAMSUNG/Youtube/sport/img_test/hr/1080p/",
'logdir': "test_logdir/test/sport/"
},
'test_podcast': {
'lr_test_path': "/media/joao/SAMSUNG/Youtube/podcast/img_test/lr/270p_qp17/",
'hr_test_path': "/media/joao/SAMSUNG/Youtube/podcast/img_test/hr/1080p/",
'logdir': "test_logdir/test/podcast/"
}}
test_datasets = {
'test_generic': {
'test_dataset_path': "datasets/loaded_harmonic/output/generic/test/4X/270p_qp17/dataset.tfrecords",
'test_dataset_info_path': "datasets/loaded_harmonic/output/generic/test/4X/270p_qp17/dataset_info.txt"
},
'test_game': {
'test_dataset_path': "datasets/loaded_harmonic/output/game/test/4X/270p_qp17/dataset.tfrecords",
'test_dataset_info_path': "datasets/loaded_harmonic/output/game/test/4X/270p_qp17/dataset_info.txt"
},
'test_sport': {
'test_dataset_path': "datasets/loaded_harmonic/output/sport/test/4X/270p_qp17/dataset.tfrecords",
'test_dataset_info_path': "datasets/loaded_harmonic/output/sport/test/4X/270p_qp17/dataset_info.txt"
},
'test_podcast': {
'test_dataset_path': "datasets/loaded_harmonic/output/podcast/test/4X/270p_qp17/dataset.tfrecords",
'test_dataset_info_path': "datasets/loaded_harmonic/output/podcast/test/4X/270p_qp17/dataset_info.txt"
}}
LIST_MODEL=['espcn','g_rtsrgan','rtsrgan','g_rtvsrgan','teacher','rtvsrgan','imdn','k_dist','percsr','evsrnet']
MODEL='rtvsrgan'
LIST_GENERATOR=[None,'espcn','g_rtsrgan','imdn','evsrnet','g_rtvsrgan']
GENERATOR=None
BATCH_SIZE = 32
VAL_BATCH_SIZE = 16
TEST_BATCH_SIZE = 4
SHUFFLE_BUFFER_SIZE = 64
LIST_TEST_CLUSTER = ['generic','game','sport','podcast']
TEST_CLUSTER = ['sport']
SCHEDULE_VALUES=[100]
# Knowledge distillation model
LOSS_FN='mae'
DISTILLATION_RATE=0.8
ALPHA=0.3
BETA=0.65
LIST_WEIGHTS=[1e-5,1e-2,1e-2]
TYPE_REDUCE_LR='schedules'
LEARNING_RATE = 1e-4
LEARNING_DECAY_RATE = 1e-1
LEARNING_DECAY_EPOCHS = 20
NUM_EPOCHS = 100
STEPS_PER_EPOCH = 100
VAL_STEPS = 1
TEST_STEPS = 0
EPOCHS_PER_SAVE = 5
LOGDIR = 'logdir'
CHECKPOINT = 'checkpoint/'
TRAINNABLE_LAYER = 'final'
PATH_TO_EVAL = 'test_logdir/stats.txt'
TEST_LOGDIR='test_logdir/'
HOT_TEST_SIZE=5
LR_HOT_TEST_PATH="datasets/loaded_harmonic/img_test/lr/270p_qp28/"
HR_HOT_TEST_PATH="datasets/loaded_harmonic/img_test/hr/1080p/"
TRAIN_DATASET_PATH='datasets/loaded_harmonic/output/train/2X/270p_qp17/dataset.tfrecords'
TRAIN_DATASET_INFO_PATH='datasets/loaded_harmonic/output/train/2X/270p_qp17/dataset_info.txt'
VAL_DATASET_PATH='datasets/loaded_harmonic/output/val/2X/270p_qp17/dataset.tfrecords'
VAL_DATASET_INFO_PATH='datasets/loaded_harmonic/output/val/2X/270p_qp17/dataset_info.txt'
TEST_DATASET_PATH='datasets/loaded_harmonic/output/test/2X/270p_qp17/dataset.tfrecords'
TEST_DATASET_INFO_PATH='datasets/loaded_harmonic/output/test/2X/270p_qp17/dataset_info.txt'
def get_arguments():
parser = argparse.ArgumentParser(description='train one of the models for image and video super-resolution')
parser.add_argument('--model', type=str, default=MODEL, choices=LIST_MODEL,
help='What model to train', required=True)
parser.add_argument('--generator', type=str, default=GENERATOR, choices=LIST_GENERATOR,
                        help='What generator model to use', required=False)
parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
help='Number of images in batch', required=True)
parser.add_argument('--train_dataset_path', type=str, default=TRAIN_DATASET_PATH,
help='Path to the train dataset', required=True)
parser.add_argument('--train_dataset_info_path', type=str, default=TRAIN_DATASET_INFO_PATH,
help='Path to the train dataset info', required=True)
parser.add_argument('--num_epochs', type=int, default=NUM_EPOCHS,
help='Number of training epochs', required=True)
parser.add_argument('--steps_per_epoch', type=int, default=STEPS_PER_EPOCH,
help='Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch.')
parser.add_argument('--val_batch_size', type=int, default=VAL_BATCH_SIZE,
help='Number of images in val batch')
parser.add_argument('--val_dataset_path', type=str, default=VAL_DATASET_PATH,
help='Path to the val dataset')
parser.add_argument('--val_dataset_info_path', type=str, default=VAL_DATASET_INFO_PATH,
help='Path to the val dataset info')
parser.add_argument('--validation_steps', type=int, default=VAL_STEPS,
help='Total number of steps (batches of samples) to draw before stopping when performing validation at the end of every epoch.')
parser.add_argument('--test_batch_size', type=int, default=TEST_BATCH_SIZE,
help='Number of images in test batch')
parser.add_argument('--test_dataset_path', type=str, default=TEST_DATASET_PATH,
help='Path to the test dataset')
parser.add_argument('--test_dataset_info_path', type=str, default=TEST_DATASET_INFO_PATH,
help='Path to the test dataset info')
    parser.add_argument('--test_steps', type=int, default=TEST_STEPS,
                        help='Total number of steps (batches of samples) to draw before stopping when performing evaluation at the end of every epoch.')
    parser.add_argument('--test_cluster', nargs='*', type=str, default=TEST_CLUSTER, choices=LIST_TEST_CLUSTER,
                        help='Which cluster datasets to evaluate', required=False)
parser.add_argument('--hot_test_size', type=int, default=HOT_TEST_SIZE,
help='Number of images in hot test')
parser.add_argument('--lr_hot_test_path', type=str, default=LR_HOT_TEST_PATH,
help='Path to the hot test dataset')
parser.add_argument('--hr_hot_test_path', type=str, default=HR_HOT_TEST_PATH,
help='Path to the hr hot test path')
parser.add_argument('--ckpt_path', default=CHECKPOINT,
help='Path to the model checkpoint to evaluate')
parser.add_argument('--load_weights', action='store_true',
help='Load weights')
    parser.add_argument('--load_weights_perc', action='store_true',
                        help='Load weights for perceptual training')
    parser.add_argument('--eval', action='store_true',
                        help='Evaluate the model instead of training')
    parser.add_argument('--range_to_save', type=int, default=10,
                        help='Range of images to save for testing.')
parser.add_argument('--transfer_learning', action='store_true',
help='Transfer learning from lower-upscale model')
    parser.add_argument('--trainable_layer', type=str, default=TRAINNABLE_LAYER,
                        help='Name of the layer at which to stop freezing weights during transfer learning')
    parser.add_argument('--scaleFrom', type=int, default=2,
                        help='Scale factor of the lower-upscale model to transfer weights from')
parser.add_argument('--shuffle_buffer_size', type=int, default=SHUFFLE_BUFFER_SIZE,
help='Buffer size used for shuffling examples in dataset')
parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
help='Learning rate used for training')
parser.add_argument('--lr_decay_rate', type=float, default=LEARNING_DECAY_RATE,
help='Learning rate decay rate used in exponential decay')
parser.add_argument('--lr_decay_epochs', type=int, default=LEARNING_DECAY_EPOCHS,
help='Number of epochs before full decay rate tick used in exponential decay')
    parser.add_argument('--type_reduce_lr', type=str, default=TYPE_REDUCE_LR, choices=['plateau','schedules'],
                        help='Type of learning rate reduction strategy')
    parser.add_argument('--schedule_values',nargs='*', type=int, default=SCHEDULE_VALUES,
                        help='List of epochs at which to reduce the learning rate')
    parser.add_argument('--loss_fn', type=str, default=LOSS_FN, choices=['mse','mae','huber', 'fea'],
                        help='Loss function used for training')
parser.add_argument('--distillation_rate', type=float, default=DISTILLATION_RATE,
help='Distillation rate in knowledge distillation model')
parser.add_argument('--alpha', type=float, default=ALPHA,
help='Weight for distillation loss function in knowledge distillation model')
parser.add_argument('--beta', type=float, default=BETA,
help='Weight for perceptual loss function in knowledge distillation model')
    parser.add_argument('--list_weights', nargs='*', type=float, default=LIST_WEIGHTS,
                        help='List of weight values (lbd, eta, mu) for the composite GAN loss')
    parser.add_argument('--inter_method', type=str, default=None, choices=['bilinear','lanczos3','lanczos5','bicubic','nearest','mitchellcubic'],
                        help='Interpolation method for the resized-input skip connection used by some models')
parser.add_argument('--epochs_per_save', type=int, default=EPOCHS_PER_SAVE,
help='How often to save checkpoints')
parser.add_argument('--logdir', type=str, default=LOGDIR,
help='Where to save checkpoints and summaries')
parser.add_argument('--test_logdir', type=str, default=TEST_LOGDIR,
help='Where to save tests images')
parser.add_argument('--path_to_eval', type=str, default=PATH_TO_EVAL,
help='Path to save evals')
return parser.parse_args()
def main():
args = get_arguments()
# train dataset
train_dataset = Dataset(args.batch_size,
args.train_dataset_path,
args.train_dataset_info_path,
args.shuffle_buffer_size)
scale_factor = train_dataset.scale_factor
if args.steps_per_epoch == 0:
steps_per_epoch = train_dataset.examples_num // args.batch_size \
if train_dataset.examples_num % args.batch_size != 0 else 0
else:
steps_per_epoch = args.steps_per_epoch
train_dataset = train_dataset.get_data(args.num_epochs)
train_batch = train_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
# val dataset
val_dataset = Dataset(args.val_batch_size,
args.val_dataset_path,
args.val_dataset_info_path,
args.shuffle_buffer_size)
if args.validation_steps == 0:
validation_steps = val_dataset.examples_num // args.val_batch_size \
if val_dataset.examples_num % args.val_batch_size != 0 else 0
else:
validation_steps = args.validation_steps
val_dataset = val_dataset.get_data()
val_batch = val_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
# test dataset
test_dataset = Dataset(args.test_batch_size,
args.test_dataset_path,
args.test_dataset_info_path,
args.shuffle_buffer_size)
if args.test_steps == 0:
test_steps = test_dataset.examples_num // args.test_batch_size \
if test_dataset.examples_num % args.test_batch_size != 0 else 0
else:
test_steps = args.test_steps
test_dataset = test_dataset.get_data()
test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
# hot test
lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(args.lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(args.hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
test_print = [lr_img_paths,hr_img_paths]
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_paph,
save_weights_only=True,
monitor='val_loss',
save_freq= 'epoch',
mode='min',
save_best_only=True)
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=args.logdir+"/"+args.model,
histogram_freq=1,
write_graph=True,
write_images=True,
write_steps_per_second=True,
update_freq='batch')
file_writer_cm = tf.summary.create_file_writer(args.logdir+"/"+args.model + '/validation')
earlystopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=1e-5,
patience=100, verbose=1,
mode='min',
restore_best_weights=True)
if args.type_reduce_lr == 'plateau':
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_rmse', factor=args.lr_decay_rate,
patience=args.lr_decay_epochs, mode='min', min_lr=1e-6,verbose=1)
elif args.type_reduce_lr == 'schedules':
def scheduler(epoch, lr):
if epoch in args.schedule_values:
return lr * tf.math.exp(-0.1)
else:
return lr
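        # Each epoch listed in --schedule_values multiplies the current learning rate by
        # exp(-0.1) (about 0.905); --lr_decay_rate only affects the 'plateau' branch above.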
reduce_lr=tf.keras.callbacks.LearningRateScheduler(scheduler)
else:
print("--type_reduce_lr not valid!")
exit(1)
if args.model == 'espcn':
callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
eval,run_time=train_espcn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'imdn':
callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
eval,run_time=train_imdn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'g_rtsrgan':
callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
eval, run_time=train_g_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'rtsrgan':
callbacks=[tensorboard_callback]
eval,run_time=train_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'evsrnet':
callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
eval,run_time=train_evsrnet(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    # Our models
elif args.model == 'g_rtvsrgan':
callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
eval,run_time=train_g_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'teacher':
callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
eval,run_time=train_teacher(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'rtvsrgan':
callbacks=[tensorboard_callback,reduce_lr]
eval,run_time=train_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'k_dist':
callbacks=[tensorboard_callback, reduce_lr]
eval,run_time=train_k_distillation(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
elif args.model == 'percsr':
callbacks=[tensorboard_callback, reduce_lr]
print("CALLING MODEL {}".format(args.model))
eval,run_time=train_percsr(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
print_eval(args.path_to_eval,eval,args.model+'_'+args.generator+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
else:
exit(1)
def trainable_weights(model):
print("Weights:", len(model.weights))
print("Trainable_weights:", len(model.trainable_weights))
print("Non_trainable_weights:", len(model.non_trainable_weights))
def trainable_layers(model, trainable_layer):
for i in range(len(model.layers)):
if(i+1 == trainable_layer):
break
else:
model.layers[i].trainable=False
def print_eval(file_stats,eval,model_name,run_time):
statsFile=open(file_stats,"a")
print(model_name, file = statsFile)
print(eval, file = statsFile)
print(run_time, file = statsFile)
statsFile.close()
def saved_model(model, filepath):
tf.keras.models.save_model(model, filepath, save_traces=True)
def train_espcn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
file_writer_cm,trainable_layer):
model = espcn(scale_factor=scale_factor)
if args.load_weights:
print("Loading weights...")
model.load_weights(checkpoint_paph)
if args.transfer_learning:
checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
modelFrom = espcn(scale_factor=args.scaleFrom)
modelFrom.load_weights(checkpoint_paph_from)
for i in range(len(modelFrom.layers)):
if(modelFrom.layers[i].name == trainable_layer):
break
else:
print("Set_weights in: {} layer".format(model.layers[i].name))
model.layers[i].set_weights(modelFrom.layers[i].get_weights())
model.layers[i].trainable=False
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae":
loss_fn = tf.keras.losses.MeanAbsoluteError()
model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
trainable_weights(model)
if(args.eval==True):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
model.load_weights(checkpoint_paph)
print("Evaluate model")
get_test_dataset(model,scale_factor,args)
exit(1)
save_img_callback = SaveImageCallback(
model=model,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
print("Evaluate model")
eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(model, 'saved_model/{}/'.format(args.model))
return eval,model.get_run_time()
def train_imdn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
file_writer_cm,trainable_layer):
model = IMDN(scale_factor=scale_factor)
if args.load_weights:
print("Loading weights...")
model.load_weights(checkpoint_paph)
if args.transfer_learning:
checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
modelFrom = IMDN(scale_factor=args.scaleFrom)
modelFrom.load_weights(checkpoint_paph_from)
for i in range(len(modelFrom.layers)):
if(modelFrom.layers[i].name == trainable_layer):
break
else:
print("Set_weights in: {} layer".format(model.layers[i].name))
model.layers[i].set_weights(modelFrom.layers[i].get_weights())
model.layers[i].trainable=False
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae":
loss_fn = tf.keras.losses.MeanAbsoluteError()
model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
trainable_weights(model)
if(args.eval==True):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
model.load_weights(checkpoint_paph)
print("Evaluate model")
get_test_dataset(model,scale_factor,args)
exit(1)
save_img_callback = SaveImageCallback(
model=model,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
print("Evaluate model")
eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(model, 'saved_model/{}/'.format(args.model))
return eval, model.get_run_time()
def train_g_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
file_writer_cm,trainable_layer):
model = g_rtsrgan(scale_factor=scale_factor)
if args.load_weights:
print("Loading weights...")
model.load_weights(checkpoint_paph)
if args.transfer_learning:
checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
modelFrom = g_rtsrgan(scale_factor=args.scaleFrom)
modelFrom.load_weights(checkpoint_paph_from)
for i in range(len(modelFrom.layers)):
if(modelFrom.layers[i].name == trainable_layer):
break
else:
print("Set_weights in: {} layer".format(model.layers[i].name))
model.layers[i].set_weights(modelFrom.layers[i].get_weights())
model.layers[i].trainable=False
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae":
loss_fn = tf.keras.losses.MeanAbsoluteError()
model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
trainable_weights(model)
if(args.eval==True):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
model.load_weights(checkpoint_paph)
print("Evaluate model")
get_test_dataset(model,scale_factor,args)
exit(1)
save_img_callback = SaveImageCallback(
model=model,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
print("Evaluate model")
eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(model, 'saved_model/{}/'.format(args.model))
return eval,model.get_run_time()
def train_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
g=g_rtsrgan(scale_factor=scale_factor)
g.compile(metrics=[psnr,ssim,rmse,lpips])
d=d_rtsrgan(input_shape=(36*scale_factor,36*scale_factor,1))
gan = GAN(discriminator = d, generator = g)
if args.loss_fn == "mse":
cont_loss = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
cont_loss = tf.keras.losses.Huber()
if args.loss_fn == "mae":
cont_loss = tf.keras.losses.MeanAbsoluteError()
shape_hr = (36*scale_factor,36*scale_factor,3)
vgg_loss = VGGLoss(shape_hr,cont_loss)
perc_loss = vgg_loss.custom_perceptual_loss
adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
lbd = 1 * 1e-5
eta = 1 * 1e-2
mu = 1 * 1e-2
gan_loss=GANLoss(perc_loss, cont_loss, adv_loss,lbd,eta,mu)
if (args.load_weights):
print("Loading weights...")
checkpoint_paph="{}g_rtsrgan_{}x/model.ckpt".format(args.ckpt_path,scale_factor)
gan.load_weights_gen(checkpoint_paph)
for i in range(len(g.layers)):
if(g.layers[i].name == trainable_layer):
break
else:
g.layers[i].trainable=False
gan.compile(d_optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
g_optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
d_loss = gan_loss.discriminator_loss,
g_loss = gan_loss.generator_loss,
metrics=[psnr,ssim,rmse,lpips])
trainable_weights(gan)
save_img_callback = SaveImageCallback(
model=g,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_paph,
save_weights_only=True,
monitor='val_lpips',
save_freq= 'epoch',
mode='min',
save_best_only=True)
callbacks.append(checkpoint_callback)
gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch)
checkpoint_paph="{}{}_{}x/g_rtsrgan/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
gan.save_weights_gen(checkpoint_paph)
print("Evaluate model")
eval = g.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(g, 'saved_model/{}/'.format(args.model))
return eval, g.get_run_time()
def train_evsrnet(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
file_writer_cm,trainable_layer):
model = EVSRNet(scale_factor=scale_factor,method=args.inter_method)
model.build((None, None, None,1))
#print(model.summary())
if args.load_weights:
print("Loading weights...")
model.load_weights(checkpoint_paph)
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae": # default
loss_fn = tf.keras.losses.MeanAbsoluteError()
model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
trainable_weights(model)
if(args.eval==True):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
model.load_weights(checkpoint_paph)
print("Evaluate model")
get_test_dataset(model,scale_factor,args)
exit(1)
save_img_callback = SaveImageCallback(
model=model,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
print("Evaluate model")
eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(model, 'saved_model/{}/'.format(args.model))
return eval,model.get_run_time()
def train_teacher(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
model = Teacher(channels=1,scale_factor=scale_factor,distillation_rate=args.distillation_rate)
model.build((None, None, None,1))
print(model.summary())
if args.load_weights:
print("Loading weights...")
model.load_weights(checkpoint_paph)
if(args.eval==True):
print("Loading weights...")
model.load_weights(checkpoint_paph)
print("Evaluate model")
model.compile(metrics=[psnr,ssim,rmse,lpips])
get_test_dataset(model,scale_factor,args)
exit(1)
if args.transfer_learning:
checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)
modelFrom.load_weights(checkpoint_paph_from)
for i in range(len(modelFrom.layers)):
if(modelFrom.layers[i].name == trainable_layer):
break
else:
print("Set_weights in: {} layer".format(model.layers[i].name))
model.layers[i].set_weights(modelFrom.layers[i].get_weights())
model.layers[i].trainable=False
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae":
loss_fn = tf.keras.losses.MeanAbsoluteError()
if args.loss_fn == "fea":
loss_aux = tf.keras.losses.MeanAbsoluteError()
shape_hr = (36*scale_factor,36*scale_factor,3)
vgg_loss = VGGLoss(shape_hr,loss_aux)
loss_fn = vgg_loss.custom_perceptual_loss
model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
trainable_weights(model)
save_img_callback = SaveImageCallback(
model=model,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
print("Evaluate model")
if args.loss_fn == "fea":
eval = []
else:
eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(model, 'saved_model/{}/'.format(args.model))
return eval, model.get_run_time()
def train_g_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
model = g_rtvsrgan(scale_factor=scale_factor,method=args.inter_method)
if args.load_weights:
print("Loading weights...")
model.load_weights(checkpoint_paph)
if args.transfer_learning:
checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)
modelFrom.load_weights(checkpoint_paph_from)
for i in range(len(modelFrom.layers)):
if(modelFrom.layers[i].name == trainable_layer):
break
else:
print("Set_weights in: {} layer".format(model.layers[i].name))
model.layers[i].set_weights(modelFrom.layers[i].get_weights())
model.layers[i].trainable=False
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae":
loss_fn = tf.keras.losses.MeanAbsoluteError()
model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
trainable_weights(model)
if(args.eval==True):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
model.load_weights(checkpoint_paph)
print("Evaluate model")
get_test_dataset(model,scale_factor,args)
exit(1)
save_img_callback = SaveImageCallback(
model=model,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
print("Evaluate model")
eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(model, 'saved_model/{}/'.format(args.model))
return eval,model.get_run_time()
def train_k_distillation(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
opt=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
if args.loss_fn == "mse":
aux_loss_fn = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
aux_loss_fn = tf.keras.losses.Huber()
if args.loss_fn == "mae":
aux_loss_fn = tf.keras.losses.MeanAbsoluteError()
student_loss_fn = tf.keras.losses.MeanSquaredError()
distillation_loss_fn= tf.keras.losses.MeanAbsoluteError()
shape_hr = (36*scale_factor,36*scale_factor,3)
vgg_loss = VGGLoss(shape_hr,aux_loss_fn)
perc_loss = vgg_loss.custom_perceptual_loss
teacher = g_rtvsrgan(channels=1,scale_factor=scale_factor)
print("Loading teacher weights...")
weights_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,'g_rtvsrgan',scale_factor)
teacher.load_weights(weights_paph)
student = g_rtvsrgan(channels=1,scale_factor=scale_factor)
student.build((None, None, None,1))
# Initialize and compile distiller
distiller = Distiller(student=student, teacher=teacher)
distiller.compile(
optimizer=opt,
metrics=[psnr,ssim,rmse,lpips],
student_loss_fn=student_loss_fn,
distillation_loss_fn=distillation_loss_fn,
perc_loss_fn=perc_loss,
alpha=args.alpha,
beta=args.beta
)
trainable_weights(student)
if args.load_weights:
print("Loading student weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,'g_rtvsrgan',scale_factor)
student.load_weights(checkpoint_paph)
trainable_layers(student, len(student.layers)-1)
trainable_weights(student)
if args.transfer_learning:
checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)
modelFrom.load_weights(checkpoint_paph_from)
for i in range(len(modelFrom.layers)):
if(modelFrom.layers[i].name == trainable_layer):
break
else:
print("Set_weights in: {} layer".format(student.layers[i].name))
student.layers[i].set_weights(modelFrom.layers[i].get_weights())
student.layers[i].trainable=False
save_img_callback = SaveImageCallback(
model=distiller.student,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
earlystopping = tf.keras.callbacks.EarlyStopping(
monitor='val_rmse',
min_delta=1e-5,
patience=50, verbose=1,
mode='min',
restore_best_weights=True)
callbacks.append(earlystopping)
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_paph,
save_weights_only=True,
monitor='val_lpips',
save_freq= 'epoch',
mode='min',
save_best_only=True)
callbacks.append(checkpoint_callback)
# Distill teacher to student
distiller.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,
verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
checkpoint_paph="{}{}_{}x/g_rtsrgan/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
student.save_weights(checkpoint_paph)
print("Evaluate model")
# Evaluate student on test dataset
eval = distiller.evaluate(test_batch, verbose=1, steps=test_steps)
saved_model(distiller.student, 'saved_model/{}/'.format(args.model))
return eval,distiller.student.get_run_time()
def train_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
g=g_rtvsrgan(scale_factor=scale_factor)
g.build((None, None, None,1))
d=d_rtvsrgan(input_shape=(36*scale_factor,36*scale_factor,1))
ra_d=rad_rtvsrgan(discriminator=d,shape_hr=(36*scale_factor,36*scale_factor,1))
if args.loss_fn == "mse":
aux_loss = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
aux_loss = tf.keras.losses.Huber()
if args.loss_fn == "mae":
aux_loss = tf.keras.losses.MeanAbsoluteError()
cont_loss = tf.keras.losses.MeanSquaredError()
shape_hr = (36*scale_factor,36*scale_factor,3)
vgg_loss = VGGLoss(shape_hr,aux_loss)
perc_loss = vgg_loss.custom_perceptual_loss
adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
lbd = args.list_weights[0]
eta = args.list_weights[1]
mu = args.list_weights[2]
gan_loss=GANLoss(perc_loss, cont_loss, adv_loss,lbd,eta,mu)
ra_gan = RaGAN(ra_discriminator=ra_d, generator=g)
ra_gan.compile(d_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
g_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
ra_d_loss=gan_loss.discriminator_loss,
g_loss = gan_loss.generator_loss,
metrics=[psnr,ssim,rmse,lpips])
if (args.load_weights):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,'g_rtvsrgan',scale_factor)
ra_gan.load_weights_gen(checkpoint_paph)
trainable_layers(g, len(g.layers)-1)
trainable_weights(g)
save_img_callback = SaveImageCallback(
model=g,
model_name=args.model,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_paph,
save_weights_only=True,
monitor='val_lpips',
save_freq= 'epoch',
mode='min',
save_best_only=True)
callbacks.append(checkpoint_callback)
ra_gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
checkpoint_paph="{}{}_{}x/g_rtvsrgan/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
ra_gan.save_weights_gen(checkpoint_paph)
print("Evaluate model")
eval = ra_gan.evaluate(test_batch, verbose=1)
saved_model(ra_gan.generator, 'saved_model/{}/'.format(args.model))
    return eval,ra_gan.generator.get_run_time()
def model_generator(args=None,scale_factor=None):
if args.generator== 'espcn':
model= espcn(scale_factor=scale_factor)
elif args.generator== 'g_rtsrgan':
model= g_rtsrgan(scale_factor=scale_factor)
elif args.generator== 'imdn':
model= IMDN(scale_factor=scale_factor)
elif args.generator== 'evsrnet':
model= EVSRNet(scale_factor=scale_factor,method=args.inter_method)
elif args.generator== 'g_rtvsrgan':
model= g_rtvsrgan(scale_factor=scale_factor)
elif args.generator== 'teacher':
model = Teacher(channels=1,scale_factor=scale_factor,distillation_rate=args.distillation_rate)
else:
exit(1)
return model
def print_hot_test(lr_hot_test_path,hr_hot_test_path,model=None,model_name=None,args=None,scale_factor=2):
time_elapsed = plot_test_images(model,lr_hot_test_path,hr_hot_test_path,
args.test_logdir,scale_factor=scale_factor,model_name=model_name,epoch=0)
return time_elapsed
def get_test_dataset(model,scale_factor,args):
bic = True
if ('generic' in args.test_cluster):
# test dataset
test_dataset_path=test_datasets['test_generic']['test_dataset_path']
test_dataset_info_path=test_datasets['test_generic']['test_dataset_info_path']
test_dataset = Dataset(
args.test_batch_size,
test_dataset_path,
test_dataset_info_path,
args.shuffle_buffer_size)
if args.test_steps == 0:
test_steps = test_dataset.examples_num // args.test_batch_size \
if test_dataset.examples_num % args.test_batch_size != 0 else 0
else:
test_steps = args.test_steps
test_dataset = test_dataset.get_data()
test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
name_dataset = args.model+'_'+args.generator+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator!=None else args.model+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1])
print(name_dataset,args.path_to_eval)
lr_path=test['test_generic']['lr_test_path']
hr_path=test['test_generic']['hr_test_path']
logdir=test['test_generic']['logdir']
lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])
hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])
if (bic):
print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)
exit(1)
# plot_images("bi", lr_paths, hr_paths, args, logdir+"/"+"bicubic"+"/",scale_factor=scale_factor)
# plot_images("hr", lr_paths, hr_paths, args, logdir+"/"+"hr"+"/",scale_factor=scale_factor)
# run_time = plot_images(model, lr_paths, hr_paths, args, logdir+"/"+args.generator+"/",scale_factor=scale_factor)
run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)
eval = model.evaluate(test_batch, verbose=1)
lr_hot_test_path=hot_test['hot_test_generic']['lr_hot_test_path']
hr_hot_test_path=hot_test['hot_test_generic']['hr_hot_test_path']
lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
test_print = [lr_img_paths,hr_img_paths]
name_model = "generic"+'_'+args.model+'_'+args.generator if args.generator != None else "generic"+'_'+args.model
# run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)
print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))
if ('game' in args.test_cluster):
# test dataset
test_dataset_path=test_datasets['test_game']['test_dataset_path']
test_dataset_info_path=test_datasets['test_game']['test_dataset_info_path']
test_dataset = Dataset(
args.test_batch_size,
test_dataset_path,
test_dataset_info_path,
args.shuffle_buffer_size)
if args.test_steps == 0:
test_steps = test_dataset.examples_num // args.test_batch_size \
if test_dataset.examples_num % args.test_batch_size != 0 else 0
else:
test_steps = args.test_steps
test_dataset = test_dataset.get_data()
test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
name_dataset = args.model+'_'+args.generator+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator != None else args.model+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1])
print(name_dataset,args.path_to_eval)
lr_path=test['test_game']['lr_test_path']
hr_path=test['test_game']['hr_test_path']
logdir=test['test_game']['logdir']
lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])
hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])
if (bic):
print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)
exit(1)
# plot_images("bi", lr_paths, hr_paths, args, logdir+"/"+"bicubic"+"/",scale_factor=scale_factor)
# plot_images("hr", lr_paths, hr_paths, args, logdir+"/"+"hr"+"/",scale_factor=scale_factor)
# run_time = plot_images(model, lr_paths, hr_paths, args, logdir+"/"+args.generator+"/",scale_factor=scale_factor)
run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)
eval = model.evaluate(test_batch, verbose=1)
lr_hot_test_path=hot_test['hot_test_game']['lr_hot_test_path']
hr_hot_test_path=hot_test['hot_test_game']['hr_hot_test_path']
lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
test_print = [lr_img_paths,hr_img_paths]
name_model = "game"+'_'+args.model+'_'+args.generator if args.generator != None else "game"+'_'+args.model
# run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)
print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))
if ('sport' in args.test_cluster):
# test dataset
test_dataset_path=test_datasets['test_sport']['test_dataset_path']
test_dataset_info_path=test_datasets['test_sport']['test_dataset_info_path']
test_dataset = Dataset(
args.test_batch_size,
test_dataset_path,
test_dataset_info_path,
args.shuffle_buffer_size)
if args.test_steps == 0:
test_steps = test_dataset.examples_num // args.test_batch_size \
if test_dataset.examples_num % args.test_batch_size != 0 else 0
else:
test_steps = args.test_steps
test_dataset = test_dataset.get_data()
test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
name_dataset = args.model+'_'+args.generator+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator != None else args.model+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1])
print(name_dataset,args.path_to_eval)
lr_path=test['test_sport']['lr_test_path']
hr_path=test['test_sport']['hr_test_path']
logdir=test['test_sport']['logdir']
lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])
hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])
if (bic):
print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)
exit(1)
# plot_images("bi", lr_paths, hr_paths, args, logdir+"/"+"bicubic"+"/",scale_factor=scale_factor)
# plot_images("hr", lr_paths, hr_paths, args, logdir+"/"+"hr"+"/",scale_factor=scale_factor)
# run_time = plot_images(model, lr_paths, hr_paths, args, logdir+"/"+args.generator+"/",scale_factor=scale_factor)
run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)
eval = model.evaluate(test_batch, verbose=1)
lr_hot_test_path=hot_test['hot_test_sport']['lr_hot_test_path']
hr_hot_test_path=hot_test['hot_test_sport']['hr_hot_test_path']
lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
test_print = [lr_img_paths,hr_img_paths]
name_model = "sport"+'_'+args.model+'_'+args.generator if args.generator != None else "sport"+'_'+args.model
# run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)
print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))
if ('podcast' in args.test_cluster):
# test dataset
test_dataset_path=test_datasets['test_podcast']['test_dataset_path']
test_dataset_info_path=test_datasets['test_podcast']['test_dataset_info_path']
test_dataset = Dataset(
args.test_batch_size,
test_dataset_path,
test_dataset_info_path,
args.shuffle_buffer_size)
if args.test_steps == 0:
test_steps = test_dataset.examples_num // args.test_batch_size \
if test_dataset.examples_num % args.test_batch_size != 0 else 0
else:
test_steps = args.test_steps
test_dataset = test_dataset.get_data()
test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
name_dataset = args.model+'_'+args.generator+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1]) if args.generator != None else args.model+"_{}_{}X_q{}".format(str(test_dataset_path).split('/')[3],str(scale_factor),str(test_dataset_path).split('_q')[-1])
print(name_dataset,args.path_to_eval)
lr_path=test['test_podcast']['lr_test_path']
hr_path=test['test_podcast']['hr_test_path']
logdir=test['test_podcast']['logdir']
lr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_path) if len(filenames)!=0][0])
hr_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_path) if len(filenames)!=0][0])
if (bic):
print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)
exit(1)
# plot_images("bi", lr_paths, hr_paths, args, logdir+"/"+"bicubic"+"/",scale_factor=scale_factor)
# plot_images("hr", lr_paths, hr_paths, args, logdir+"/"+"hr"+"/",scale_factor=scale_factor)
# run_time = plot_images(model, lr_paths, hr_paths, args, logdir+"/"+args.generator+"/",scale_factor=scale_factor)
run_time = print_hot_test(lr_paths,hr_paths,model=model,model_name=args.model,args=args,scale_factor=scale_factor)
eval = model.evaluate(test_batch, verbose=1)
lr_hot_test_path=hot_test['hot_test_podcast']['lr_hot_test_path']
hr_hot_test_path=hot_test['hot_test_podcast']['hr_hot_test_path']
lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
test_print = [lr_img_paths,hr_img_paths]
name_model = "podcast"+'_'+args.model+'_'+args.generator if args.generator != None else "podcast"+'_'+args.model
# run_time = print_hot_test(test_print[0],test_print[1],model=model,model_name=name_model,args=args,scale_factor=scale_factor)
print_eval(args.path_to_eval,eval,name_dataset,stat.mean(run_time))
def train_percsr(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
g=model_generator(scale_factor=scale_factor,args=args)
g.build((None, None, None,1))
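# Build the relativistic-average discriminator pair; real/fake HR patches are 36*scale_factor pixels square with a single channel.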
d=d_percsr(input_shape=(36*scale_factor,36*scale_factor,1))
ra_d=rad_percsr(discriminator=d,shape_hr=(36*scale_factor,36*scale_factor,1))
if args.loss_fn == "mse":
aux_loss = tf.keras.losses.MeanSquaredError()
if args.loss_fn == "huber":
aux_loss = tf.keras.losses.Huber()
if args.loss_fn == "mae":
aux_loss = tf.keras.losses.MeanAbsoluteError()
loss_pix = tf.keras.losses.MeanSquaredError()
shape_hr = (36*scale_factor,36*scale_factor,3)
vgg_loss = VGGLoss(shape_hr,aux_loss)
loss_fea = vgg_loss.custom_perceptual_loss
loss_dis = tf.keras.losses.MeanAbsoluteError()
adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
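# Weights for the combined loss, read from --list_weights and passed to GANLoss together with the pixel, feature, distillation and adversarial terms above.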
alfa = args.list_weights[0]
eta = args.list_weights[1]
lbd = args.list_weights[2]
mu = args.list_weights[3]
gan_loss=GANLoss(loss_pix, loss_fea, loss_dis, adv_loss, alfa, eta, lbd, mu)
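# Pretrained teacher network used as the knowledge-distillation source for the student generator.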
teacher = Teacher(channels=1,scale_factor=scale_factor,distillation_rate=args.distillation_rate)
print("Loading teacher weights...")
weights_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,'teacher',scale_factor)
teacher.load_weights(weights_paph)
teacher.build((None, None, None,1))
ra_gan = PercSR(ra_discriminator=ra_d, generator=g,teacher=teacher)
ra_gan.compile(d_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
g_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
perc_loss=gan_loss.generative_loss,
metrics=[psnr,ssim,rmse,lpips])
if(args.eval==True):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/{}/model.ckpt".format(args.ckpt_path,args.model,scale_factor,args.generator)
ra_gan.load_weights(checkpoint_paph)
print("Evaluate model")
g.compile(metrics=[psnr,ssim,rmse,lpips])
get_test_dataset(g,scale_factor,args)
exit(1)
if (args.load_weights):
print("Loading weights...")
checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.generator,scale_factor)
ra_gan.load_weights_gen(checkpoint_paph)
# trainable_layers(g, len(g.layers)-1)
trainable_weights(g)
if (args.load_weights_perc):
print("Loading weights perceptual...")
checkpoint_paph="{}{}_{}x/{}/model.ckpt".format(args.ckpt_path,args.model,scale_factor,args.generator)
ra_gan.load_weights(checkpoint_paph)
for i in range(len(g.layers)):
print("Camada: {}".format(g.layers[i].name))
if(g.layers[i].name == trainable_layer):
break
else:
g.layers[i].trainable=False
#trainable_layers(g, len(g.layers)-1)
trainable_weights(g)
save_img_callback = SaveImageCallback(
model=g,
model_name=args.model+'_'+args.generator,
scale_factor=scale_factor,
epochs_per_save=args.epochs_per_save,
lr_paths=test_print[0],
hr_paths=test_print[1],
log_dir=args.test_logdir,
file_writer_cm=file_writer_cm)
callbacks.append(save_img_callback)
checkpoint_paph="{}{}_{}x/{}/model.ckpt".format(args.ckpt_path,args.model,scale_factor,args.generator)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_paph,
save_weights_only=True,
monitor='val_lpips',
save_freq= 'epoch',
mode='min',
save_best_only=True)
callbacks.append(checkpoint_callback)
ra_gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
checkpoint_paph="{}{}_{}x/{}/{}/model.ckpt".format(args.ckpt_path,args.model,scale_factor,args.generator,'generator')
ra_gan.save_weights_gen(checkpoint_paph)
print("Evaluate model")
eval = ra_gan.evaluate(test_batch, verbose=1)
saved_model(ra_gan.generator, 'saved_model/{}/'.format(args.model))
return eval, ra_gan.generator.get_run_time()
if __name__ == '__main__':
main()
``` |
{
"source": "jlfilho/VSRGAN-keras",
"score": 2
} |
#### File: jlfilho/VSRGAN-keras/train.py
```python
import os
import sys
os.environ['CUDA_VISIBLE_DEVICES']='0' #Set a single gpu
#stderr = sys.stderr
#sys.stderr = open(os.devnull, 'w')
sys.path.append('libs/')
import gc
import numpy as np
import matplotlib.pyplot as plt
# Import backend without the "Using X Backend" message
from argparse import ArgumentParser
from PIL import Image
from vsrganplus import VSRGANplus
from util import plot_test_images, DataLoader
from tensorflow.keras import backend as K
# Sample call
"""
# Train 2X VSRGANplus
python3 train.py --train ../data/videoset/1080p/ --validation ../data/val_large/ --test /media/joao/SAMSUNG1/data/out/test/ --log_test_path ./test/ --steps_per_epoch 200 --scale 2 --stage all
# Train the 4X VSRGANplus
python3 train.py --train ../../data/train_large/ --validation ../data/val_large/ --test ../data/benchmarks/Set5/ --log_test_path ./test/ --scale 4 --scaleFrom 2 --stage all
# Train the 8X VSRGANplus
python3 train.py --train ../../data/train_large/ --validation ../data/val_large/ --test ../data/benchmarks/Set5/ --log_test_path ./test/ --scale 8 --scaleFrom 4 --stage all
"""
def parse_args():
parser = ArgumentParser(description='Training script for VSRGANplus')
parser.add_argument(
'-s', '--stage',
type=str, default='all',
help='Which stage of training to run',
choices=['all', 'mse', 'gan', 'gan-finetune']
)
parser.add_argument(
'-e', '--epochs',
type=int, default=1000000,
help='Number of epochs per training stage'
)
parser.add_argument(
'-fe', '--first_epoch',
type=int, default=0,
help='Epoch number to start from in the training logs'
)
parser.add_argument(
'-t', '--train',
type=str, default='../../data/train_large/',
help='Folder with training images'
)
parser.add_argument(
'-spe', '--steps_per_epoch',
type=int, default=2000,
help='Steps per epoch'
)
parser.add_argument(
'-v', '--validation',
type=str, default='../data/val_large/',
help='Folder with validation images'
)
parser.add_argument(
'-spv', '--steps_per_validation',
type=int, default=10,
help='Steps per validation'
)
parser.add_argument(
'-te', '--test',
type=str, default='../data/benchmarks/Set5/',
help='Folder with testing images'
)
parser.add_argument(
'-pf', '--print_frequency',
type=int, default=5,
help='Frequency of print test images'
)
parser.add_argument(
'-sc', '--scale',
type=int, default=2,
help='How much should we upscale images'
)
parser.add_argument(
'-scf', '--scaleFrom',
type=int, default=None,
help='Perform transfer learning from lower-upscale model'
)
parser.add_argument(
'-w', '--workers',
type=int, default=1,
help='How many workers to use for pre-processing'
)
parser.add_argument(
'-mqs', '--max_queue_size',
type=int, default=100,
help='Max queue size for the workers'
)
parser.add_argument(
'-bs', '--batch_size',
type=int, default=16,
help='What batch-size should we use'
)
parser.add_argument(
'-cpi', '--crops_per_image',
type=int, default=4,
help='Increase in order to reduce random reads on disk (in case of slower SSDs or HDDs)'
)
parser.add_argument(
'-wp', '--weight_path',
type=str, default='./model/',
help='Where to output weights during training'
)
parser.add_argument(
'-lwf', '--log_weight_frequency',
type=int, default=1,
help='How often to save model weights during training'
)
parser.add_argument(
'-ltf', '--log_test_frequency',
type=int, default=1,
help='How often to output test images during training'
)
parser.add_argument(
'-ltuf', '--log_tensorboard_update_freq',
type=int, default=1,
help='Update frequency for TensorBoard logging'
)
parser.add_argument(
'-lp', '--log_path',
type=str, default='./logs/',
help='Where to output tensorboard logs during training'
)
parser.add_argument(
'-ltp', '--log_test_path',
type=str, default='./test/',
help='Path to generate images in train'
)
parser.add_argument(
'-hlr', '--height_lr',
type=int, default=64,
help='height of lr crop'
)
parser.add_argument(
'-wlr', '--width_lr',
type=int, default=64,
help='width of lr crop'
)
parser.add_argument(
'-c', '--channels',
type=int, default=3,
help='channels of images'
)
parser.add_argument(
'-cs', '--colorspace',
type=str, default='RGB',
help='Colorspace of images, e.g., RGB or YCbCr'
)
parser.add_argument(
'-mt', '--media_type',
type=str, default='v',
help="Type of media: 'i' for image or 'v' for video"
)
parser.add_argument(
'-mn', '--modelname',
type=str, default='_places365',
help='Name for the model'
)
return parser.parse_args()
def reset_layer_names(args):
'''In case of transfer learning, it's important that the names of the weights match
between the different networks (e.g. 2X and 4X). This function loads the lower-level
SR network from a reset keras session (thus forcing names to start from naming index 0),
loads the weights onto that network, and saves the weights again with proper names'''
# Find lower-upscaling model results
BASE_G = os.path.join(args.weight_path, 'VSRGANplus'+args.modelname+'_generator_'+str(args.scaleFrom)+'X.h5')
BASE_D = os.path.join(args.weight_path, 'VSRGANplus'+args.modelname+'_discriminator_'+str(args.scaleFrom)+'X.h5')
assert os.path.isfile(BASE_G), 'Could not find '+BASE_G
assert os.path.isfile(BASE_D), 'Could not find '+BASE_D
# Load previous model with weights, and re-save weights so that name ordering will match new model
prev_gan = VSRGANplus(upscaling_factor=args.scaleFrom)
prev_gan.load_weights(BASE_G, BASE_D)
prev_gan.save_weights(args.weight_path+'VSRGANplus{}'.format(args.modelname))
del prev_gan
K.reset_uids()
gc.collect()
return BASE_G, BASE_D
def gan_freeze_layers(args, gan):
'''In case of transfer learning, this function freezes lower-level generator
layers according to the scaleFrom argument, and recompiles the model so that
only the top layer is trained in the generator'''
# Map scalings to layer name
s2l = {2: '1', 4: '2', 8: '3'}
# 4X -> 8X block always trainable. 2X -> 4X only if going from 2X.
up_trainable = ["3", s2l[args.scale]]
if args.scaleFrom == 2:
up_trainable.append("2")
trainable=False
for layer in gan.generator.layers:
if 'upSample' in layer.name and any([layer.name.endswith('_'+s) for s in up_trainable]) :
trainable = True
layer.trainable = trainable
# Compile generator with frozen layers
gan.compile_generator(gan.generator)
def train_generator(args, gan, common, epochs=None):
'''Convenience function for the MSE-only generator pre-training stage'''
print("TRAINING GENERATOR ONLY WITH MSE LOSS")
gan.train_generator(
epochs=epochs,
modelname='SRResNet'+args.modelname,
steps_per_epoch=args.steps_per_epoch,
**common
)
def train_gan(args, gan, common, epochs=None):
'''Just a convenience function for training the GAN'''
gan.train_vsrganplus(
epochs=epochs,
modelname='VSRGANplus'+args.modelname,
log_weight_frequency=args.log_weight_frequency,
log_test_frequency=args.log_test_frequency,
first_epoch=args.first_epoch,
**common
)
# Run script
if __name__ == '__main__':
# Parse command-line arguments
args = parse_args()
# Common settings for all training stages
args_train = {
"batch_size": args.batch_size,
"steps_per_validation": args.steps_per_validation,
"crops_per_image": args.crops_per_image,
"print_frequency": args.print_frequency,
"log_tensorboard_update_freq": args.log_tensorboard_update_freq,
"workers": args.workers,
"max_queue_size": args.max_queue_size,
"datapath_train": args.train,
"datapath_validation": args.validation,
"datapath_test": args.test,
"log_weight_path": args.weight_path,
"log_tensorboard_path": args.log_path,
"log_test_path": args.log_test_path,
"media_type": args.media_type
}
# Specific of the model
args_model = {
"height_lr": args.height_lr,
"width_lr": args.width_lr,
"channels": args.channels,
"upscaling_factor": args.scale,
"colorspace": args.colorspace,
}
# Generator weight paths
srresnet_path = os.path.join(args.weight_path, 'SRResNet{}_{}X.h5'.format(args.modelname,args.scale))
srrgan_G_path = os.path.join(args.weight_path, 'VSRGANplus{}_generator_{}X.h5'.format(args.modelname,args.scale))
srrgan_D_path = os.path.join(args.weight_path, 'VSRGANplus{}_discriminator_{}X.h5'.format(args.modelname,args.scale))
# Generator weight paths
## FIRST STAGE: TRAINING GENERATOR ONLY WITH MSE LOSS
######################################################
# If we are doing transfer learning, only train top layer of the generator
# And load weights from lower-upscaling model
if args.stage in ['all', 'mse']:
if args.scaleFrom:
print("TRANSFERING LEARN")
# Ensure proper layer names
BASE_G, BASE_D = reset_layer_names(args)
# Load the properly named weights onto this model and freeze lower-level layers
gan = VSRGANplus(gen_lr=1e-4, **args_model)
gan.load_weights(BASE_G, BASE_D, by_name=True)
gan_freeze_layers(args, gan)
train_generator(args, gan, args_train, epochs=3)
# Train entire generator for 3 epochs
gan = VSRGANplus(gen_lr=1e-4, **args_model)
gan.load_weights(srresnet_path)
train_generator(args, gan, args_train, epochs=3)
else:
# As in paper - train for 10 epochs
gan = VSRGANplus(gen_lr=2*1e-4, **args_model)
gan.load_weights(srresnet_path)  # Test
trainable=False
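# Freeze all generator layers except the final upsampling block and the last output convolutions (conv2d_14-16).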
for layer in gan.generator.layers:
#print(layer.name)
if 'upSample_Conv2d_1' == layer.name:
trainable = True
if 'upSample_SubPixel_1' == layer.name:
trainable = True
if 'conv2d_14' == layer.name:
trainable = True
if 'conv2d_15' == layer.name:
trainable = True
if 'conv2d_16' == layer.name:
trainable = True
layer.trainable = trainable
gan.compile_generator(gan.generator)
gan.generator.summary()
train_generator(args, gan, args_train, epochs=args.epochs)
## SECOND STAGE: TRAINING GAN WITH HIGH LEARNING RATE
######################################################
# Re-initialize & train the GAN - load just created generator weights
if args.stage in ['all', 'gan']:
gan = VSRGANplus(gen_lr=1e-4, dis_lr=1e-4, ra_lr = 1e-4, loss_weights=[1., 5e-3,1e-2],
**args_model)
gan.load_weights(srresnet_path)
trainable=False
for layer in gan.generator.layers:
#print(layer.name)
if 'upSample_Conv2d_1' == layer.name:
trainable = True
if 'upSample_SubPixel_1' == layer.name:
trainable = True
if 'conv2d_14' == layer.name:
trainable = True
if 'conv2d_15' == layer.name:
trainable = True
if 'conv2d_16' == layer.name:
trainable = True
layer.trainable = trainable
gan.compile_generator(gan.generator)
gan.generator.summary()
#gan.load_weights(srrgan_G_path, srrgan_D_path)
print("TRAINING GAN WITH HIGH LEARNING RATE")
train_gan(args, gan, args_train, epochs= args.epochs//10 if args.epochs == int(4e5) else args.epochs)
``` |
{
"source": "jlfly12/qrsim",
"score": 2
} |
#### File: jlfly12/qrsim/Circuit_ops.py
```python
from Gate_bases import x, y, z, s_phi, identity
import matplotlib.pyplot as plt
import time
import numpy as np
try:
import cupy as cp
from cupy import rint, array, pi, exp, log2, sin, cos, random, linspace, sort, copy, zeros, roll, swapaxes, multiply, matmul, angle, binary_repr, var
from cupy import sum as arr_sum
from cupy import absolute as arr_abs
except ImportError:
from numpy import rint, array, pi, exp, log2, sin, cos, random, linspace, sort, copy, zeros, roll, swapaxes, multiply, matmul, angle, binary_repr, var
from numpy import sum as arr_sum
from numpy import absolute as arr_abs
# Check if cupy is imported
def check_if_cupy_is_imported():
import sys
return True if 'cupy' in sys.modules else False
# Give basic statistical info about given set of fidelities
def stat_analysis(results):
print(f"Number of data = {len(results)}")
print(f"Average fidelity = {sum(results) / len(results)}")
print(f"Max fidelity minus Min fidelity = {max(results) - min(results)}")
print(f"Variance = {var(results)}")
# Convert state index to a bit string (useful for extracting specific state amplitudes)
def int_to_bit_str(integer, N):
return array(list(binary_repr(integer, width=N)), dtype=int)
# Do the opposite
def bit_str_to_int(bit_str):
return int(''.join(str(e) for e in bit_str), 2)
def zero_state(n):
state = zeros(2 ** n, dtype=complex)
state[0] = 1
return state
def remove_global_phase(states):
gp = exp(- 1j * angle(states[0]))
c = swapaxes(states, 0, 1)
states = multiply(gp.T, c).T
return states
# -- Apply gate to state --
def apply(gate, states, global_phase=False):
# A shorthand for the original states
a = states
# d1 = number of circuit runs with noise, d2 = 2 ** N = dimension of state vector
d1, d2 = states.shape
N = int(rint(log2(d2)))
# A copy of state a, to be flipped by qubit-wise Pauli operations
b = copy(a)
# print("d1 = ", d1)
# print("d2 = ", d2)
# print("N = ", N)
# Reshape to rank-(N+1) tensor
b = b.reshape([d1] + [2] * N)
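# One tensor axis of length 2 per qubit: rolling along a qubit's axis flips it (Pauli-X), and swapping that axis to the front exposes its |0>/|1> components for sign and phase updates.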
for k in range(len(gate[0])):
basis = gate[0][k]
q = gate[1][k]
if basis == identity:
pass
if basis == x:
b = roll(b, 1, q+1)
if basis == y:
b = roll(b, 1, q+1)
b = swapaxes(b, 0, q+1)
b[0] *= -1j
b[1] *= 1j
b = swapaxes(b, 0, q+1)
if basis == s_phi:
phi = array(gate[3][k])
b = roll(b, 1, q+1)
b = swapaxes(b, 0, q+1)
b = swapaxes(b, N, q+1)
phase1 = cos(phi) + 1j * sin(phi)
phase2 = cos(phi) - 1j * sin(phi)
b[0] = multiply(phase2, b[0])
b[1] = multiply(phase1, b[1])
b = swapaxes(b, N, q+1)
b = swapaxes(b, 0, q+1)
if basis == z:
b = swapaxes(b, 0, q+1)
b[1] *= -1
b = swapaxes(b, 0, q+1)
b = b.reshape(d1, d2)
angles = array(gate[2][0])
states = (cos(angles/2) * a.T - 1j * sin(angles/2) * b.T).T
# Global-phase removal is currently disabled (it can be awkward if the first amplitude is close to zero); see remove_global_phase()
if global_phase == False:
pass
return states
# Plot state probabilities for one state vector only
# ADD COLOUR FOR PHASE?
def state_prob_plot(state, title='State probability plot', save=False):
if check_if_cupy_is_imported():
state = cp.asnumpy(state)
for i in range(len(state)):
plt.title(title)
plt.plot(np.linspace(i, i, 2), np.linspace(
0, np.absolute(state[i]) ** 2, 2), 'b', linewidth=5)
if save:
import os.path
count = 1
while os.path.exists(f'{count}.pdf'):
count += 1
plt.savefig(f'{count}.pdf')
plt.show()
# Calculate, plot, save and read fidelities
def find_fidelities(states, ideal):
if states.shape == ideal.shape:
fidelities = arr_abs(arr_sum(states * ideal.conj(), axis=1)) ** 2
else:
fidelities = arr_abs(matmul(states, ideal.conj())) ** 2
return fidelities
def plot_fidelities(fidelities, bins=20, range=None, title="Fidelity plot"):
if check_if_cupy_is_imported():
fidelities = cp.asnumpy(fidelities)
runs = len(fidelities)
plt.title(title)
plt.hist(fidelities, bins, range)
plt.show()
print(f'Average fidelity = {arr_sum(fidelities) / len(fidelities)}')
print(f'10-th percentile fidelity = {sort(fidelities)[int(runs/10)]}')
print(f'90-th percentile fidelity = {sort(fidelities)[int(9*runs/10)]}')
def save_fidelities(fidelities, N, n_gates, err, runs, fn="Fidelities"):
with open(fn, 'w') as f:
f.write(
f'Results for {N} qubits, {n_gates} gates with max error = {err * 100}% over {runs} runs \n')
for fidelity in fidelities:
f.write("%s\n" % fidelity)
f.close()
def read_fidelities(fn):
with open(fn) as f:
fidelities = []
F = f.read().splitlines()
# The first line in the txt file is just a description
for i in range(len(F)-1):
fidelities.append(float(F[i+1]))
return fidelities
# Find probability of measuring a K-qubit state given an N-qubit state
def find_prob(measured_qubits, sub_state, states):
# Make sure measured qubit numbers are in ascending order
qubits = measured_qubits
qubits.sort()
# Make a copy of given states in order not to alter them
a = states.copy()
d1, d2 = a.shape # d1 = number of circuit runs, d2 = 2 ** N
N = int(rint(log2(d2)))
# Reshape to rank-(N+1) tensor
a = a.reshape([d1] + [2] * N)
# K = number of measured qubits, M = number of qubits not measured
K = len(qubits)
M = N - K
# Reorder qubit number axes
for i in range(K):
a = swapaxes(a, i + 1, qubits[i] + 1)
# Flatten arrays for 2 groups of qubits
a = a.reshape([d1] + [2 ** K] + [2 ** M])
# Broadcast multiply coefficients
a = swapaxes(a, 0, 1)
a = multiply(a.T, sub_state).T
# Sum over coefficients
a = a.sum(axis=0)
a = abs(a) ** 2
a = a.sum(axis=1)
# Return probability of measuring a substate for all circuit runs
return a
def plot_prob(probabilities, bins=20, range=None, title="Probability of measuring a specific state from subsystem"):
if check_if_cupy_is_imported():
probabilities = cp.asnumpy(probabilities)
runs = len(probabilities)
plt.title(title)
plt.hist(probabilities, bins, range)
plt.show()
print(f'Average probability = {arr_sum(probabilities) / len(probabilities)}')
print(f'10-th percentile probability = {sort(probabilities)[int(runs/10)]}')
print(f'90-th percentile probability = {sort(probabilities)[int(9*runs/10)]}')
```
#### File: jlfly12/qrsim/Circuit.py
```python
from Circuit_ops import apply, zero_state
from Compiler import compile_gates
# -- The quantum circuit --
class Circuit:
def __init__(self, N):
# Circuit name
self.name = "Circuit"
# Number of qubits
self.N = N
# Single-qubit over-rotation, phase-error, two-qubit-over-rotation
self.errors = [.0, .0, .0]
# Ideal gates used for plotting circuits
self.ideal_gates = []
# Native gates (e.g. in ion traps)
self.native_gates = []
# Noisy native gates: [bases, qubits, rotation angle, axis angle]
self.noisy_gates = []
# Number of circuit execution with randomized gate noises
self.runs = 2
# Initialize state to be zero state
self.init_state = zero_state(N)
self.Z_is_native = False
self.library = "cupy"
def compile_circuit_gates(self):
errors = self.errors
ideal_gates = self.ideal_gates
runs = self.runs
Z_is_native = self.Z_is_native
native_gates, noisy_gates = compile_gates(ideal_gates, errors, runs, Z_is_native=Z_is_native)
self.native_gates = native_gates
self.noisy_gates = noisy_gates
# Computes the final state given the initial state and circuit
def compute(self, mode="single-init-state", compile_gates=True):
# Run circuit with different noise distributions given one initial state
if mode == "single-init-state":
# Make sure number of runs is 2 or larger
runs = self.runs if self.runs > 1 else 2
# Clone initial state for multiple runs with different gate errors
try:
from cupy import tile
except ImportError:
from numpy import tile
states = tile(self.init_state, (runs, 1))
# Run the same circuit given multiple initial states
elif mode == "mulitple-init-states":
self.runs = len(self.init_state)
states = self.init_state
if compile_gates:
self.compile_circuit_gates()
noisy_gates = self.noisy_gates
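# Apply every compiled noisy gate in sequence to the whole batch of state vectors.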
for gate in noisy_gates:
states = apply(gate, states)
return states
# Clear gates
def clear_gates(self):
self.ideal_gates = []
self.noisy_gates = []
# -- Ideal gates in a circuit --
def S_phi(self, q, t, phi):
self.ideal_gates.append(["S_phi", q, t, phi])
return self
def X(self, q, t):
self.ideal_gates.append(["X", q, t, None])
return self
def Y(self, q, t):
self.ideal_gates.append(["Y", q, t, None])
return self
def Z(self, q, t):
self.ideal_gates.append(["Z", q, t, None])
return self
def XX(self, q1, q2, t):
self.ideal_gates.append(["XX", [q1, q2], t, None])
return self
# -- Synthesized gates --
def H(self, q):
self.ideal_gates.append(["H", q, None, None])
return self
def CNOT(self, q1, q2):
self.ideal_gates.append(["CNOT", [q1, q2], None, None])
return self
# -- Plot circuit for ideal gates --
def plot_circuit(self):
return
```
#### File: jlfly12/qrsim/Compiler.py
```python
try:
from cupy import sqrt, pi, sin, cos, array, zeros, swapaxes
except ImportError:
from numpy import sqrt, pi, sin, cos, array, zeros, swapaxes
from Gate_bases import *
from Error_dist import error_dist
# -- The gate compiler --
def compile_gates(gates, errors, runs, Z_is_native):
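# Gate formats: each ideal gate is [type, qubit(s), angle, axis]; each native/noisy gate is [[bases], [qubits], angle, [axes]].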
# -- Helper functions for returning gates --
# The native bit-flip rotation
def S_phi(q, t, phi):
return [[s_phi], [q], t, [phi]]
def X(q, t):
return S_phi(q, t, 0)
def Y(q, t):
return S_phi(q, t, pi/2)
# Z-rotations are assumed to be synthesized from X and Y for now
def Z(q, t):
if Z_is_native:
return [[[z], [q], t, [0]]]
else:
return [Y(q, pi/2), X(q, t), Y(q, -pi/2)]
# XX-rotations are native to ion traps through motional sideband coupling
def XX(q1, q2, t):
return [[s_phi, s_phi], [q1, q2], t, [0, 0]]
native_gates = []
for gate in gates:
gate_type = gate[0] # Gate type
q = gate[1] # Qubit(s)
t = gate[2] # Gate angle
phi = gate[3] # Gate axis (on the x-y plane)
# -- Switch between different gate types --
if gate_type == s_phi:
native_gates.append(S_phi(q, t, phi))
if gate_type == x:
native_gates.append(X(q, t))
if gate_type == y:
native_gates.append(Y(q, t))
if gate_type == z:
for gate in Z(q, t):
native_gates.append(gate)
if gate_type == xx:
native_gates.append(XX(q[0], q[1], t))
# Hadamard gate synthesis
if gate_type == h:
native_gates.extend([Y(q, pi/2), X(q, -pi)])
# CNOT gate synthesis
if gate_type == cnot:
native_gates.extend([Y(q[0], pi/2), XX(q[0], q[1], pi/2),
X(q[0], -pi/2), X(q[1], -pi/2), Y(q[0], -pi/2)])
# -- Compile and output list of native gates --
# Native gates to noisy gates
[single_err, phase_err, xx_err] = errors
# Noisy gates: bases, qubit numbers, rotation angle, axis angle
noisy_gates = []
for native_gate in native_gates:
basis = native_gate[0]
qubits = native_gate[1]
# Two-qubit gate
if len(basis) == 2:
angle = [native_gate[2] * (xx_err * array(error_dist(runs)) + 1)]
axis1 = native_gate[3][0] + phase_err * \
array(error_dist(runs)) * pi * sqrt(2) / 4
axis2 = native_gate[3][1] + phase_err * \
array(error_dist(runs)) * pi * sqrt(2) / 4
noisy_gates.append([basis, qubits, angle, [axis1, axis2]])
# noisy_gates[0].append(native_gates[i][0])
# noisy_gates[1].append(native_gates[i][1])
# noisy_gates[2].append(native_gates[i][2] *
# (xx_err * error_dist(runs) + 1))
# noisy_gates[3].append([native_gates[i][3][0] + phase_err * error_dist(runs) * pi * sqrt(2) / 4,
# native_gates[i][3][1] + phase_err * error_dist(runs) * pi * sqrt(2) / 4])
# Single-qubit gate
else:
angle = [native_gate[2] *
(single_err * array(error_dist(runs)) + 1)]
axis = [native_gate[3][0] +
phase_err * array(error_dist(runs)) * pi / 2]
noisy_gates.append([basis, qubits, angle, axis])
# noisy_gates[0].append(native_gates[i][0])
# noisy_gates[1].append(native_gates[i][1])
# noisy_gates[2].append(native_gates[i][2] *
# (single_err * error_dist(runs) + 1))
# noisy_gates[3].append(native_gates[i][3] +
# phase_err * error_dist(runs) * pi / 2)
return native_gates, noisy_gates
``` |
{
"source": "jlfrancisco/paydunya-python",
"score": 3
} |
#### File: paydunya-python/paydunya/__init__.py
```python
__version__ = '1.0.6'
__author__ = "PAYDUNYA <<EMAIL>>"
import sys
import requests
try:
import simplejson as json
except ImportError:
import json
# runs in LIVE mode by defaults
debug = False
api_keys = {}
# PAYDUNYA HTTP API version
API_VERSION = 'v1'
SERVER = "app.paydunya.com"
# Sandbox Endpoint
SANDBOX_ENDPOINT = "https://%s/sandbox-api/%s/" % (SERVER, API_VERSION)
# Live Endpoint
LIVE_ENDPOINT = "https://%s/api/%s/" % (SERVER, API_VERSION)
# user-agent
PAYDUNYA_USER_AGENT = "paydunya-python/v%s" % __version__
# fixme: find a better way of 'self' referencing
__MODULE__ = sys.modules[__name__]
class PaydunyaError(Exception):
"""Base Exception class"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Store(object):
"""PAYDUNYA Store
Creates a store object for PAYDUNYA transactions
"""
def __init__(self, **kwargs):
self.name = kwargs.get('name', None)
self.tagline = kwargs.get('tagline', None)
self.postal_address = kwargs.get('postal_address', None)
self.phone_number = kwargs.get('phone_number', None)
self.website_url = kwargs.get('website_url', None)
self.logo_url = kwargs.get('logo_url', None)
@property
def info(self):
"""Returns the store information
What this does is simply return the store object's attributes
"""
return self.__dict__
class Payment(object):
"""Base class for other PAYDUNYA classes"""
def __init__(self):
"""Base class for all the other payment libraries"""
# request headers
self._headers = {
'User-Agent': PAYDUNYA_USER_AGENT,
"Content-Type": "application/json"
}
# response object
self._response = None
# data to send to server
self._data = None
self.store = Store(name=None)
def _process(self, resource=None, data={}):
"""Processes the current transaction
Sends an HTTP request to the PAYDUNYA API server
"""
# use object's data if no data is passed
_data = data or self._data
rsc_url = self.get_rsc_endpoint(resource)
if _data:
req = requests.post(rsc_url, data=json.dumps(_data),
headers=self.headers)
else:
req = requests.get(rsc_url, params=_data,
headers=self.headers)
if req.status_code == 200:
self._response = json.loads(req.text)
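# A response_code of "00" from the PAYDUNYA API is treated as success here.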
if int(self._response['response_code']) == 0:
return (True, self._response)
else:
return (False, self._response['response_text'])
else:
return (500, "Request Failed")
@property
def headers(self):
"""Returns the client's Request headers"""
return dict(self._config, **self._headers)
def add_header(self, header):
"""Add a custom HTTP header to the client's request headers"""
if type(header) is dict:
self._headers.update(header)
else:
raise ValueError(
"Dictionary expected, got '%s' instead" % type(header)
)
def get_rsc_endpoint(self, rsc):
"""Returns the HTTP API URL for current payment transaction"""
if self.debug:
return SANDBOX_ENDPOINT + rsc
return LIVE_ENDPOINT + rsc
@property
def debug(self):
"""Returns the current transaction mode"""
return __MODULE__.debug
@property
def _config(self):
_m = __MODULE__
return {
'PAYDUNYA-MASTER-KEY': _m.api_keys.get('PAYDUNYA-MASTER-KEY'),
'PAYDUNYA-PRIVATE-KEY': _m.api_keys.get('PAYDUNYA-PRIVATE-KEY'),
'PAYDUNYA-TOKEN': _m.api_keys.get('PAYDUNYA-TOKEN')
}
# moved here so the modules that depend on the 'Payment' class will work
from .invoice import Invoice, InvoiceItem
from .direct_payments import DirectPay
from .opr import OPR
__all__ = [
Store.__name__,
Payment.__name__,
Invoice.__name__,
InvoiceItem.__name__,
DirectPay.__name__,
OPR.__name__
]
``` |
{
"source": "jlfrancisco/Shop-Synchro",
"score": 2
} |
#### File: synchro_shop/sync/admin.py
```python
from django.contrib import admin
from django.contrib import admin as auth_admin
from .models import Shop, ShopReading
@admin.register(Shop)
class ShopAdmin(auth_admin.ModelAdmin):
list_display = ["id", "uuid", "name"]
search_fields = ["name"]
@admin.register(ShopReading)
class ShopReadingAdmin(auth_admin.ModelAdmin):
list_display = ["gtin", "get_shop", "expiry_date", "reading_time"]
search_fields = ["gtin"]
def get_shop(self, obj):
return obj.shop.name
``` |
{
"source": "jlfranklin/python-acquia-cloud-2",
"score": 2
} |
#### File: acapi2/resources/subscriptionlist.py
```python
from acapi2.resources.acquialist import AcquiaList
from acapi2.resources.subscription import Subscription
class SubscriptionList(AcquiaList):
def __init__(self, uri: str, api_key: str, api_secret: str, *args,
**kwargs) -> None:
# TODO Filters
super().__init__(uri, api_key, api_secret, *args, **kwargs)
self.fetch()
def fetch(self):
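# Populate this dict-like collection with Subscription objects keyed by each item's "id" from the API response.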
subs = self.request(uri=self.uri).json()
try:
sub_items = subs["_embedded"]["items"]
except KeyError:
# TODO Handle this
pass
else:
for sub in sub_items:
name = sub["id"]
subs_uri = "{base_uri}/{uuid}".format(
base_uri=self.uri, uuid=name)
self.__setitem__(name,
Subscription(subs_uri,
self.api_key,
self.api_secret))
@property
def base_uri(self) -> str:
return self._base_uri
@base_uri.setter
def base_uri(self, base_uri: str):
uri = "{}/subscriptions".format(base_uri)
self._base_uri = uri
```
#### File: acapi2/tests/test_agreements.py
```python
import requests_mock
from acapi2.resources.agreement import Agreement
from acapi2.resources.agreementlist import AgreementList
from acapi2.tests import BaseTest
@requests_mock.Mocker()
class TestAgreements(BaseTest):
def test_agreement_list(self, mocker):
response = {
"total": 3,
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/agreements"
},
"parent": {
"href": "https://cloud.acquia.com/api/"
}
},
"_embedded": {
"items": [
{
"uuid": "efc62c93-8203-4e8b-a8ff-4d18b780d4ab",
"document_uuid": "f25d0284-f25f-4e59-"
"9c48-7c39ae57b400",
"title": "Agreement Title",
"body": "<p>Agreement body and text.</p>",
"status": "accepted",
"created_at": "2017-01-23T12:00:00Z",
"updated_at": "2017-01-27T12:00:00Z",
"actioned_by": {
"uuid": "5aa902c5-f1c1-6c94-edfa-86bc58d0dce3",
"first_name": "James",
"last_name": "Kirk",
"mail": "<EMAIL>",
"picture_url": "https://accounts.acquia.com/sites"
"/default/avatars/456def"
"?mail=james.kirk"
"@example.com",
"username": "james.kirk"
},
"reference": {
"uuid": "9ab09eba-290d-4ed9-be4d-fa194ab92f39",
"name": "Acquia Subscription",
"type": "subscription"
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api"
"/agreements/efc62c93-8203-4e8b-"
"a8ff-4d18b780d4ab"
}
}
},
{
"uuid": "b63fff64-6c18-4899-acba-00ec6c8930e9",
"title": "Another Agreement",
"body": "<p>This is the body and text of another "
"agreement.</p>",
"status": "declined",
"created_at": "2017-02-23T12:00:00Z",
"updated_at": "2017-02-27T12:00:00Z",
"actioned_by": {
"uuid": "550e8400-e29b-41d4-a716-446655440000",
"first_name": "Jane",
"last_name": "Doe",
"mail": "<EMAIL>",
"picture_url": "https://accounts.acquia.com/"
"sites/default/avatars/123abc?"
"mail=jane.doe"
"@example.com",
"username": "jane.doe"
},
"reference": {
"uuid": "9ab09eba-290d-4ed9-be4d-fa194ab92f39",
"name": "Acquia Subscription",
"type": "subscription"
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"agreements/b63fff64-6c18-4899-acba-"
"00ec6c8930e9"
}
}
},
{
"uuid": "a8777880-8924-494a-abe2-62cc092df269",
"title": "A Third Agreement",
"body": "<p>This is the body and text of one "
"more agreement.</p>",
"status": "pending",
"created_at": "2017-02-23T12:00:00Z",
"updated_at": None,
"actioned_by": None,
"reference": {
"uuid": "9ab09eba-290d-4ed9-be4d-fa194ab92f39",
"name": "Acquia Subscription",
"type": "subscription"
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/"
"agreements/a8777880-8924-494a-"
"abe2-62cc092df269"
}
}
}
]
}
}
uri = f"{self.endpoint}/agreements"
mocker.register_uri("GET", uri, json=response, status_code=200)
agrs = self.acquia.agreements()
self.assertIsInstance(agrs, AgreementList)
def test_agreement(self, mocker):
uuid = "efc62c93-8203-4e8b-a8ff-4d18b780d4ab"
response = {
"uuid": uuid,
"document_uuid": "f25d0284-f25f-4e59-9c48-7c39ae57b400",
"title": "Agreement Title",
"body": "<p>Agreement body and text.</p>",
"status": "accepted",
"created_at": "2017-01-23T12:00:00Z",
"updated_at": "2017-01-27T12:00:00Z",
"actioned_by": {
"uuid": "5aa902c5-f1c1-6c94-edfa-86bc58d0dce3",
"first_name": "James",
"last_name": "Kirk",
"mail": "<EMAIL>",
"picture_url": "https://accounts.acquia.com/sites/default/"
"avatars/456def?mail=<EMAIL>",
"username": "james.kirk"
},
"reference": {
"uuid": "9ab09eba-290d-4ed9-be4d-fa194ab92f39",
"name": "Acquia Subscription",
"type": "subscription"
},
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/agreements/"
"efc62c93-8203-4e8b-a8ff-4d18b780d4ab"
},
"invitees": {
"href": "https://cloud.acquia.com/api/agreements/"
"efc62c93-8203-4e8b-a8ff-4d18b780d4ab/invitees"
},
"subscription": {
"href": "https://cloud.acquia.com/api/subscriptions/"
"9ab09eba-290d-4ed9-be4d-fa194ab92f39"
},
"actioned_by": {
"href": "https://cloud.acquia.com/api/users/"
"5aa902c5-f1c1-6c94-edfa-86bc58d0dce3"
},
"parent": {
"href": "https://cloud.acquia.com/api/agreements"
}
}
}
uri = f"{self.endpoint}/agreements/{uuid}"
mocker.register_uri("GET", uri, json=response, status_code=200)
agreement = self.acquia.agreement(uuid)
self.assertIsInstance(agreement, Agreement)
def test_accept_agreement(self, mocker):
response = {
"message": "The agreement has been accepted."
}
uuid = "efc62c93-8203-4e8b-a8ff-4d18b780d4ab"
uri = f"{self.endpoint}/agreements/{uuid}/actions/accept"
mocker.register_uri(url=uri, method="POST",
status_code=200, json=response)
response = self.acquia.agreement(uuid).accept()
self.assertEqual(response.status_code, 200)
def test_decline_agreement(self, mocker):
response = {
"message": "The agreement has been declined."
}
uuid = "efc62c93-8203-4e8b-a8ff-4d18b780d4ab"
uri = f"{self.endpoint}/agreements/{uuid}/actions/decline"
mocker.register_uri(url=uri, method="POST",
status_code=200, json=response)
response = self.acquia.agreement(uuid).decline()
self.assertEqual(response.status_code, 200)
def test_agreement_invitees(self, mocker):
response = {
"total": 2,
"_links": {
"self": {
"href": "https://cloud.acquia.com/api/agreements/"
"efc62c93-8203-4e8b-a8ff-4d18b780d4ab/invitees"
},
"parent": {
"href": "https://cloud.acquia.com/api/agreements/"
"efc62c93-8203-4e8b-a8ff-4d18b780d4ab"
}
},
"_embedded": {
"items": [
{
"uuid": "u4ee550f-ee0c-102e-8305-1231390f2cc1",
"first_name": "User",
"last_name": "One",
"mail": "<EMAIL>",
"username": "user.one",
"picture_url": "https://accounts.acquia.com/"
"path/to/image.png"
},
{
"uuid": "u4ef8edc-ee0c-102e-8305-1231390f2cc2",
"first_name": "User",
"last_name": "Two",
"mail": "<EMAIL>",
"username": "user.two",
"picture_url": "https://accounts.acquia.com/"
"path/to/image.png"
}
]
}
}
uuid = "efc62c93-8203-4e8b-a8ff-4d18b780d4ab"
uri = f"{self.endpoint}/agreements/{uuid}/invitees"
mocker.register_uri("GET", uri, json=response, status_code=200)
invitees_response = self.acquia.agreement(uuid).invitees()
self.assertIn("total", invitees_response)
```
#### File: acapi2/tests/test_http_request.py
```python
from unittest.mock import patch
from acapi2.http_request import HttpRequest
from acapi2.tests import BaseTest
class TestHttpRequest(BaseTest):
def test_session(self):
http_request = HttpRequest()
http_request_1 = HttpRequest()
self.assertEqual(id(http_request.session), id(http_request_1.session))
def test_get_session(self):
request_session = HttpRequest()._get_session()
self.assertEqual(HttpRequest._session, request_session)
@patch("requests.Session.request")
def test_make_request(self, mock_session):
http_request = HttpRequest()
http_request.body = "body"
http_request.do()
mock_session.assert_called_once_with(
"GET",
"http://localhost/",
data="body",
headers={}
)
``` |
{
"source": "JLGGG/master_thesis_code",
"score": 3
} |
#### File: master_thesis_code/DataCollecting/ToS&PP_crawler.py
```python
import os
import re
from pathlib import Path
import pandas as pd
import time
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from func_timeout import func_timeout, FunctionTimedOut
import nltk
nltk.download('stopwords')
nltk.download('punkt')
# import matplotlib.pyplot as plt # For visualization.
def start_search(query):
opt = webdriver.ChromeOptions()
opt.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'})
# opt.add_argument('headless')
# Set up your chromedriver path.
driver = webdriver.Chrome("C:/Users/USER-PC/Downloads/chromedriver.exe", options=opt)
links = [] # Initiate empty list to capture final results
# Specify number of pages on google search, each page has 10 links
n_pages = 35
# Links whose titles contain any of these words will be removed
blacklist = ['sample', 'template', 'frontpage', 'definition', 'generator', 'generate', 'clauses', 'abuse',
'what', 'different', 'difference', 'agree', 'no', 'wikipedia', 'why', 'how', 'need', 'feed',
'not', 'click', 'spectrum', 'free', 'read', '?', 'tldr', 'court', 'include', 'add', 'whether',
'practise', 'design', 'violating', 'welcome', 'watch', 'are', 'improving', 'about', 'creating',
'vs.', 'versus', 'can', 'form', 'receive', 'euro', 'review', 'determining', 'to', 'in', 'apply']
# whitelist = ['terms', 'condition', 'service', 'use', 'agreement', 'statement'] Tos and T&C
whitelist = ['privacy']
for page in range(1, n_pages):
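# Google returns 10 results per page; the "start" query parameter sets the result offset for each page.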
url = "http://www.google.com/search?q=" + query + "&start=" + str((page - 1) * 10)
driver.get(url)
soup = BeautifulSoup(driver.page_source, 'html.parser')
# Remove related questions
for tag in soup.select('.ULSxyf'):
tag.decompose()
search = soup.select('.yuRUbf')
for h in search:
links.append({
'Title': h.select_one('.LC20lb.DKV0Md').text,
'Link': h.a.get('href'),
'Flag': "false"
})
# try to bypass "I am not a robot".
time.sleep(0.25)
# Remove links that contain blacklist words
for link in links:
for black in blacklist:
# The find() method returns -1 if the value is not found.
if link['Title'].lower().find(black) >= 0:
link['Flag'] = "true"
break
final_links = []
for i, link in enumerate(links):
for white in whitelist:
if link['Flag'] == "false" and link['Title'].lower().find(white) >= 0:
final_links.append(link)
break
return final_links, driver
stop_words = set(stopwords.words('english'))
lemma = WordNetLemmatizer()
def clean_text(s):
s = re.sub('[^a-zA-Z]', ' ', s) # Removing numbers and punctuation
s = str(s).lower() # Convert all characters into lowercase
s = word_tokenize(s) # Tokenization
s = [w for w in s if w not in stop_words] # Removing stop words
s = [lemma.lemmatize(word=w, pos='v') for w in s] # Lemmatization
s = [i for i in s if len(i) > 2] # Remove the words having length <= 2
s = ' '.join(s) # Converting list to string
return s
def collect_ToS_text(soup, link):
# whitelist = [
# 'p',
# 'li',
# 'div',
# 'span',
# 'b',
# 'a',
# 'strong',
# 'font',
# ]
# blacklist = [
# '[document]',
# 'noscript',
# 'header',
# 'html',
# 'meta',
# 'head',
# 'input',
# 'script',
# 'style',
# 'title',
# # there may be more elements you don't want, such as "style", etc.
# ]
node = []
#text_elements = [t for t in soup.find_all(text=True) if t.parent.name not in blacklist]
#text_elements = [t for t in text_elements if t.parent.name in whitelist]
# for text in text_elements:
# node.append({
# 'Length': len(text),
# 'Link': link,
# 'Original': text,
# 'Processed': text,
# })
text = soup.get_text()
node.append({
'Length': len(text),
'Link': link,
'Original': text,
# 'Processed': text,
})
df = pd.DataFrame(node)
# df_cut = df[df['Length'] > 100]
# df_cut_revised = df_cut.copy()
# df_cut_revised['Processed'] = df_cut_revised['Processed'].apply(clean_text) # Text preprocessing
# df_cut_revised['Length'] = df_cut_revised['Processed'].apply(lambda x: len(x))
# final_df = df_cut_revised[df_cut_revised['Length'] > 10]
# Visualization code
# df.sort_values(by='Length', inplace=True, ascending=False)
# df.plot(x='Content', y='Length')
# plt.show()
#return final_df
return df
def enter_link(links, driver, flag, duplicate_check, df):
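# flag == 0: first crawl, record every visited link in duplicate_check; flag == 1: later crawl, skip links already recorded there.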
# super_filename = 'tos_data.csv'
super_filename = 'privacy_policy_data.csv'
i = 0
for link in links:
if link['Link'].find('.pdf') >= 0 or link['Link'].find('.html') >= 0:
continue
if flag == 0:
duplicate_check.append(link["Link"])
elif flag == 1:
# Confirm already accessed link
if link["Link"] in duplicate_check:
print("Accessed duplicate link. Return previous page.")
continue
try:
print(f'Go to {link["Link"]}')
driver.get(link["Link"])
except (NoSuchElementException, TimeoutException) :
continue
try:
soup = func_timeout(300, BeautifulSoup, args=(driver.page_source, 'html.parser'))
except FunctionTimedOut:
print(f'{link["link"]} page use too many time. It is terminated.')
continue
# Save each web page, log file
try:
tdf = collect_ToS_text(soup, link["Link"])
except KeyError:
print("KeyError happens")
continue
path = os.getcwd() + "/data_privacy_policy/"
# path = os.getcwd() + "/data/"
sub_filename = f'{link["Title"]}.csv'
sub_filename = re.sub("[\/:*?\"<>|]", "", sub_filename)
tdf.to_csv(Path(path + sub_filename), index=False) # save each page
print(tdf)
df = pd.concat([df, tdf])
if i % 10 == 0:
df.to_csv(Path(os.getcwd() + "/" + super_filename), index=False)
i += 1
try:
driver.back()
except TimeoutException:
continue
df.to_csv(Path(os.getcwd() + "/" + super_filename), index=False) # save whole page's data
print(f'Number of text entries collected: {len(df)}')
driver.close() # Close Chrome process
return df
def main():
duplicate_check = []
# df = pd.DataFrame(columns=['Length', 'Link', 'Original', 'Processed'])
df = pd.DataFrame(columns=['Length', 'Link', 'Original'])
links, driver = start_search('Privacy Policy')
df = enter_link(links, driver, 0, duplicate_check, df)
# links, driver = start_search('Terms of Service')
# df = enter_link(links, driver, 0, duplicate_check, df)
# links, driver = start_search('Terms of Conditions')
# enter_link(links, driver, 1, duplicate_check, df)
main()
``` |
{
"source": "jlgoh/labeldat",
"score": 3
} |
#### File: labeldat/models/organisation.py
```python
from extensions import db
class Organisation(db.Model):
id = db.Column(db.String(80), primary_key=True, nullable=False)
name = db.Column(db.String(80), nullable=False)
is_enterprise = db.Column(db.Boolean, nullable=False)
created_at = db.Column(db.DateTime(), nullable=False)
projects = db.relationship('Project', backref='org', lazy=True) # 1(Organisation)-to-Many(Project)
users = db.relationship('User', backref='org', lazy=True) # 1(Organisation)-to-Many(Users)
def __repr__(self):
return f"<Organisation {self.id} | {self.name} | Enterprise : {self.is_enterprise}>"
def to_response(self):
return {
"id": self.id,
"name": self.name,
"is_enterprise": self.is_enterprise,
"created_at": self.created_at,
"projects": [pj.to_response() for pj in self.projects],
"users": [user.to_response() for user in self.users]
}
```
#### File: labeldat/models/project.py
```python
from extensions import db
from models.item_data_type import ItemDataType
from models.label import Label
from models.task import Task
class Project(db.Model):
id = db.Column(db.String(80), primary_key=True, nullable=False)
# 1(Project)-to-1(organisation)
org_id = db.Column(db.String(80), db.ForeignKey('organisation.id'), nullable=False)
project_name = db.Column(db.String(80), nullable=False)
item_data_type = db.Column(db.Enum(ItemDataType), nullable=False)
layout = db.Column(db.JSON, nullable=False)
outsource_labelling = db.Column(db.Boolean, nullable=False)
created_at = db.Column(db.DateTime(), nullable=False)
# parent 1-to-many w Task
tasks = db.relationship('Task', backref='task', lazy=True)
# parent 1-to-many w ProjectManager
project_managers = db.relationship('ProjectManager', backref='project', lazy=True)
def __repr__(self):
return f"<Project {self.id} | {self.project_name} | Organisation : {self.org_id}>"
def to_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_project_for_user_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasksLabelled": [t.to_response_with_labels_from_user(user_id)
for t in self.tasks_and_labels_from_user(user_id)],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_created_project_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"created_at": self.created_at
}
def to_contributed_project_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"contributionCount": self.calculate_tasks_labelled_by_user(user_id),
"contributionPercentage": self.calculate_tasks_labelled_percentage_by_user(user_id),
"created_at": self.created_at
}
def tasks_and_labels_from_user(self, user_id):
resulting_tasks = []
for task in self.tasks:
for label in task.labels:
if label.user_id == user_id:
resulting_tasks.append(task)
break
return resulting_tasks
def calculate_number_of_tasks(self):
return len(self.tasks)
def calculate_tasks_labelled_percentage(self):
"""
Count % of tasks that have >= 1 label
"""
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks: # When there are no tasks
return 0
num_labelled = len([task for task in self.tasks if len(task.labels) > 0])
return round(float((num_labelled / number_of_tasks * 100)), 1)
def calculate_tasks_labelled_percentage_by_user(self, user_id):
"""
Count % of tasks that a user has labelled
"""
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks: # When there are no tasks
return 0
num_labelled_by_user = self.calculate_tasks_labelled_by_user(user_id)
return round(float((num_labelled_by_user / number_of_tasks) * 100), 1)
def calculate_tasks_labelled_by_user(self, user_id):
"""
Count number of tasks that a user has labelled
"""
tasks_by_user = db.session.query(Task).filter_by(project_id=self.id).join(Label).filter_by(
user_id=user_id).all()
num_labelled = len(tasks_by_user)
return num_labelled
```
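A quick, ORM-free restatement of the progress math used by `calculate_tasks_labelled_percentage` above (the task/label values below are stand-ins, not the real models):
```python
# Stand-alone sketch of the "% of tasks with at least one label" calculation.
def tasks_labelled_percentage(tasks):
    """tasks: list of lists, each inner list holding that task's labels."""
    if not tasks:  # no tasks -> 0, matching the guard in the model
        return 0
    num_labelled = len([labels for labels in tasks if len(labels) > 0])
    return round(float(num_labelled / len(tasks) * 100), 1)

print(tasks_labelled_percentage([["label-a"], [], ["label-b", "label-c"]]))  # 66.7
```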
#### File: labeldat/services/label_service.py
```python
import uuid
from extensions import db
from werkzeug.exceptions import *
from models import *
from datetime import datetime
class LabelService:
@staticmethod
def create_label(user_id, labels):
if not user_id:
raise BadRequest("LabelService :: create_label :: The user id is missing")
if not labels:
raise BadRequest("LabelService :: create_label :: The labels data is missing")
        # TODO: add limits on the number of labels per user and per task?
saved_labels = []
for label in labels:
task_id = label.get("taskId")
label_data = label.get("data")
new_label = Label(task_id=task_id, user_id=user_id, label_data=label_data, created_at=datetime.now())
db.session.add(new_label)
saved_labels.append(new_label)
db.session.commit()
print(f"LabelService :: create_label :: New labels saved: {saved_labels}")
return [saved_label.to_response() for saved_label in saved_labels]
```
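For reference, a hypothetical payload in the shape `LabelService.create_label` expects (the IDs and label data below are made up for illustration):
```python
# Hypothetical input: one dict per label, each carrying the task it belongs
# to plus the label payload itself.
labels_payload = [
    {"taskId": "task-123", "data": {"boxes": [[10, 20, 50, 80]]}},
    {"taskId": "task-456", "data": {"class": "cat"}},
]
# saved = LabelService.create_label(user_id="user-1", labels=labels_payload)
```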
#### File: labeldat/utilities/tasks_and_layout_response.py
```python
from models.item_data_type import ItemDataType
class TasksAndLayoutResponse:
def __init__(self, project_name=None, layout=None, item_data_type=None, data=None):
self.data = data
self.layout = layout
self.project_name = project_name
self.item_data_type = item_data_type
def to_dict(self):
return {
'layout': self.layout if self.layout else dict(),
'data': self.data if self.data else dict(),
'projectName': self.project_name if self.project_name else "Project",
'itemDataType': self.item_data_type if self.item_data_type else ItemDataType.IMAGE
}
```
#### File: labeldat/utilities/user_projects_results.py
```python
class UserProjectsResults:
    def __init__(self, projects: list, contributed_projects: list):
self.projects = projects
self.contributed_projects = contributed_projects
def to_response(self, user_id):
return {
"projects": [pj.to_created_project_response() for pj in self.projects],
"contributedProjects": [pj.to_contributed_project_response(user_id) for pj in self.contributed_projects]
}
``` |
{
"source": "jlgoldman/big-query-log-drain",
"score": 3
} |
#### File: jlgoldman/big-query-log-drain/app_test.py
```python
import base64
import unittest
import flask_testing
import mock
from app import app
class AppTest(flask_testing.TestCase):
def create_app(self):
app.config['TESTING'] = True
return app
def test_forbidden_if_no_credentials(self):
resp = self.client.post('/log')
self.assertEqual(403, resp.status_code)
@mock.patch('settings.LOG_DRAIN_USERNAME', 'test-username')
@mock.patch('settings.LOG_DRAIN_PASSWORD', '<PASSWORD>')
@mock.patch('app._post_to_bigquery')
def test_allowed_if_credentials_match(self, mock_post_to_bigquery):
credentials = base64.b64encode('test-username:<PASSWORD>')
resp = self.client.post('/log', headers={'Authorization': 'Basic ' + credentials})
self.assertEqual(200, resp.status_code)
self.assertEqual('', resp.data)
mock_post_to_bigquery.assert_not_called()
@mock.patch('settings.LOG_DRAIN_USERNAME', 'test-username')
@mock.patch('settings.LOG_DRAIN_PASSWORD', '<PASSWORD>')
@mock.patch('app._post_to_bigquery')
def test_single_record(self, mock_post_to_bigquery):
body = '''403 <190>1 2017-08-22T23:39:51.262277+00:00 host app web.1 - json: {"duration": 0.027, "host": "test.com", "method": "GET", "path": "/", "referrer": "", "remote_addr": "11.11.222.333", "response_code": 200, "timestamp": "2017-08-22T23:39:51.261888+00:00", "url": "/", "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36"}\n'''
credentials = base64.b64encode('test-username:test-password')
resp = self.client.post('/log', data=body, headers={'Authorization': 'Basic ' + credentials})
self.assertEqual(200, resp.status_code)
self.assertEqual('', resp.data)
mock_post_to_bigquery.assert_called_once()
log_records = mock_post_to_bigquery.call_args[0][0]
self.assertEqual(1, len(log_records))
expected_record = {
'duration': 0.027,
'host': 'test.com',
'method': 'GET',
'path': '/',
'referrer': '',
'remote_addr': '11.11.222.333',
'response_code': 200,
'timestamp': '2017-08-22T23:39:51.261888+00:00',
'url': '/',
'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
}
self.assertEqual(expected_record, log_records[0])
@mock.patch('settings.LOG_DRAIN_USERNAME', 'test-username')
@mock.patch('settings.LOG_DRAIN_PASSWORD', '<PASSWORD>')
@mock.patch('app._post_to_bigquery')
def test_two_records(self, mock_post_to_bigquery):
line1 = '''403 <190>1 2017-08-22T23:39:51.262277+00:00 host app web.1 - json: {"duration": 0.027, "host": "test.com", "method": "GET", "path": "/", "referrer": "", "remote_addr": "11.11.222.333", "response_code": 200, "timestamp": "2017-08-22T23:39:51.261888+00:00", "url": "/", "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36"}'''
line2 = '''390 <190>1 2017-08-30T23:39:51.000000+00:00 host app web.1 - json: {"host": "test.com", "method": "GET", "path": "/foo", "referrer": "", "remote_addr": "1.2.3.4", "response_code": 200, "timestamp": "2017-08-30T23:39:51.000000+00:00", "url": "/foo?bar=1", "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36"}'''
body = '%s\n%s\n' % (line1, line2)
credentials = base64.b64encode('test-username:test-password')
resp = self.client.post('/log', data=body, headers={'Authorization': 'Basic ' + credentials})
self.assertEqual(200, resp.status_code)
self.assertEqual('', resp.data)
mock_post_to_bigquery.assert_called_once()
log_records = mock_post_to_bigquery.call_args[0][0]
self.assertEqual(2, len(log_records))
expected_record1 = {
'duration': 0.027,
'host': 'test.com',
'method': 'GET',
'path': '/',
'referrer': '',
'remote_addr': '11.11.222.333',
'response_code': 200,
'timestamp': '2017-08-22T23:39:51.261888+00:00',
'url': '/',
'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
}
self.assertEqual(expected_record1, log_records[0])
expected_record2 = {
'host': 'test.com',
'method': 'GET',
'path': '/foo',
'referrer': '',
'remote_addr': '1.2.3.4',
'response_code': 200,
'timestamp': '2017-08-30T23:39:51.000000+00:00',
'url': '/foo?bar=1',
'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
}
self.assertEqual(expected_record2, log_records[1])
@mock.patch('settings.LOG_DRAIN_USERNAME', 'test-username')
@mock.patch('settings.LOG_DRAIN_PASSWORD', '<PASSWORD>')
@mock.patch('app._post_to_bigquery')
def test_garbage_record(self, mock_post_to_bigquery):
body = 'garbage'
credentials = base64.b64encode('test-username:test-password')
resp = self.client.post('/log', data=body, headers={'Authorization': 'Basic ' + credentials})
self.assertEqual(200, resp.status_code)
self.assertEqual('', resp.data)
mock_post_to_bigquery.assert_not_called()
@mock.patch('settings.LOG_DRAIN_USERNAME', 'test-username')
@mock.patch('settings.LOG_DRAIN_PASSWORD', '<PASSWORD>')
@mock.patch('app._post_to_bigquery')
def test_malformed_record2(self, mock_post_to_bigquery):
body = '100 foo'
credentials = base64.b64encode('test-username:test-password')
resp = self.client.post('/log', data=body, headers={'Authorization': 'Basic ' + credentials})
self.assertEqual(200, resp.status_code)
self.assertEqual('', resp.data)
mock_post_to_bigquery.assert_not_called()
body = '2 foo'
credentials = base64.b64encode('test-username:test-password')
resp = self.client.post('/log', data=body, headers={'Authorization': 'Basic ' + credentials})
self.assertEqual(200, resp.status_code)
self.assertEqual('', resp.data)
mock_post_to_bigquery.assert_not_called()
def test_render_default_diagnostics(self):
resp = self.client.get('/')
self.assertEqual(200, resp.status_code)
if __name__ == '__main__':
unittest.main()
```
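The tests above imply a parsing contract: each drained line is a length-prefixed syslog frame whose message ends in a `json:` payload, and anything else is dropped. A minimal sketch of that contract (an assumption about app.py, which is not shown here):
```python
import json

def parse_drain_body(body, prefix='json:'):
    """Extract JSON records from a log-drain body (illustrative sketch)."""
    records = []
    for line in body.splitlines():
        if prefix not in line:
            continue  # e.g. 'garbage' or '100 foo' are silently skipped
        payload = line.split(prefix, 1)[1].strip()
        try:
            records.append(json.loads(payload))
        except ValueError:
            continue
    return records

body = '403 <190>1 2017-08-22T23:39:51+00:00 host app web.1 - json: {"path": "/", "response_code": 200}\n'
assert parse_drain_body(body) == [{"path": "/", "response_code": 200}]
```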
#### File: jlgoldman/big-query-log-drain/settings.py
```python
import os
import dotenv
dotenv_filename = dotenv.find_dotenv()
if dotenv_filename:
dotenv.load_dotenv(dotenv_filename)
def parse_bool(env_value):
return env_value is not None and env_value.lower() not in ('0', 'false')
DEBUG = parse_bool(os.environ.get('DEBUG'))
LOG_DRAIN_USERNAME = os.environ.get('LOG_DRAIN_USERNAME')
LOG_DRAIN_PASSWORD = os.environ.get('LOG_DRAIN_PASSWORD')
LOG_RECORD_PREFIX = os.environ.get('LOG_RECORD_PREFIX', 'json:')
BIG_QUERY_PROJECT_ID = os.environ.get('BIG_QUERY_PROJECT_ID')
BIG_QUERY_DATASET_ID = os.environ.get('BIG_QUERY_DATASET_ID')
BIG_QUERY_TABLE_ID = os.environ.get('BIG_QUERY_TABLE_ID')
BIG_QUERY_SKIP_INVALID_ROWS = parse_bool(os.environ.get('BIG_QUERY_SKIP_INVALID_ROWS'))
BIG_QUERY_IGNORE_UNKNOWN_VALUES = parse_bool(os.environ.get('BIG_QUERY_IGNORE_UNKNOWN_VALUES'))
GOOGLE_SERVICE_ACCOUNT_CREDENTIALS_JSON = os.environ.get('GOOGLE_SERVICE_ACCOUNT_CREDENTIALS_JSON')
``` |
{
"source": "jlgrady1/youtopia",
"score": 3
} |
#### File: youtopia/docker/reaper.py
```python
import logging
import os
import time
log = logging.getLogger(__name__)
HOME = os.environ.get("MEDIA_HOME")
DEBUG = os.environ.get("DEBUG", "False")
INTERVAL = 5 * 60 # 5 Minutes
REAP_AGE_MIN = 30 # Files created greater than REAP_AGE_MIN ago will be reaped
def validate():
if not HOME:
log.error("YOUTOPIA_HOME is not set!")
exit(1)
def run():
while True:
time.sleep(INTERVAL)
log.info("Reaper is waking up.")
files = os.listdir(HOME)
now = time.time()
for f in files:
log.debug("Examining {}".format(f))
path = "{}/{}".format(HOME, f)
            if not os.path.isfile(path):
                log.info("Skipping dir {}".format(f))
                continue  # don't stat or remove directories
            info = os.stat(path)
created = info.st_ctime
delta = (now - created) / 60.0 # Time delta in minutes
if delta >= REAP_AGE_MIN:
log.info("Reaping file {}".format(f))
os.remove(path)
log.info("Reaper is going to sleep.")
def main():
# Configure logging
dbg = DEBUG.lower() == 'true'
if dbg:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
log.info("Reaper is starting up")
validate()
run()
if __name__ == '__main__':
main()
```
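The reap decision in `run()` is just an age comparison in minutes; the same arithmetic in isolation (values are made up):
```python
import time

REAP_AGE_MIN = 30
created = time.time() - 45 * 60           # pretend the file is 45 minutes old
delta_minutes = (time.time() - created) / 60.0
print(delta_minutes >= REAP_AGE_MIN)      # True -> this file would be reaped
```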
#### File: youtopia/mp3/forms.py
```python
import logging
from django import forms
LOGGER = logging.getLogger(__name__)
class YouTubeURLForm(forms.Form):
url_attrs = {
'placeholder': 'https://www.youtube.com/watch?v=<videoid>',
'class': 'form-control'
}
youtube_url = forms.CharField(
required=True,
max_length=100,
widget=forms.TextInput(attrs=url_attrs),
error_messages={'required': 'URL is required'}
)
def clean_youtube_url(self):
url = self.data['youtube_url']
# https://www.youtube.com/watch?v=<videoid>
if 'youtube.com/watch?v=' not in url:
raise forms.ValidationError("Unknown youtube url.")
return url
``` |
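The URL check in `clean_youtube_url` is a plain substring test; restated outside Django for clarity:
```python
def looks_like_youtube_watch_url(url):
    # Same rule as YouTubeURLForm.clean_youtube_url
    return 'youtube.com/watch?v=' in url

assert looks_like_youtube_watch_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
assert not looks_like_youtube_watch_url('https://example.com/video')
```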
{
"source": "jlgutenson/RAPIDpy",
"score": 2
} |
#### File: RAPIDpy/inflow/lsm_rapid_process.py
```python
from datetime import datetime, timedelta
import multiprocessing
import os
import re
import traceback
# external packages
import pandas as pd
import pangaea
from netCDF4 import Dataset
import numpy as np
# local imports
from ..rapid import RAPID
from .CreateInflowFileFromERAInterimRunoff import \
CreateInflowFileFromERAInterimRunoff
from .CreateInflowFileFromLDASRunoff import CreateInflowFileFromLDASRunoff
from .CreateInflowFileFromWRFHydroRunoff import \
CreateInflowFileFromWRFHydroRunoff
from ..postprocess.generate_return_periods import generate_return_periods
from ..postprocess.generate_seasonal_averages import generate_seasonal_averages
from ..utilities import (case_insensitive_file_search,
get_valid_directory_list,
partition)
# -----------------------------------------------------------------------------
# MULTIPROCESSING FUNCTION
# -----------------------------------------------------------------------------
def generate_inflows_from_runoff(args):
"""
prepare runoff inflow file for rapid
"""
runoff_file_list = args[0]
file_index_list = args[1]
weight_table_file = args[2]
grid_type = args[3]
rapid_inflow_file = args[4]
rapid_inflow_tool = args[5]
mp_lock = args[6]
time_start_all = datetime.utcnow()
    if not isinstance(runoff_file_list, list):
        runoff_file_list = [runoff_file_list]
    if not isinstance(file_index_list, list):
        file_index_list = [file_index_list]
if runoff_file_list and file_index_list:
# prepare ECMWF file for RAPID
index_string = "Index: {0}".format(file_index_list[0])
if len(file_index_list) > 1:
index_string += " to {0}".format(file_index_list[-1])
print(index_string)
runoff_string = "File(s): {0}".format(runoff_file_list[0])
if len(runoff_file_list) > 1:
runoff_string += " to {0}".format(runoff_file_list[-1])
print(runoff_string)
print("Converting inflow ...")
try:
rapid_inflow_tool.execute(nc_file_list=runoff_file_list,
index_list=file_index_list,
in_weight_table=weight_table_file,
out_nc=rapid_inflow_file,
grid_type=grid_type,
mp_lock=mp_lock)
except Exception:
# This prints the type, value, and stack trace of the
# current exception being handled.
traceback.print_exc()
raise
time_finish_ecmwf = datetime.utcnow()
print("Time to convert inflows: {0}"
.format(time_finish_ecmwf-time_start_all))
# -----------------------------------------------------------------------------
# UTILITY FUNCTIONS
# -----------------------------------------------------------------------------
DEFAULT_LSM_INPUTS = {
'erai_new': {
'file_datetime_re_pattern': r'\d{8}',
'file_datetime_pattern': "%Y%m%d",
},
't255': {
'file_datetime_re_pattern': r'\d{8}',
'file_datetime_pattern': "%Y%m%d",
},
't511': {
'file_datetime_re_pattern': r'\d{8}',
'file_datetime_pattern': "%Y%m%d",
},
't159': {
'file_datetime_re_pattern': r'\d{8}',
'file_datetime_pattern': "%Y%m%d",
},
'gldas2': {
'file_datetime_re_pattern': r'\d{8}\.\d{2}',
'file_datetime_pattern': "%Y%m%d.%H",
},
'gldas': {
'file_datetime_re_pattern': r'\d{8}\.\d{2}',
'file_datetime_pattern': "%Y%m%d.%H",
},
'nldas': {
'file_datetime_re_pattern': r'\d{8}\.\d{2}',
'file_datetime_pattern': "%Y%m%d.%H",
},
'cmip5': {
'file_datetime_re_pattern': r'\d{4}',
'file_datetime_pattern': "%Y",
},
'lis': {
'file_datetime_re_pattern': r'\d{10}',
'file_datetime_pattern': "%Y%m%d%H",
},
'joules': {
'file_datetime_re_pattern': r'\d{10}',
'file_datetime_pattern': "%Y%m%d%H",
},
'wrf': {
'file_datetime_re_pattern': r'\d{10}',
'file_datetime_pattern': "%Y%m%d%H",
},
}
def identify_lsm_grid(lsm_grid_path):
"""
    This is used to identify the input LSM grid
"""
# check to see what kind of file we are dealing with
lsm_example_file = Dataset(lsm_grid_path)
    # IDENTIFY LAT/LON DIMENSIONS
dim_list = lsm_example_file.dimensions.keys()
latitude_dim = "lat"
if 'latitude' in dim_list:
latitude_dim = 'latitude'
elif 'g0_lat_0' in dim_list:
# GLDAS/NLDAS MOSAIC
latitude_dim = 'g0_lat_0'
elif 'lat_110' in dim_list:
# NLDAS NOAH/VIC
latitude_dim = 'lat_110'
elif 'north_south' in dim_list:
# LIS/Joules
latitude_dim = 'north_south'
elif 'south_north' in dim_list:
# WRF Hydro
latitude_dim = 'south_north'
elif 'Y' in dim_list:
# FLDAS
latitude_dim = 'Y'
longitude_dim = "lon"
if 'longitude' in dim_list:
longitude_dim = 'longitude'
elif 'g0_lon_1' in dim_list:
# GLDAS/NLDAS MOSAIC
longitude_dim = 'g0_lon_1'
elif 'lon_110' in dim_list:
# NLDAS NOAH/VIC
longitude_dim = 'lon_110'
elif 'east_west' in dim_list:
# LIS/Joules
longitude_dim = 'east_west'
elif 'west_east' in dim_list:
# WRF Hydro
longitude_dim = 'west_east'
elif 'X' in dim_list:
# FLDAS
longitude_dim = 'X'
time_dim = None
if 'time' in dim_list:
time_dim = 'time'
elif 'Time' in dim_list:
time_dim = 'Time'
elif 'Times' in dim_list:
time_dim = 'Times'
elif 'times' in dim_list:
time_dim = 'times'
lat_dim_size = len(lsm_example_file.dimensions[latitude_dim])
lon_dim_size = len(lsm_example_file.dimensions[longitude_dim])
# IDENTIFY VARIABLES
var_list = lsm_example_file.variables.keys()
latitude_var = "lat"
if 'latitude' in var_list:
latitude_var = 'latitude'
elif 'g0_lat_0' in var_list:
latitude_var = 'g0_lat_0'
elif 'lat_110' in var_list:
latitude_var = 'lat_110'
elif 'north_south' in var_list:
latitude_var = 'north_south'
elif 'XLAT' in var_list:
# WRF
latitude_var = 'XLAT'
elif 'Y' in var_list:
# FLDAS
latitude_var = 'Y'
longitude_var = "lon"
if 'longitude' in var_list:
longitude_var = 'longitude'
elif 'g0_lon_1' in var_list:
longitude_var = 'g0_lon_1'
elif 'lon_110' in var_list:
longitude_var = 'lon_110'
elif 'east_west' in var_list:
longitude_var = 'east_west'
elif 'XLONG' in var_list:
# WRF
longitude_var = 'XLONG'
elif 'X' in var_list:
# FLDAS
longitude_var = 'X'
time_var = None
if 'time' in var_list:
time_var = 'time'
elif 'Time' in var_list:
time_var = 'Time'
elif 'Times' in var_list:
time_var = 'Times'
elif 'times' in var_list:
time_var = 'times'
surface_runoff_var = ""
subsurface_runoff_var = ""
total_runoff_var = ""
for var in var_list:
if var.startswith("SSRUN"):
# NLDAS/GLDAS
surface_runoff_var = var
elif var.startswith("BGRUN"):
# NLDAS/GLDAS
subsurface_runoff_var = var
elif var == "Qs_acc":
# GLDAS v2
surface_runoff_var = var
elif var == "Qsb_acc":
# GLDAS v2
subsurface_runoff_var = var
elif var == "Qs_tavg":
# FLDAS
surface_runoff_var = var
elif var == "Qsb_tavg":
# FLDAS
subsurface_runoff_var = var
elif var == "Qs_inst":
# LIS
surface_runoff_var = var
elif var == "Qsb_inst":
# LIS
subsurface_runoff_var = var
elif var == "SFROFF":
# WRF Hydro
surface_runoff_var = var
elif var == "UDROFF":
# WRF Hydro
subsurface_runoff_var = var
elif var.lower() == "ro":
# ERA Interim
total_runoff_var = var
elif var == "total runoff":
# CMIP5 data
total_runoff_var = var
# IDENTIFY GRID TYPE
lsm_file_data = {
"weight_file_name": "",
"grid_type": "",
"model_name": "",
"description": "",
"rapid_inflow_tool": None,
"latitude_var": latitude_var,
"longitude_var": longitude_var,
"time_var": time_var,
"latitude_dim": latitude_dim,
"longitude_dim": longitude_dim,
"time_dim": time_dim,
}
institution = ""
title = ""
try:
institution = lsm_example_file.getncattr("institution")
except AttributeError:
pass
try:
title = lsm_example_file.getncattr("title")
except AttributeError:
pass
runoff_vars = [surface_runoff_var, subsurface_runoff_var]
if institution == "European Centre for Medium-Range Weather Forecasts" \
or total_runoff_var.lower() == "ro":
# these are the ECMWF models
if lat_dim_size == 1280 and lon_dim_size == 2576:
print("Runoff file identified as new ERA Interim GRID")
            # new ERA Interim grid (downloaded at higher resolution)
            # dimensions:
            #   longitude = 2576 ;
            #   latitude = 1280 ;
lsm_file_data["description"] = "new ERA Interim GRID"
lsm_file_data["model_name"] = "erai"
lsm_file_data["weight_file_name"] = r'weight_era_new\.csv'
lsm_file_data["grid_type"] = 'erai_new'
elif lat_dim_size == 361 and lon_dim_size == 720:
print("Runoff file identified as ERA Interim Low Res (T255) GRID")
# A) ERA Interim Low Res (T255)
# Downloaded as 0.5 degree grid
# dimensions:
# longitude = 720 ;
# latitude = 361 ;
lsm_file_data["description"] = "ERA Interim (T255 Grid)"
lsm_file_data["model_name"] = "erai"
lsm_file_data["weight_file_name"] = r'weight_era_t255\.csv'
lsm_file_data["grid_type"] = 't255'
elif lat_dim_size == 512 and lon_dim_size == 1024:
print("Runoff file identified as ERA Interim High Res (T511) GRID")
# B) ERA Interim High Res (T511)
# dimensions:
# lon = 1024 ;
# lat = 512 ;
lsm_file_data["description"] = "ERA Interim (T511 Grid)"
lsm_file_data["weight_file_name"] = r'weight_era_t511\.csv'
lsm_file_data["model_name"] = "erai"
lsm_file_data["grid_type"] = 't511'
elif lat_dim_size == 161 and lon_dim_size == 320:
print("Runoff file identified as ERA 20CM (T159) GRID")
# C) ERA 20CM (T159) - 3hr - 10 ensembles
# Downloaded as 1.125 degree grid
# dimensions:
# longitude = 320 ;
# latitude = 161 ;
lsm_file_data["description"] = "ERA 20CM (T159 Grid)"
lsm_file_data["weight_file_name"] = r'weight_era_t159\.csv'
lsm_file_data["model_name"] = "era_20cm"
lsm_file_data["grid_type"] = 't159'
else:
lsm_example_file.close()
raise Exception("Unsupported ECMWF grid.")
lsm_file_data["rapid_inflow_tool"] = \
CreateInflowFileFromERAInterimRunoff()
elif institution == "NASA GSFC":
if title == "GLDAS2.0 LIS land surface model output":
print("Runoff file identified as GLDAS v2 LIS GRID")
# this is the LIS model
lsm_file_data["weight_file_name"] = r'weight_gldas2\.csv'
lsm_file_data["grid_type"] = 'gldas2'
lsm_file_data["description"] = "GLDAS2.0 LIS"
lsm_file_data["model_name"] = "nasa"
else:
print("Runoff file identified as LIS GRID")
# this is the LIS model (can be FLDAS)
# THIS CASE CAN ALSO BE FOR FLDAS, however you will need to add
# the file_datetime_pattern && file_datetime_re_pattern for it to
# work if it is not 3-hourly time step.
lsm_file_data["weight_file_name"] = r'weight_lis\.csv'
lsm_file_data["grid_type"] = 'lis'
lsm_file_data["description"] = "NASA GSFC LIS"
lsm_file_data["model_name"] = "nasa"
elif institution == "Met Office, UK":
print("Runoff file identified as Joules GRID")
lsm_file_data["weight_file_name"] = r'weight_joules\.csv'
lsm_file_data["grid_type"] = 'joules'
lsm_file_data["description"] = "Met Office Joules"
lsm_file_data["model_name"] = "met_office"
elif institution == "NCAR, USACE, USBR":
print("Runoff file identified as CMIP5")
lsm_file_data["weight_file_name"] = r'weight_cmip5\.csv'
lsm_file_data["grid_type"] = 'cmip5'
lsm_file_data["description"] = "CMIP5 Runoff"
lsm_file_data["model_name"] = "cmip5"
runoff_vars = [total_runoff_var]
elif surface_runoff_var.startswith("SSRUN") \
and subsurface_runoff_var.startswith("BGRUN"):
lsm_file_data["model_name"] = "nasa"
if lat_dim_size == 600 and lon_dim_size == 1440:
print("Runoff file identified as GLDAS GRID")
# GLDAS NC FILE
# dimensions:
# g0_lat_0 = 600 ;
# g0_lon_1 = 1440 ;
# variables
# SSRUN_GDS0_SFC_ave1h (surface)
# BGRUN_GDS0_SFC_ave1h (subsurface)
# or
# SSRUNsfc_GDS0_SFC_ave1h (surface)
# BGRUNsfc_GDS0_SFC_ave1h (subsurface)
lsm_file_data["description"] = "GLDAS"
lsm_file_data["weight_file_name"] = r'weight_gldas\.csv'
lsm_file_data["grid_type"] = 'gldas'
elif lat_dim_size <= 224 and lon_dim_size <= 464:
print("Runoff file identified as NLDAS GRID")
# NLDAS MOSAIC FILE
# dimensions:
# g0_lat_0 = 224 ;
# g0_lon_1 = 464 ;
# NLDAS NOAH/VIC FILE
# dimensions:
# lat_110 = 224 ;
# lon_110 = 464 ;
lsm_file_data["description"] = "NLDAS"
lsm_file_data["weight_file_name"] = r'weight_nldas\.csv'
lsm_file_data["grid_type"] = 'nldas'
else:
lsm_example_file.close()
raise Exception("Unsupported runoff grid.")
else:
title = ""
try:
title = lsm_example_file.getncattr("TITLE")
except AttributeError:
pass
if "WRF" in title:
lsm_file_data["description"] = "WRF/WRF-Hydro Runoff"
lsm_file_data["weight_file_name"] = r'weight_wrf\.csv'
lsm_file_data["model_name"] = 'wrf'
lsm_file_data["grid_type"] = 'wrf'
lsm_file_data['rapid_inflow_tool'] = \
CreateInflowFileFromWRFHydroRunoff(
latitude_dim,
longitude_dim,
latitude_var,
longitude_var,
surface_runoff_var,
subsurface_runoff_var,
)
else:
lsm_example_file.close()
raise Exception("Unsupported LSM grid.")
lsm_example_file.close()
# set the inflow tool to use the LDAS tool by default
if lsm_file_data["rapid_inflow_tool"] is None:
lsm_file_data["rapid_inflow_tool"] = \
CreateInflowFileFromLDASRunoff(
latitude_dim,
longitude_dim,
latitude_var,
longitude_var,
runoff_vars,
)
return lsm_file_data
def determine_start_end_timestep(lsm_file_list,
file_re_match=None,
file_datetime_pattern=None,
expected_time_step=None,
lsm_grid_info=None):
"""
Determine the start and end date from LSM input files
"""
if lsm_grid_info is None:
lsm_grid_info = identify_lsm_grid(lsm_file_list[0])
if None in (lsm_grid_info['time_var'], lsm_grid_info['time_dim'])\
or lsm_grid_info['model_name'] in ('era_20cm', 'erai'):
# NOTE: the ERA20CM and ERA 24hr time variables
# in the tests are erroneous
if None in (file_re_match, file_datetime_pattern):
raise ValueError("LSM files missing time dimension and/or "
"variable.To mitigate this, add the "
"'file_re_match' and 'file_datetime_pattern' "
"arguments.")
if lsm_grid_info['time_dim'] is None:
print("Assuming time dimension is 1")
file_size_time = 1
else:
lsm_example_file = Dataset(lsm_file_list[0])
file_size_time = \
len(lsm_example_file.dimensions[lsm_grid_info['time_dim']])
lsm_example_file.close()
total_num_time_steps = int(file_size_time * len(lsm_file_list))
# determine the start time from the existing files
actual_simulation_start_datetime = \
datetime.strptime(file_re_match.search(lsm_file_list[0]).group(0),
file_datetime_pattern)
# check to see if the time step matches expected
if len(lsm_file_list) > 1:
time_step = \
int((datetime.strptime(
file_re_match.search(lsm_file_list[1]).group(0),
file_datetime_pattern) -
actual_simulation_start_datetime).total_seconds()
/ float(file_size_time))
elif expected_time_step is not None:
time_step = int(expected_time_step)
else:
raise ValueError("Only one LSM file with one timestep present. "
"'expected_time_step' parameter required to "
"continue.")
# determine the end datetime
actual_simulation_end_datetime = \
datetime.strptime(file_re_match.search(lsm_file_list[-1]).group(0),
file_datetime_pattern) \
+ timedelta(seconds=(file_size_time-1) * time_step)
else:
with pangaea.open_mfdataset(lsm_file_list,
lat_var=lsm_grid_info['latitude_var'],
lon_var=lsm_grid_info['longitude_var'],
time_var=lsm_grid_info['time_var'],
lat_dim=lsm_grid_info['latitude_dim'],
lon_dim=lsm_grid_info['longitude_dim'],
time_dim=lsm_grid_info['time_dim']) as xds:
datetime_arr = [pd.to_datetime(dval) for dval in
xds.lsm.datetime.values]
actual_simulation_start_datetime = datetime_arr[0]
actual_simulation_end_datetime = datetime_arr[-1]
total_num_time_steps = len(datetime_arr)
if total_num_time_steps <= 1:
if expected_time_step is not None:
time_step = int(expected_time_step)
else:
raise ValueError("Only one LSM file with one timestep "
"present. 'expected_time_step' parameter "
"required to continue.")
else:
time_step = int(np.diff(xds.lsm.datetime.values)[0]
/ np.timedelta64(1, 's'))
if expected_time_step is not None:
if time_step != int(expected_time_step):
print("WARNING: The time step used {0} is different than "
"expected {1}".format(time_step, expected_time_step))
return (actual_simulation_start_datetime, actual_simulation_end_datetime,
time_step, total_num_time_steps)
# ------------------------------------------------------------------------------
# MAIN PROCESS
# ------------------------------------------------------------------------------
def run_lsm_rapid_process(rapid_executable_location,
lsm_data_location,
rapid_io_files_location=None,
rapid_input_location=None,
rapid_output_location=None,
simulation_start_datetime=None,
simulation_end_datetime=datetime.utcnow(),
file_datetime_pattern=None,
file_datetime_re_pattern=None,
initial_flows_file=None,
ensemble_list=(None,),
generate_rapid_namelist_file=True,
run_rapid_simulation=True,
generate_return_periods_file=False,
return_period_method='weibul',
generate_seasonal_averages_file=False,
generate_seasonal_initialization_file=False,
generate_initialization_file=False,
use_all_processors=True,
num_processors=1,
mpiexec_command="mpiexec",
cygwin_bin_location="",
modeling_institution="US Army Engineer Research "
"and Development Center",
convert_one_hour_to_three=False,
expected_time_step=None):
# pylint: disable=anomalous-backslash-in-string
"""
This is the main process to generate inflow for RAPID and to run RAPID.
Parameters
----------
rapid_executable_location: str
Path to the RAPID executable.
lsm_data_location: str
Path to the directory containing the Land Surface Model output files.
rapid_io_files_location: str, optional
Path to the directory containing the input and output folders for
RAPID. This is for running multiple watersheds.
rapid_input_location: str, optional
Path to directory with RAPID simulation input data.
Required if `rapid_io_files_location` is not set.
rapid_output_location: str, optional
Path to directory to put output. Required if
`rapid_io_files_location` is not set.
simulation_start_datetime: datetime, optional
Datetime object with date bound of earliest simulation start.
simulation_end_datetime: datetime, optional
Datetime object with date bound of latest simulation end.
Defaults to :obj:`datetime.utcnow`.
file_datetime_pattern: str, optional
Datetime pattern for files (Ex. '%Y%m%d%H'). If set,
`file_datetime_re_pattern` is required.
Various defaults used by each model.
file_datetime_re_pattern: raw str, optional
Regex pattern to extract datetime (Ex. r'\d{10}').
If set, `file_datetime_pattern` is required.
Various defaults used by each model.
initial_flows_file: str, optional
If given, this is the path to a file with initial flows
        for the simulation.
ensemble_list: list, optional
        This is the expected ensemble name appended to the end of the
file name.
generate_rapid_namelist_file: bool, optional
If True, this will create a RAPID namelist file for the run in
your RAPID input directory. Default is True.
run_rapid_simulation: bool, optional
If True, the RAPID simulation will run after generating the
inflow file. Default is True.
generate_return_periods_file: bool, optional
If True, the return period file will be generated in the output.
Default is False.
    return_period_method: str, optional
        Method used to compute return periods (e.g. 'weibul').
        Default is 'weibul'.
generate_seasonal_averages_file: bool, optional
If True, the season average file will be generated. Default is False.
generate_seasonal_initialization_file: bool, optional
        If True, an initialization based on the seasonal average for the
current day of the year will be created. Default is False.
generate_initialization_file: bool, optional
If True, an initialization file from the last time step of the
        simulation will be created. Default is False.
use_all_processors: bool, optional
If True, it will use all available processors to perform this
operation. Default is True.
num_processors: int, optional
If use_all_processors is False, this argument will determine the
number of processors to use. Default is 1.
mpiexec_command: str, optional
This is the command to execute RAPID. Default is "mpiexec".
cygwin_bin_location: str, optional
If using Windows, this is the path to the Cygwin bin location.
Default is "".
modeling_institution: str, optional
This is the institution performing the modeling and is in the
output files.
Default is "US Army Engineer Research and Development Center".
convert_one_hour_to_three: bool, optional
If the time step is expected to be 1-hr it will convert to 3.
Set to False if the LIS, NLDAS, or Joules grid time step is
greater than 1-hr.
expected_time_step: int, optional
The time step in seconds of your LSM input data if only one file
is given. Required if only one file is present.
Returns
-------
list:
A list of output file information.
Example of regular run:
.. code:: python
from datetime import datetime
from RAPIDpy.inflow import run_lsm_rapid_process
run_lsm_rapid_process(
rapid_executable_location='/home/alan/rapid/src/rapid',
rapid_io_files_location='/home/alan/rapid-io',
lsm_data_location='/home/alan/era_data',
)
Example of single input/output run:
.. code:: python
from datetime import datetime
from RAPIDpy.inflow import run_lsm_rapid_process
run_lsm_rapid_process(
rapid_executable_location='/home/alan/rapid/src/rapid',
rapid_input_location='/home/alan/rapid-io/input/provo_watershed',
rapid_output_location='/home/alan/rapid-io/output/provo_watershed',
lsm_data_location='/home/alan/era_data',
)
Example of run with FLDAS and datetime filter:
.. note:: http://disc.sci.gsfc.nasa.gov/uui/datasets?keywords=FLDAS
.. code:: python
from datetime import datetime
from RAPIDpy.inflow import run_lsm_rapid_process
run_lsm_rapid_process(
rapid_executable_location='/home/alan/rapid/src/rapid',
rapid_io_files_location='/home/alan/rapid-io',
lsm_data_location='/home/alan/lsm_data',
simulation_start_datetime=datetime(1980, 1, 1),
file_datetime_re_pattern = r'\d{8}',
file_datetime_pattern = "%Y%m%d",
)
Example of run with CMIP5:
.. note:: http://gdo-dcp.ucllnl.org/downscaled_cmip_projections/techmemo/BCSD5HydrologyMemo.pdf
.. code:: python
from datetime import datetime
from RAPIDpy.inflow import run_lsm_rapid_process
run_lsm_rapid_process(
rapid_executable_location='/home/jimwlewis/rapid/src/rapid',
rapid_io_files_location='/data/rapid-io4',
lsm_data_location='/data/rapid-io4/input/cmip5-jun01',
simulation_start_datetime=datetime(2001, 1, 1),
simulation_end_datetime=datetime(2002, 12, 31),
file_datetime_pattern="%Y",
file_datetime_re_pattern=r'\d{4}',
)
""" # noqa
time_begin_all = datetime.utcnow()
    # use_all_processors takes precedence over the num_processors arg
if use_all_processors is True:
num_cpus = multiprocessing.cpu_count()
elif num_processors > multiprocessing.cpu_count():
print("WARNING: Num processors requested exceeded max. Set to max ...")
num_cpus = multiprocessing.cpu_count()
else:
num_cpus = num_processors
# get list of correctly formatted rapid input directories in
# rapid directory
rapid_directories = []
if rapid_io_files_location is not None:
main_rapid_input_directory = os.path.join(rapid_io_files_location,
'input')
for watershed_directory in \
get_valid_directory_list(main_rapid_input_directory):
watershed_input_path = os.path.join(main_rapid_input_directory,
watershed_directory)
watershed_output_path = os.path.join(rapid_io_files_location,
'output',
watershed_directory)
rapid_directories.append(
(watershed_input_path, watershed_output_path))
elif None not in (rapid_input_location, rapid_output_location):
rapid_directories = [(rapid_input_location, rapid_output_location)]
else:
raise ValueError("Need 'rapid_io_files_location' or "
"'rapid_input_location' and 'rapid_output_location'"
" set to continue.")
all_output_file_information = []
for ensemble in ensemble_list:
output_file_information = {
'ensemble': ensemble,
}
ensemble_file_ending = ".nc"
ensemble_file_ending4 = ".nc4"
if ensemble is not None:
ensemble_file_ending = "_{0}.nc".format(ensemble)
ensemble_file_ending4 = "_{0}.nc4".format(ensemble)
# get list of files
lsm_file_list = []
for walkdir_info in os.walk(lsm_data_location,
followlinks=True):
for lsm_file in walkdir_info[2]:
if lsm_file.endswith(ensemble_file_ending) or \
lsm_file.endswith(ensemble_file_ending4):
lsm_file_list.append(
os.path.join(walkdir_info[0], lsm_file))
lsm_file_list = sorted(lsm_file_list)
# IDENTIFY THE GRID
lsm_file_data = identify_lsm_grid(lsm_file_list[0])
# load in the datetime pattern
if file_datetime_pattern is None or file_datetime_re_pattern is None:
file_datetime_re_pattern = \
DEFAULT_LSM_INPUTS[lsm_file_data['grid_type']][
'file_datetime_re_pattern']
file_datetime_pattern = \
DEFAULT_LSM_INPUTS[lsm_file_data['grid_type']][
'file_datetime_pattern']
file_re_match = re.compile(file_datetime_re_pattern)
# get subset based on time bounds
if simulation_start_datetime is not None:
print("Filtering files by datetime ...")
lsm_file_list_subset = []
for lsm_file in lsm_file_list:
match = file_re_match.search(lsm_file)
print(match.group(0))
file_date = datetime.strptime(match.group(0),
file_datetime_pattern)
if file_date > simulation_end_datetime:
break
if file_date >= simulation_start_datetime:
lsm_file_list_subset.append(lsm_file)
lsm_file_list = sorted(lsm_file_list_subset)
print("Running from {0} to {1}".format(lsm_file_list[0],
lsm_file_list[-1]))
# get number of time steps in file
actual_simulation_start_datetime, actual_simulation_end_datetime, \
time_step, total_num_time_steps = \
determine_start_end_timestep(
lsm_file_list,
file_re_match=file_re_match,
file_datetime_pattern=file_datetime_pattern,
expected_time_step=expected_time_step,
lsm_grid_info=lsm_file_data)
# VALIDATING INPUT IF DIVIDING BY 3
if (lsm_file_data['grid_type'] in ('nldas', 'lis', 'joules')) \
and convert_one_hour_to_three:
num_extra_files = total_num_time_steps % 3
if num_extra_files != 0:
print("WARNING: Number of files needs to be divisible by 3. "
"Remainder is {0}".format(num_extra_files))
print("This means your simulation will be truncated")
total_num_time_steps /= 3
time_step *= 3
# compile the file ending
out_file_ending = "{0}_{1}_{2}hr_{3:%Y%m%d}to{4:%Y%m%d}{5}"\
.format(lsm_file_data['model_name'],
lsm_file_data['grid_type'],
int(time_step/3600),
actual_simulation_start_datetime,
actual_simulation_end_datetime,
ensemble_file_ending)
# run LSM processes
for master_watershed_input_directory, \
master_watershed_output_directory in rapid_directories:
print("Running from: {0}".format(master_watershed_input_directory))
try:
os.makedirs(master_watershed_output_directory)
except OSError:
pass
# create inflow to dump data into
master_rapid_runoff_file = \
os.path.join(master_watershed_output_directory,
'm3_riv_bas_{0}'.format(out_file_ending))
weight_table_file = \
case_insensitive_file_search(master_watershed_input_directory,
lsm_file_data['weight_file_name'])
try:
in_rivid_lat_lon_z_file = \
case_insensitive_file_search(
master_watershed_input_directory,
r'comid_lat_lon_z\.csv')
except IndexError:
in_rivid_lat_lon_z_file = ""
print("WARNING: comid_lat_lon_z file not found."
" The lat/lon will not be added ...")
print("Writing inflow file to: {0}"
.format(master_rapid_runoff_file))
lsm_file_data['rapid_inflow_tool'].generateOutputInflowFile(
out_nc=master_rapid_runoff_file,
start_datetime_utc=actual_simulation_start_datetime,
number_of_timesteps=total_num_time_steps,
simulation_time_step_seconds=time_step,
in_rapid_connect_file=case_insensitive_file_search(
master_watershed_input_directory,
r'rapid_connect\.csv'),
in_rivid_lat_lon_z_file=in_rivid_lat_lon_z_file,
land_surface_model_description=lsm_file_data['description'],
modeling_institution=modeling_institution
)
job_combinations = []
if (lsm_file_data['grid_type'] in ('nldas', 'lis', 'joules')) \
and convert_one_hour_to_three:
print("Grouping {0} in threes"
.format(lsm_file_data['grid_type']))
lsm_file_list = [lsm_file_list[nldas_index:nldas_index+3]
for nldas_index in
range(0, len(lsm_file_list), 3)
if len(lsm_file_list[
nldas_index:nldas_index+3]) == 3]
if len(lsm_file_list) < num_cpus:
num_cpus = len(lsm_file_list)
# pylint: disable=no-member
mp_lock = multiprocessing.Manager().Lock()
partition_list, partition_index_list = \
partition(lsm_file_list, num_cpus)
for loop_index, cpu_grouped_file_list in enumerate(partition_list):
if cpu_grouped_file_list and partition_index_list[loop_index]:
job_combinations.append((
cpu_grouped_file_list,
partition_index_list[loop_index],
weight_table_file,
lsm_file_data['grid_type'],
master_rapid_runoff_file,
lsm_file_data['rapid_inflow_tool'],
mp_lock))
                    # NOTE: inflows are generated serially here; the
                    # multiprocessing pool version below is commented out.
generate_inflows_from_runoff((
cpu_grouped_file_list,
partition_index_list[loop_index],
weight_table_file,
lsm_file_data['grid_type'],
master_rapid_runoff_file,
lsm_file_data['rapid_inflow_tool'],
mp_lock))
# pool = multiprocessing.Pool(num_cpus)
# pool.map(generate_inflows_from_runoff,
# job_combinations)
# pool.close()
# pool.join()
# set up RAPID manager
rapid_manager = RAPID(
rapid_executable_location=rapid_executable_location,
cygwin_bin_location=cygwin_bin_location,
num_processors=num_cpus,
mpiexec_command=mpiexec_command,
ZS_TauR=time_step,
ZS_dtR=15 * 60,
ZS_TauM=total_num_time_steps * time_step,
ZS_dtM=time_step)
if initial_flows_file and os.path.exists(initial_flows_file):
rapid_manager.update_parameters(
Qinit_file=initial_flows_file,
BS_opt_Qinit=True
)
# run RAPID for the watershed
lsm_rapid_output_file = \
os.path.join(master_watershed_output_directory,
'Qout_{0}'.format(out_file_ending))
rapid_manager.update_parameters(
rapid_connect_file=case_insensitive_file_search(
master_watershed_input_directory,
r'rapid_connect\.csv'),
Vlat_file=master_rapid_runoff_file,
riv_bas_id_file=case_insensitive_file_search(
master_watershed_input_directory,
r'riv_bas_id\.csv'),
k_file=case_insensitive_file_search(
master_watershed_input_directory,
r'k\.csv'),
x_file=case_insensitive_file_search(
master_watershed_input_directory,
r'x\.csv'),
Qout_file=lsm_rapid_output_file
)
rapid_manager.update_reach_number_data()
output_file_information[
os.path.basename(master_watershed_input_directory)] = {
'm3_riv': master_rapid_runoff_file,
'qout': lsm_rapid_output_file
}
if generate_rapid_namelist_file:
rapid_manager.generate_namelist_file(
os.path.join(master_watershed_input_directory,
"rapid_namelist_{}"
.format(out_file_ending[:-3])))
if run_rapid_simulation:
rapid_manager.run()
rapid_manager.make_output_cf_compliant(
simulation_start_datetime=actual_simulation_start_datetime,
comid_lat_lon_z_file=in_rivid_lat_lon_z_file,
project_name="{0} Based Historical flows by {1}"
.format(lsm_file_data['description'],
modeling_institution)
)
# generate return periods
if generate_return_periods_file and \
os.path.exists(lsm_rapid_output_file) and \
lsm_rapid_output_file:
return_periods_file = os.path.join(
master_watershed_output_directory,
'return_periods_{0}'.format(out_file_ending))
# assume storm has 3 day length
storm_length_days = 3
generate_return_periods(
qout_file=lsm_rapid_output_file,
return_period_file=return_periods_file,
num_cpus=num_cpus,
storm_duration_days=storm_length_days,
method=return_period_method)
# generate seasonal averages file
if generate_seasonal_averages_file and \
os.path.exists(lsm_rapid_output_file) and \
lsm_rapid_output_file:
seasonal_averages_file = os.path.join(
master_watershed_output_directory,
'seasonal_averages_{0}'.format(out_file_ending))
generate_seasonal_averages(lsm_rapid_output_file,
seasonal_averages_file,
num_cpus)
# generate seasonal initialization file
if generate_seasonal_initialization_file and \
os.path.exists(lsm_rapid_output_file) and \
lsm_rapid_output_file:
seasonal_qinit_file = os.path.join(
master_watershed_input_directory,
'seasonal_qinit_{0}.csv'.format(out_file_ending[:-3]))
rapid_manager.generate_seasonal_intitialization(
seasonal_qinit_file)
# generate initialization file
if generate_initialization_file and \
os.path.exists(lsm_rapid_output_file) and \
lsm_rapid_output_file:
qinit_file = os.path.join(
master_watershed_input_directory,
'qinit_{0}.csv'.format(out_file_ending[:-3]))
rapid_manager.generate_qinit_from_past_qout(qinit_file)
all_output_file_information.append(output_file_information)
# print info to user
time_end = datetime.utcnow()
print("Time Begin All: {0}".format(time_begin_all))
print("Time Finish All: {0}".format(time_end))
print("TOTAL TIME: {0}".format(time_end-time_begin_all))
return all_output_file_information
``` |
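The `DEFAULT_LSM_INPUTS` patterns above drive both the datetime filtering and `determine_start_end_timestep`; a quick illustration of how one is applied (the GLDAS-style file name is hypothetical):
```python
import re
from datetime import datetime

file_datetime_re_pattern = r'\d{8}\.\d{2}'   # the 'gldas' entry
file_datetime_pattern = "%Y%m%d.%H"

fname = "GLDAS_NOAH025_3H.A20010101.03.020.nc4"   # hypothetical file name
match = re.compile(file_datetime_re_pattern).search(fname)
if match:
    print(datetime.strptime(match.group(0), file_datetime_pattern))
    # -> 2001-01-01 03:00:00
```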
{
"source": "jlgzb/mmpose",
"score": 2
} |
#### File: models/backbones/tcn.py
```python
import copy
import torch.nn as nn
from mmcv.cnn import ConvModule, build_conv_layer, constant_init, kaiming_init
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmpose.core import WeightNormClipHook
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class BasicTemporalBlock(nn.Module):
"""Basic block for VideoPose3D.
Args:
in_channels (int): Input channels of this block.
out_channels (int): Output channels of this block.
mid_channels (int): The output channels of conv1. Default: 1024.
kernel_size (int): Size of the convolving kernel. Default: 3.
dilation (int): Spacing between kernel elements. Default: 3.
dropout (float): Dropout rate. Default: 0.25.
causal (bool): Use causal convolutions instead of symmetric
convolutions (for real-time applications). Default: False.
residual (bool): Use residual connection. Default: True.
use_stride_conv (bool): Use optimized TCN that designed
specifically for single-frame batching, i.e. where batches have
input length = receptive field, and output length = 1. This
implementation replaces dilated convolutions with strided
convolutions to avoid generating unused intermediate results.
Default: False.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: dict(type='Conv1d').
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN1d').
"""
def __init__(self,
in_channels,
out_channels,
mid_channels=1024,
kernel_size=3,
dilation=3,
dropout=0.25,
causal=False,
residual=True,
use_stride_conv=False,
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d')):
# Protect mutable default arguments
conv_cfg = copy.deepcopy(conv_cfg)
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.mid_channels = mid_channels
self.kernel_size = kernel_size
self.dilation = dilation
self.dropout = dropout
self.causal = causal
self.residual = residual
self.use_stride_conv = use_stride_conv
self.pad = (kernel_size - 1) * dilation // 2
if use_stride_conv:
self.stride = kernel_size
self.causal_shift = kernel_size // 2 if causal else 0
self.dilation = 1
else:
self.stride = 1
self.causal_shift = kernel_size // 2 * dilation if causal else 0
self.conv1 = nn.Sequential(
ConvModule(
in_channels,
mid_channels,
kernel_size=kernel_size,
stride=self.stride,
dilation=self.dilation,
bias='auto',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
self.conv2 = nn.Sequential(
ConvModule(
mid_channels,
out_channels,
kernel_size=1,
bias='auto',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
if residual and in_channels != out_channels:
self.short_cut = build_conv_layer(conv_cfg, in_channels,
out_channels, 1)
else:
self.short_cut = None
self.dropout = nn.Dropout(dropout) if dropout > 0 else None
def forward(self, x):
"""Forward function."""
if self.use_stride_conv:
assert self.causal_shift + self.kernel_size // 2 < x.shape[2]
else:
assert 0 <= self.pad + self.causal_shift < x.shape[2] - \
self.pad + self.causal_shift <= x.shape[2]
out = self.conv1(x)
if self.dropout is not None:
out = self.dropout(out)
out = self.conv2(out)
if self.dropout is not None:
out = self.dropout(out)
if self.residual:
if self.use_stride_conv:
res = x[:, :, self.causal_shift +
self.kernel_size // 2::self.kernel_size]
else:
res = x[:, :,
(self.pad + self.causal_shift):(x.shape[2] - self.pad +
self.causal_shift)]
if self.short_cut is not None:
res = self.short_cut(res)
out = out + res
return out
@BACKBONES.register_module()
class TCN(BaseBackbone):
"""TCN backbone.
Temporal Convolutional Networks.
More details can be found in the
`paper <https://arxiv.org/abs/1811.11742>`__ .
Args:
in_channels (int): Number of input channels, which equals to
num_keypoints * num_features.
stem_channels (int): Number of feature channels. Default: 1024.
        num_blocks (int): Number of basic temporal convolutional blocks.
Default: 2.
kernel_sizes (Sequence[int]): Sizes of the convolving kernel of
each basic block. Default: ``(3, 3, 3)``.
dropout (float): Dropout rate. Default: 0.25.
causal (bool): Use causal convolutions instead of symmetric
convolutions (for real-time applications).
Default: False.
residual (bool): Use residual connection. Default: True.
use_stride_conv (bool): Use TCN backbone optimized for
single-frame batching, i.e. where batches have input length =
receptive field, and output length = 1. This implementation
replaces dilated convolutions with strided convolutions to avoid
generating unused intermediate results. The weights are
interchangeable with the reference implementation. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: dict(type='Conv1d').
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN1d').
max_norm (float|None): if not None, the weight of convolution layers
will be clipped to have a maximum norm of max_norm.
Example:
>>> from mmpose.models import TCN
>>> import torch
>>> self = TCN(in_channels=34)
>>> self.eval()
>>> inputs = torch.rand(1, 34, 243)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 235)
(1, 1024, 217)
"""
def __init__(self,
in_channels,
stem_channels=1024,
num_blocks=2,
kernel_sizes=(3, 3, 3),
dropout=0.25,
causal=False,
residual=True,
use_stride_conv=False,
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
max_norm=None):
# Protect mutable default arguments
conv_cfg = copy.deepcopy(conv_cfg)
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__()
self.in_channels = in_channels
self.stem_channels = stem_channels
self.num_blocks = num_blocks
self.kernel_sizes = kernel_sizes
self.dropout = dropout
self.causal = causal
self.residual = residual
self.use_stride_conv = use_stride_conv
self.max_norm = max_norm
assert num_blocks == len(kernel_sizes) - 1
for ks in kernel_sizes:
assert ks % 2 == 1, 'Only odd filter widths are supported.'
self.expand_conv = ConvModule(
in_channels,
stem_channels,
kernel_size=kernel_sizes[0],
stride=kernel_sizes[0] if use_stride_conv else 1,
bias='auto',
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
dilation = kernel_sizes[0]
self.tcn_blocks = nn.ModuleList()
for i in range(1, num_blocks + 1):
self.tcn_blocks.append(
BasicTemporalBlock(
in_channels=stem_channels,
out_channels=stem_channels,
mid_channels=stem_channels,
kernel_size=kernel_sizes[i],
dilation=dilation,
dropout=dropout,
causal=causal,
residual=residual,
use_stride_conv=use_stride_conv,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
dilation *= kernel_sizes[i]
if self.max_norm is not None:
# Apply weight norm clip to conv layers
weight_clip = WeightNormClipHook(self.max_norm)
for module in self.modules():
if isinstance(module, nn.modules.conv._ConvNd):
weight_clip.register(module)
self.dropout = nn.Dropout(dropout) if dropout > 0 else None
def forward(self, x):
"""Forward function."""
x = self.expand_conv(x)
if self.dropout is not None:
x = self.dropout(x)
outs = []
for i in range(self.num_blocks):
x = self.tcn_blocks[i](x)
outs.append(x)
return tuple(outs)
def init_weights(self, pretrained=None):
"""Initialize the weights."""
super().init_weights(pretrained)
if pretrained is None:
for m in self.modules():
if isinstance(m, nn.modules.conv._ConvNd):
kaiming_init(m, mode='fan_in', nonlinearity='relu')
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
```
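The output lengths in the TCN docstring example (235 and 217 for a 243-frame input) follow directly from the kernel sizes and dilations; a small arithmetic check, no torch required:
```python
def tcn_output_lengths(in_len, kernel_sizes):
    """Temporal length after each BasicTemporalBlock (symmetric, non-strided mode)."""
    lengths = []
    cur = in_len - (kernel_sizes[0] - 1)   # expand_conv, dilation 1
    dilation = kernel_sizes[0]
    for ks in kernel_sizes[1:]:
        cur -= (ks - 1) * dilation         # conv1 of the block
        lengths.append(cur)
        dilation *= ks
    return lengths

assert tcn_output_lengths(243, (3, 3, 3)) == [235, 217]
```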
#### File: tests/test_backward_compatibility/test_eval_hook_compatibility.py
```python
import unittest.mock as mock
import torch
from torch.utils.data import DataLoader, Dataset
from mmpose.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6]
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
@mock.create_autospec
def evaluate(self, results, res_folder=None, logger=None):
pass
def test_old_fashion_eval_hook_parameters():
data_loader = DataLoader(
ExampleDataset(),
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
# test argument "key_indicator"
_ = EvalHook(data_loader, key_indicator='AP')
_ = DistEvalHook(data_loader, key_indicator='AP')
# test argument "gpu_collect"
_ = EvalHook(data_loader, save_best='AP', gpu_collect=False)
```
#### File: tests/test_evaluation/test_bottom_up_eval.py
```python
import copy
import numpy as np
import torch
from mmpose.core import (aggregate_results, get_group_preds,
get_multi_stage_outputs)
def test_get_multi_stage_outputs():
fake_outputs = [torch.zeros((1, 4, 2, 2))]
fake_flip_outputs = [torch.ones((1, 4, 2, 2))]
# outputs_flip
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=4, with_heatmaps=[False],
with_ae=[True])
assert heatmaps == []
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True],
with_ae=[True])
assert len(heatmaps) == 1
flip_index = [1, 0]
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True],
with_ae=[True], flip_index=flip_index)
assert len(heatmaps) == 2
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
tag_per_joint=False,
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True],
with_ae=[True], flip_index=flip_index)
assert len(heatmaps) == 2
# with heatmaps & with ae
fake_outputs = [torch.zeros((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
fake_flip_outputs = [torch.ones((1, 4, 2, 2)), torch.ones((1, 2, 4, 4))]
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True, False],
with_ae=[True, True])
assert torch.allclose(heatmaps[0], torch.tensor(0.))
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False])
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, False],
with_ae=[True, False], flip_index=flip_index)
assert torch.allclose(heatmaps[0], torch.tensor(0.))
# size_projected
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=None,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False],
size_projected=(8, 8))
assert heatmaps[0].shape == torch.Size([1, 2, 8, 8])
outputs, heatmaps, tags = \
get_multi_stage_outputs(outputs=copy.deepcopy(fake_outputs),
outputs_flip=fake_flip_outputs,
num_joints=2, with_heatmaps=[True, True],
with_ae=[True, False],
align_corners=True)
assert torch.allclose(heatmaps[0], torch.tensor(0.5))
def test_aggregate_results():
fake_heatmaps = [torch.zeros((1, 2, 2, 2))]
fake_tags = [torch.zeros((1, 2, 2, 2))]
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=None, tags_list=[],
heatmaps=fake_heatmaps, tags=fake_tags,
test_scale_factor=[1], project2image=True,
flip_test=False)
assert torch.allclose(aggregated_heatmaps, torch.tensor(0.))
fake_aggr_heatmaps = torch.ones(1, 2, 2, 2)
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=True, flip_test=False)
assert torch.allclose(aggregated_heatmaps, torch.tensor(1.))
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=True, flip_test=False,
align_corners=True)
assert torch.allclose(aggregated_heatmaps, torch.tensor(1.))
fake_heatmaps = [torch.zeros((1, 2, 2, 2)), torch.ones((1, 2, 2, 2))]
fake_aggr_heatmaps = torch.ones(1, 2, 4, 4)
aggregated_heatmaps, tags_list = \
aggregate_results(scale=1, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1],
project2image=False, flip_test=True)
assert aggregated_heatmaps.shape == torch.Size((1, 2, 4, 4))
aggregated_heatmaps, tags_list = \
aggregate_results(scale=2, aggregated_heatmaps=fake_aggr_heatmaps,
tags_list=[], heatmaps=fake_heatmaps,
tags=fake_tags, test_scale_factor=[1, 2],
project2image=False, flip_test=True)
assert aggregated_heatmaps.shape == torch.Size((1, 2, 4, 4))
def test_get_group_preds():
fake_grouped_joints = [np.array([[[0, 0], [1, 1]]])]
results = get_group_preds(
fake_grouped_joints,
center=np.array([0, 0]),
scale=np.array([1, 1]),
heatmap_size=np.array([2, 2]))
assert not results == []
results = get_group_preds(
fake_grouped_joints,
center=np.array([0, 0]),
scale=np.array([1, 1]),
heatmap_size=np.array([2, 2]),
use_udp=True)
assert not results == []
```
#### File: mmpose/tests/test_post_processing.py
```python
import numpy as np
from numpy.testing import assert_array_almost_equal
from mmpose.core import (affine_transform, flip_back, fliplr_joints,
fliplr_regression, get_affine_transform, rotate_point,
transform_preds)
def test_affine_transform():
pt = np.array([0, 1])
trans = np.array([[1, 0, 1], [0, 1, 0]])
ans = affine_transform(pt, trans)
assert_array_almost_equal(ans, np.array([1, 1]), decimal=4)
assert isinstance(ans, np.ndarray)
def test_rotate_point():
src_point = np.array([0, 1])
rot_rad = np.pi / 2.
ans = rotate_point(src_point, rot_rad)
assert_array_almost_equal(ans, np.array([-1, 0]), decimal=4)
assert isinstance(ans, list)
def test_fliplr_joints():
joints = np.array([[0, 0, 0], [1, 1, 0]])
joints_vis = np.array([[1], [1]])
joints_flip, _ = fliplr_joints(joints, joints_vis, 5, [[0, 1]])
res = np.array([[3, 1, 0], [4, 0, 0]])
assert_array_almost_equal(joints_flip, res)
def test_flip_back():
heatmaps = np.random.random([1, 2, 32, 32])
flipped_heatmaps = flip_back(heatmaps, [[0, 1]])
heatmaps_new = flip_back(flipped_heatmaps, [[0, 1]])
assert_array_almost_equal(heatmaps, heatmaps_new)
heatmaps = np.random.random([1, 2, 32, 32])
flipped_heatmaps = flip_back(heatmaps, [[0, 1]])
heatmaps_new = flipped_heatmaps[..., ::-1]
assert_array_almost_equal(heatmaps[:, 0], heatmaps_new[:, 1])
assert_array_almost_equal(heatmaps[:, 1], heatmaps_new[:, 0])
ori_heatmaps = heatmaps.copy()
# test in-place flip
heatmaps = heatmaps[:, :, :, ::-1]
assert_array_almost_equal(ori_heatmaps[:, :, :, ::-1], heatmaps)
def test_transform_preds():
coords = np.random.random([2, 2])
center = np.array([50, 50])
scale = np.array([100 / 200.0, 100 / 200.0])
size = np.array([100, 100])
ans = transform_preds(coords, center, scale, size)
assert_array_almost_equal(coords, ans)
coords = np.random.random([2, 2])
center = np.array([50, 50])
scale = np.array([100 / 200.0, 100 / 200.0])
size = np.array([101, 101])
ans = transform_preds(coords, center, scale, size, use_udp=True)
assert_array_almost_equal(coords, ans)
def test_get_affine_transform():
center = np.array([50, 50])
scale = np.array([100 / 200.0, 100 / 200.0])
size = np.array([100, 100])
ans = get_affine_transform(center, scale, 0, size)
trans = np.array([[1, 0, 0], [0, 1, 0]])
assert_array_almost_equal(trans, ans)
def test_flip_regression():
coords = np.random.rand(3, 3)
flip_pairs = [[1, 2]]
root = coords[:1]
coords_flipped = coords.copy()
coords_flipped[1] = coords[2]
coords_flipped[2] = coords[1]
coords_flipped[..., 0] = 2 * root[..., 0] - coords_flipped[..., 0]
# static mode
res_static = fliplr_regression(
coords, flip_pairs, center_mode='static', center_x=root[0, 0])
assert_array_almost_equal(res_static, coords_flipped)
# root mode
res_root = fliplr_regression(
coords, flip_pairs, center_mode='root', center_index=0)
assert_array_almost_equal(res_root, coords_flipped)
```
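The post-processing tests above pin down the expected geometry. As a hedged illustration (not the mmpose implementation itself, whose real code lives in `mmpose.core`), this minimal NumPy sketch reproduces the affine-transform and point-rotation behavior those asserts check:

```python
import numpy as np

def affine_transform_sketch(pt, trans):
    # Apply a 2x3 affine matrix to a 2D point using homogeneous coordinates.
    return (trans @ np.array([pt[0], pt[1], 1.0]))[:2]

def rotate_point_sketch(pt, rot_rad):
    # Rotate a 2D point counter-clockwise by rot_rad radians about the origin.
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
    return [pt[0] * cs - pt[1] * sn, pt[0] * sn + pt[1] * cs]

# Same values as test_affine_transform and test_rotate_point above.
assert np.allclose(
    affine_transform_sketch([0, 1], np.array([[1., 0., 1.], [0., 1., 0.]])), [1, 1])
assert np.allclose(rotate_point_sketch([0, 1], np.pi / 2.), [-1, 0])
```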
#### File: tools/dataset/preprocess_h36m.py
```python
import argparse
import os
import pickle
import tarfile
import xml.etree.ElementTree as ET
from os.path import join
import cv2
import numpy as np
from spacepy import pycdf
class PreprocessH36m:
"""Preprocess Human3.6M dataset.
Args:
metadata (str): Path to metadata.xml.
original_dir (str): Directory of the original dataset with all files
compressed. Specifically, .tgz files belonging to subject 1
should be placed under the subdirectory 's1'.
extracted_dir (str): Directory of the extracted files. If not given, it
will be placed under the same parent directory as original_dir.
        processed_dir (str): Directory of the processed files. If not given, it
will be placed under the same parent directory as original_dir.
sample_rate (int): Downsample FPS to `1 / sample_rate`. Default: 5.
"""
def __init__(self,
metadata,
original_dir,
extracted_dir=None,
processed_dir=None,
sample_rate=5):
self.metadata = metadata
self.original_dir = original_dir
self.sample_rate = sample_rate
if extracted_dir is None:
self.extracted_dir = join(
os.path.dirname(os.path.abspath(self.original_dir)),
'extracted')
else:
self.extracted_dir = extracted_dir
if processed_dir is None:
self.processed_dir = join(
os.path.dirname(os.path.abspath(self.original_dir)),
'processed')
else:
self.processed_dir = processed_dir
self.subjects = []
self.sequence_mappings = {}
self.action_names = {}
self.camera_ids = []
self._load_metadata()
self.subjects_annot = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
self.subjects_splits = {
'train': ['S1', 'S5', 'S6', 'S7', 'S8'],
'test': ['S9', 'S11']
}
self.extract_files = ['Videos', 'D2_Positions', 'D3_Positions_mono']
self.movable_joints = [
0, 1, 2, 3, 6, 7, 8, 12, 13, 14, 15, 17, 18, 19, 25, 26, 27
]
self.scale_factor = 1.2
self.image_sizes = {
'54138969': {
'width': 1000,
'height': 1002
},
'55011271': {
'width': 1000,
'height': 1000
},
'58860488': {
'width': 1000,
'height': 1000
},
'60457274': {
'width': 1000,
'height': 1002
}
}
def extract_tgz(self):
"""Extract files from self.extrct_files."""
os.makedirs(self.extracted_dir, exist_ok=True)
for subject in self.subjects_annot:
cur_dir = join(self.original_dir, subject.lower())
for file in self.extract_files:
filename = join(cur_dir, file + '.tgz')
print(f'Extracting {filename} ...')
with tarfile.open(filename) as tar:
tar.extractall(self.extracted_dir)
print('Extraction done.\n')
def generate_cameras_file(self):
"""Generate cameras.pkl which contains camera parameters for 11
subjects each with 4 cameras."""
cameras = {}
for subject in range(1, 12):
for camera in range(4):
key = (f'S{subject}', self.camera_ids[camera])
cameras[key] = self._get_camera_params(camera, subject)
out_file = join(self.processed_dir, 'annotation_body3d', 'cameras.pkl')
with open(out_file, 'wb') as fout:
pickle.dump(cameras, fout)
print(f'Camera parameters have been written to "{out_file}".\n')
def generate_annotations(self):
"""Generate annotations for training and testing data."""
output_dir = join(self.processed_dir, 'annotation_body3d',
f'fps{50 // self.sample_rate}')
os.makedirs(output_dir, exist_ok=True)
for data_split in ('train', 'test'):
imgnames_all = []
centers_all = []
scales_all = []
kps2d_all = []
kps3d_all = []
for subject in self.subjects_splits[data_split]:
                for action, subaction in self.sequence_mappings[subject]:
if action == '1':
# exclude action "_ALL"
continue
for camera in self.camera_ids:
imgnames, centers, scales, kps2d, kps3d\
= self._load_annotations(
subject, action, subaction, camera)
imgnames_all.append(imgnames)
centers_all.append(centers)
scales_all.append(scales)
kps2d_all.append(kps2d)
kps3d_all.append(kps3d)
imgnames_all = np.concatenate(imgnames_all)
centers_all = np.concatenate(centers_all)
scales_all = np.concatenate(scales_all)
kps2d_all = np.concatenate(kps2d_all)
kps3d_all = np.concatenate(kps3d_all)
out_file = join(output_dir, f'h36m_{data_split}.npz')
np.savez(
out_file,
imgname=imgnames_all,
center=centers_all,
scale=scales_all,
part=kps2d_all,
S=kps3d_all)
print(
f'All annotations of {data_split}ing data have been written to'
f' "{out_file}". {len(imgnames_all)} samples in total.\n')
if data_split == 'train':
kps_3d_all = kps3d_all[..., :3] # remove visibility
mean_3d, std_3d = self._get_pose_stats(kps_3d_all)
kps_2d_all = kps2d_all[..., :2] # remove visibility
mean_2d, std_2d = self._get_pose_stats(kps_2d_all)
# centered around root
# the root keypoint is 0-index
kps_3d_rel = kps_3d_all[..., 1:, :] - kps_3d_all[..., :1, :]
mean_3d_rel, std_3d_rel = self._get_pose_stats(kps_3d_rel)
kps_2d_rel = kps_2d_all[..., 1:, :] - kps_2d_all[..., :1, :]
mean_2d_rel, std_2d_rel = self._get_pose_stats(kps_2d_rel)
stats = {
'joint3d_stats': {
'mean': mean_3d,
'std': std_3d
},
'joint2d_stats': {
'mean': mean_2d,
'std': std_2d
},
'joint3d_rel_stats': {
'mean': mean_3d_rel,
'std': std_3d_rel
},
'joint2d_rel_stats': {
'mean': mean_2d_rel,
'std': std_2d_rel
}
}
for name, stat_dict in stats.items():
out_file = join(output_dir, f'{name}.pkl')
with open(out_file, 'wb') as f:
pickle.dump(stat_dict, f)
print(f'Create statistic data file: {out_file}')
@staticmethod
def _get_pose_stats(kps):
"""Get statistic information `mean` and `std` of pose data.
Args:
            kps (ndarray): keypoints in shape [..., K, C] where K and C are
                the keypoint category number and dimension.
        Returns:
            mean (ndarray): [K, C]
            std (ndarray): [K, C]
"""
assert kps.ndim > 2
K, C = kps.shape[-2:]
kps = kps.reshape(-1, K, C)
mean = kps.mean(axis=0)
std = kps.std(axis=0)
return mean, std
def _load_metadata(self):
"""Load meta data from metadata.xml."""
assert os.path.exists(self.metadata)
tree = ET.parse(self.metadata)
root = tree.getroot()
for i, tr in enumerate(root.find('mapping')):
if i == 0:
_, _, *self.subjects = [td.text for td in tr]
self.sequence_mappings \
= {subject: {} for subject in self.subjects}
elif i < 33:
action_id, subaction_id, *prefixes = [td.text for td in tr]
for subject, prefix in zip(self.subjects, prefixes):
self.sequence_mappings[subject][(action_id, subaction_id)]\
= prefix
for i, elem in enumerate(root.find('actionnames')):
action_id = str(i + 1)
self.action_names[action_id] = elem.text
self.camera_ids \
= [elem.text for elem in root.find('dbcameras/index2id')]
w0 = root.find('w0')
self.cameras_raw = [float(num) for num in w0.text[1:-1].split()]
def _get_base_filename(self, subject, action, subaction, camera):
"""Get base filename given subject, action, subaction and camera."""
return f'{self.sequence_mappings[subject][(action, subaction)]}' + \
f'.{camera}'
def _get_camera_params(self, camera, subject):
"""Get camera parameters given camera id and subject id."""
metadata_slice = np.zeros(15)
start = 6 * (camera * 11 + (subject - 1))
metadata_slice[:6] = self.cameras_raw[start:start + 6]
metadata_slice[6:] = self.cameras_raw[265 + camera * 9 - 1:265 +
(camera + 1) * 9 - 1]
# extrinsics
x, y, z = -metadata_slice[0], metadata_slice[1], -metadata_slice[2]
R_x = np.array([[1, 0, 0], [0, np.cos(x), np.sin(x)],
[0, -np.sin(x), np.cos(x)]])
R_y = np.array([[np.cos(y), 0, np.sin(y)], [0, 1, 0],
[-np.sin(y), 0, np.cos(y)]])
R_z = np.array([[np.cos(z), np.sin(z), 0], [-np.sin(z),
np.cos(z), 0], [0, 0, 1]])
R = (R_x @ R_y @ R_z).T
T = metadata_slice[3:6].reshape(-1, 1)
# convert unit from milimeter to meter
T *= 0.001
# intrinsics
c = metadata_slice[8:10, None]
f = metadata_slice[6:8, None]
# distortion
k = metadata_slice[10:13, None]
p = metadata_slice[13:15, None]
return {
'R': R,
'T': T,
'c': c,
'f': f,
'k': k,
'p': p,
'w': self.image_sizes[self.camera_ids[camera]]['width'],
'h': self.image_sizes[self.camera_ids[camera]]['height'],
'name': f'camera{camera + 1}',
'id': self.camera_ids[camera]
}
def _load_annotations(self, subject, action, subaction, camera):
"""Load annotations for a sequence."""
subj_dir = join(self.extracted_dir, subject)
basename = self._get_base_filename(subject, action, subaction, camera)
# load 2D keypoints
with pycdf.CDF(
join(subj_dir, 'MyPoseFeatures', 'D2_Positions',
basename + '.cdf')) as cdf:
kps_2d = np.array(cdf['Pose'])
num_frames = kps_2d.shape[1]
kps_2d = kps_2d.reshape((num_frames, 32, 2))[::self.sample_rate,
self.movable_joints]
kps_2d = np.concatenate([kps_2d, np.ones((len(kps_2d), 17, 1))],
axis=2)
# load 3D keypoints
with pycdf.CDF(
join(subj_dir, 'MyPoseFeatures', 'D3_Positions_mono',
basename + '.cdf')) as cdf:
kps_3d = np.array(cdf['Pose'])
kps_3d = kps_3d.reshape(
(num_frames, 32, 3))[::self.sample_rate,
self.movable_joints] / 1000.
kps_3d = np.concatenate([kps_3d, np.ones((len(kps_3d), 17, 1))],
axis=2)
# calculate bounding boxes
bboxes = np.stack([
np.min(kps_2d[:, :, 0], axis=1),
np.min(kps_2d[:, :, 1], axis=1),
np.max(kps_2d[:, :, 0], axis=1),
np.max(kps_2d[:, :, 1], axis=1)
],
axis=1)
centers = np.stack([(bboxes[:, 0] + bboxes[:, 2]) / 2,
(bboxes[:, 1] + bboxes[:, 3]) / 2],
axis=1)
scales = self.scale_factor * np.max(
bboxes[:, 2:] - bboxes[:, :2], axis=1) / 200
# extract frames and save imgnames
imgnames = []
video_path = join(subj_dir, 'Videos', basename + '.mp4')
sub_base = subject + '_' + basename.replace(' ', '_')
img_dir = join(self.processed_dir, 'images', subject, sub_base)
os.makedirs(img_dir, exist_ok=True)
prefix = join(subject, sub_base, sub_base)
cap = cv2.VideoCapture(video_path)
i = 0
while True:
success, img = cap.read()
if not success:
break
if i % self.sample_rate == 0:
imgname = f'{prefix}_{i + 1:06d}.jpg'
imgnames.append(imgname)
dest_path = join(self.processed_dir, 'images', imgname)
if not os.path.exists(dest_path):
cv2.imwrite(dest_path, img)
if len(imgnames) == len(centers):
break
i += 1
cap.release()
imgnames = np.array(imgnames)
        print(f'Annotations for sequence "{subject} {basename}" are loaded. '
f'{len(imgnames)} samples in total.')
return imgnames, centers, scales, kps_2d, kps_3d
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--metadata', type=str, required=True, help='Path to metadata.xml')
parser.add_argument(
'--original',
type=str,
required=True,
help='Directory of the original dataset with all files compressed. '
'Specifically, .tgz files belonging to subject 1 should be placed '
'under the subdirectory \"s1\".')
parser.add_argument(
'--extracted',
type=str,
default=None,
help='Directory of the extracted files. If not given, it will be '
'placed under the same parent directory as original_dir.')
parser.add_argument(
'--processed',
type=str,
default=None,
help='Directory of the processed files. If not given, it will be '
'placed under the same parent directory as original_dir.')
parser.add_argument(
'--sample_rate',
type=int,
default=5,
help='Downsample FPS to `1 / sample_rate`. Default: 5.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
h36m = PreprocessH36m(
metadata=args.metadata,
original_dir=args.original,
extracted_dir=args.extracted,
processed_dir=args.processed,
sample_rate=args.sample_rate)
h36m.extract_tgz()
h36m.generate_cameras_file()
h36m.generate_annotations()
``` |
{
"source": "jlhall/dotfiles",
"score": 2
} |
#### File: workflows/user.workflow.F455B544-D4E3-402B-B987-8D3EA582A111/alfred-wunderlist-workflow.py
```python
import logging
from logging.config import fileConfig
import sys
fileConfig('logging_config.ini')
from wunderlist.handlers.route import route
from wunderlist.util import workflow
log = logging.getLogger('wunderlist')
def main(wf):
route(wf.args)
log.info('Workflow response complete')
if __name__ == '__main__':
wf = workflow()
sys.exit(wf.run(main, text_errors='--commit' in wf.args))
```
#### File: wunderlist/handlers/search.py
```python
import re
from peewee import fn, OperationalError
from workflow import MATCH_ALL, MATCH_ALLCHARS
from wunderlist import icons
from wunderlist.models.list import List
from wunderlist.models.preferences import Preferences
from wunderlist.models.task import Task
from wunderlist.sync import background_sync
from wunderlist.util import workflow
_hashtag_prompt_pattern = re.compile(r'#\S*$', re.UNICODE)
def filter(args):
query = ' '.join(args[1:])
wf = workflow()
prefs = Preferences.current_prefs()
matching_hashtags = []
if not query:
wf.add_item('Begin typing to search tasks', '', icon=icons.SEARCH)
hashtag_match = re.search(_hashtag_prompt_pattern, query)
if hashtag_match:
from wunderlist.models.hashtag import Hashtag
hashtag_prompt = hashtag_match.group().lower()
hashtags = Hashtag.select().where(Hashtag.id.contains(hashtag_prompt)).order_by(fn.Lower(Hashtag.tag).asc())
for hashtag in hashtags:
# If there is an exact match, do not show hashtags
if hashtag.id == hashtag_prompt:
matching_hashtags = []
break
matching_hashtags.append(hashtag)
# Show hashtag prompt if there is more than one matching hashtag or the
# hashtag being typed does not exactly match the single matching hashtag
if len(matching_hashtags) > 0:
for hashtag in matching_hashtags:
wf.add_item(hashtag.tag[1:], '', autocomplete=u'-search %s%s ' % (query[:hashtag_match.start()], hashtag.tag), icon=icons.HASHTAG)
else:
conditions = True
lists = workflow().stored_data('lists')
matching_lists = None
query = ' '.join(args[1:]).strip()
list_query = None
# Show all lists on the main search screen
if not query:
matching_lists = lists
# Filter lists when colon is used
if ':' in query:
matching_lists = lists
components = re.split(r':\s*', query, 1)
list_query = components[0]
if list_query:
matching_lists = workflow().filter(
list_query,
lists if lists else [],
lambda l: l['title'],
# Ignore MATCH_ALLCHARS which is expensive and inaccurate
match_on=MATCH_ALL ^ MATCH_ALLCHARS
)
# If no matching list search against all tasks
if matching_lists:
query = components[1] if len(components) > 1 else ''
# If there is a list exactly matching the query ignore
# anything else. This takes care of lists that are substrings
# of other lists
if len(matching_lists) > 1:
for l in matching_lists:
if l['title'].lower() == list_query.lower():
matching_lists = [l]
break
if matching_lists:
if not list_query:
wf.add_item('Browse by hashtag', autocomplete='-search #', icon=icons.HASHTAG)
if len(matching_lists) > 1:
for l in matching_lists:
icon = icons.INBOX if l['list_type'] == 'inbox' else icons.LIST
wf.add_item(l['title'], autocomplete='-search %s: ' % l['title'], icon=icon)
else:
conditions = conditions & (Task.list == matching_lists[0]['id'])
if not matching_lists or len(matching_lists) <= 1:
for arg in query.split(' '):
if len(arg) > 1:
conditions = conditions & (Task.title.contains(arg) | List.title.contains(arg))
if conditions:
if not prefs.show_completed_tasks:
conditions = Task.completed_at.is_null() & conditions
tasks = Task.select().where(Task.list.is_null(False) & conditions)
# Default Wunderlist sort order reversed to show newest first
tasks = tasks.join(List).order_by(Task.order.desc(), List.order.asc())
# Avoid excessive results
tasks = tasks.limit(50)
try:
for t in tasks:
wf.add_item(u'%s – %s' % (t.list_title, t.title), t.subtitle(), autocomplete='-task %s ' % t.id, icon=icons.TASK_COMPLETED if t.completed else icons.TASK)
except OperationalError:
background_sync()
if prefs.show_completed_tasks:
wf.add_item('Hide completed tasks', arg='-pref show_completed_tasks --alfred %s' % ' '.join(args), valid=True, icon=icons.HIDDEN)
else:
wf.add_item('Show completed tasks', arg='-pref show_completed_tasks --alfred %s' % ' '.join(args), valid=True, icon=icons.VISIBLE)
wf.add_item('New search', autocomplete='-search ', icon=icons.CANCEL)
wf.add_item('Main menu', autocomplete='', icon=icons.BACK)
# Make sure tasks are up-to-date while searching
background_sync()
def commit(args, modifier=None):
action = args[1]
```
#### File: wunderlist/models/list.py
```python
import logging
import time
from peewee import (BooleanField, CharField, IntegerField, PeeweeException,
PrimaryKeyField, TextField)
from wunderlist.models.fields import DateTimeUTCField
from wunderlist.models.base import BaseModel
from wunderlist.util import workflow, NullHandler
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class List(BaseModel):
id = PrimaryKeyField()
title = TextField(index=True)
list_type = CharField()
public = BooleanField()
completed_count = IntegerField(default=0)
uncompleted_count = IntegerField(default=0)
order = IntegerField(index=True)
revision = IntegerField()
created_at = DateTimeUTCField()
@classmethod
def sync(cls):
from wunderlist.api import lists
start = time.time()
lists_data = lists.lists()
instances = []
log.info('Retrieved all %d lists in %s', len(lists_data), time.time() - start)
start = time.time()
workflow().store_data('lists', lists_data)
try:
instances = cls.select(cls.id, cls.revision, cls.title)
except PeeweeException:
pass
log.info('Loaded all %d lists from the database in %s', len(instances), time.time() - start)
return cls._perform_updates(instances, lists_data)
@classmethod
def _populate_api_extras(cls, info):
from wunderlist.api.lists import update_list_with_tasks_count
update_list_with_tasks_count(info)
return info
def __str__(self):
return u'<%s %d %s>' % (type(self).__name__, self.id, self.title)
def _sync_children(self):
from wunderlist.models.task import Task
Task.sync_tasks_in_list(self)
class Meta:
order_by = ('order', 'id')
has_children = True
```
#### File: wunderlist/models/reminder.py
```python
import logging
import time
from peewee import (ForeignKeyField, IntegerField, PeeweeException,
PrimaryKeyField)
from wunderlist.models.fields import DateTimeUTCField
from wunderlist.models.base import BaseModel
from wunderlist.models.task import Task
from wunderlist.util import NullHandler
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class Reminder(BaseModel):
id = PrimaryKeyField()
task = ForeignKeyField(Task, null=True, related_name='reminders')
date = DateTimeUTCField()
revision = IntegerField()
created_at = DateTimeUTCField()
@classmethod
def sync(cls):
from wunderlist.api import reminders
start = time.time()
instances = []
reminders_data = reminders.reminders()
log.info('Retrieved all %d reminders in %s', len(reminders_data), time.time() - start)
start = time.time()
try:
instances = cls.select(cls.id, cls.revision)
except PeeweeException:
pass
log.info('Loaded all %d reminders from the database in %s', len(instances), time.time() - start)
return cls._perform_updates(instances, reminders_data)
```
#### File: wunderlist/models/root.py
```python
import logging
import time
from peewee import ForeignKeyField, IntegerField, PrimaryKeyField
from workflow.notify import notify
from wunderlist.models.base import BaseModel
from wunderlist.models.list import List
from wunderlist.models.user import User
from wunderlist.util import NullHandler
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
class Root(BaseModel):
id = PrimaryKeyField()
user = ForeignKeyField(User, null=True)
revision = IntegerField()
@classmethod
def sync(cls, background=False):
from wunderlist.api import root
start = time.time()
instance = None
root_data = root.root()
log.info('Retrieved Root revision in %s', time.time() - start)
try:
instance = cls.get()
except Root.DoesNotExist:
pass
if not background and (not instance or
instance.revision != root_data['revision']):
notify('Please wait...', 'The workflow is making sure your tasks are up-to-date')
return cls._perform_updates([instance], [root_data])
def _sync_children(self):
from wunderlist.models.hashtag import Hashtag
from wunderlist.models.preferences import Preferences
from wunderlist.models.reminder import Reminder
start = time.time()
user_revised = User.sync()
log.info('Synced user in %s', time.time() - start)
start = time.time()
lists_revised = List.sync()
log.info('Synced lists and tasks in %s', time.time() - start)
start = time.time()
# Changes to reminders or settings increment the User revision
if user_revised:
Preferences.sync()
log.info('Synced preferences in %s', time.time() - start)
start = time.time()
Reminder.sync()
log.info('Synced reminders in %s', time.time() - start)
start = time.time()
# Changes in lists or tasks require hashtags to be updated
if lists_revised:
Hashtag.sync()
log.info('Synced hashtags in %s', time.time() - start)
def __str__(self):
return '<%s>' % (type(self).__name__)
class Meta(object):
expect_revisions = True
has_children = True
```
#### File: wunderlist/models/task.py
```python
from datetime import date
import logging
import time
from peewee import (BooleanField, CharField, DateField, ForeignKeyField,
IntegerField, PeeweeException, PrimaryKeyField, TextField,
JOIN)
from wunderlist.models.fields import DateTimeUTCField
from wunderlist.models.base import BaseModel
from wunderlist.models.list import List
from wunderlist.models.user import User
from wunderlist.util import short_relative_formatted_date, NullHandler
log = logging.getLogger(__name__)
log.addHandler(NullHandler())
_days_by_recurrence_type = {
'day': 1,
'week': 7,
'month': 30.43,
'year': 365
}
_star = u'★'
_overdue_1x = u'⚠️'
_overdue_2x = u'❗️'
_recurrence = u'↻'
_reminder = u'⏰'
class Task(BaseModel):
id = PrimaryKeyField()
list = ForeignKeyField(List, null=True, related_name='tasks')
task = ForeignKeyField('self', null=True, related_name='subtasks')
title = TextField(index=True)
completed_at = DateTimeUTCField(null=True)
completed_by = ForeignKeyField(User, related_name='completed_tasks', null=True)
starred = BooleanField(index=True, null=True)
due_date = DateField(index=True, null=True)
recurrence_type = CharField(null=True)
recurrence_count = IntegerField(null=True)
assignee = ForeignKeyField(User, related_name='assigned_tasks', null=True)
order = IntegerField(index=True, null=True)
revision = IntegerField()
created_at = DateTimeUTCField()
created_by = ForeignKeyField(User, related_name='created_tasks', null=True)
@classmethod
def sync_tasks_in_list(cls, list):
from wunderlist.api import tasks
from concurrent import futures
start = time.time()
instances = []
tasks_data = []
position_by_task_id = {}
with futures.ThreadPoolExecutor(max_workers=4) as executor:
positions_job = executor.submit(tasks.task_positions, list.id)
jobs = (
executor.submit(tasks.tasks, list.id, completed=False),
executor.submit(tasks.tasks, list.id, completed=True),
executor.submit(tasks.tasks, list.id, subtasks=True)
)
for job in futures.as_completed(jobs):
tasks_data += job.result()
            position_by_task_id = dict((id, index) for (index, id) in enumerate(positions_job.result()))
log.info('Retrieved all %d tasks for %s in %s', len(tasks_data), list, time.time() - start)
start = time.time()
def task_order(task):
task['order'] = position_by_task_id.get(task['id'])
return task['order'] or 1e99
tasks_data.sort(key=task_order)
try:
# Include all tasks thought to be in the list, plus any additional
# tasks referenced in the data (task may have been moved to a different list)
ParentTask = cls.alias()
task_ids = [task['id'] for task in tasks_data]
instances = cls.select(cls.id, cls.title, cls.revision)\
.join(ParentTask, JOIN.LEFT_OUTER)\
.where(
(ParentTask.list == list.id) |
(cls.list == list.id) |
(cls.id.in_(task_ids)) |
(cls.task.in_(task_ids))
)
except PeeweeException:
pass
log.info('Loaded all %d tasks for %s from the database in %s', len(instances), list, time.time() - start)
start = time.time()
cls._perform_updates(instances, tasks_data)
log.info('Completed updates to tasks in %s in %s', list, time.time() - start)
return None
@classmethod
def due_today(cls):
return (
cls.select(cls, List)
.join(List)
.where(cls.completed_at >> None)
.where(cls.due_date <= date.today())
.order_by(List.order.asc(), cls.due_date.asc())
)
@classmethod
def search(cls, query):
return (
cls.select(cls, List)
.join(List)
.where(cls.completed_at >> None)
.where(cls.title.contains(query))
.order_by(List.order.asc(), cls.due_date.asc())
)
@property
def reminder_date_local(self):
# For related property Task.reminders
import wunderlist.models.reminder
for reminder in self.reminders:
return reminder.date_local
return None
@property
def completed(self):
return bool(self.completed_at)
@property
def overdue_times(self):
if self.recurrence_type is None or self.completed:
return 0
recurrence_days = _days_by_recurrence_type[self.recurrence_type] * self.recurrence_count
overdue_time = date.today() - self.due_date
return int(overdue_time.days / recurrence_days)
@property
def list_title(self):
if self.list:
return self.list.title
return None
def subtitle(self):
from wunderlist.util import format_time
subtitle = []
if self.starred:
subtitle.append(_star)
# Task is completed
if self.completed:
subtitle.append('Completed %s' % short_relative_formatted_date(self.completed_at))
# Task is not yet completed
elif self.due_date:
subtitle.append('Due %s' % short_relative_formatted_date(self.due_date))
if self.recurrence_type:
if self.recurrence_count > 1:
subtitle.append('%s Every %d %ss' % (_recurrence, self.recurrence_count, self.recurrence_type))
# Cannot simply add -ly suffix
elif self.recurrence_type == 'day':
subtitle.append('%s Daily' % (_recurrence))
else:
subtitle.append('%s %sly' % (_recurrence, self.recurrence_type.title()))
if not self.completed:
overdue_times = self.overdue_times
if overdue_times > 1:
subtitle.insert(0, u'%s %dX OVERDUE!' % (_overdue_2x, overdue_times))
elif overdue_times == 1:
subtitle.insert(0, u'%s OVERDUE!' % (_overdue_1x))
reminder_date = self.reminder_date_local
if reminder_date:
reminder_date_phrase = None
if reminder_date.date() == self.due_date:
reminder_date_phrase = 'On due date'
else:
reminder_date_phrase = short_relative_formatted_date(reminder_date)
subtitle.append('%s %s at %s' % (
_reminder,
reminder_date_phrase,
format_time(reminder_date, 'short')))
subtitle.append(self.title)
return ' '.join(subtitle)
def __str__(self):
title = self.title if len(self.title) <= 20 else self.title[:20].rstrip() + u'…'
return u'<%s %d %s>' % (type(self).__name__, self.id, title)
class Meta(object):
order_by = ('order', 'id')
```
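As a quick, standalone sketch of the `overdue_times` arithmetic in `Task` above (assuming the same `_days_by_recurrence_type` table; not part of the workflow itself), a weekly task that is 15 days past due counts as overdue twice:

```python
from datetime import date, timedelta

days_by_recurrence_type = {'day': 1, 'week': 7, 'month': 30.43, 'year': 365}

due_date = date.today() - timedelta(days=15)            # 15 days overdue
recurrence_days = days_by_recurrence_type['week'] * 1   # weekly, every 1 week
overdue_time = date.today() - due_date
assert int(overdue_time.days / recurrence_days) == 2
```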
#### File: user.workflow.F455B544-D4E3-402B-B987-8D3EA582A111/wunderlist/sync.py
```python
from datetime import datetime
import os
import time
from workflow.notify import notify
from workflow.background import is_running
from wunderlist.models.preferences import Preferences
from wunderlist.util import workflow
def sync(background=False):
from wunderlist.models import base, root, list, task, user, hashtag, reminder
from peewee import OperationalError
# If a sync is already running, wait for it to finish. Otherwise, store
# the current pid in alfred-workflow's pid cache file
if not background:
if is_running('sync'):
wait_count = 0
while is_running('sync'):
time.sleep(.25)
wait_count += 1
if wait_count == 2:
notify('Please wait...', 'The workflow is making sure your tasks are up-to-date')
return False
pidfile = workflow().cachefile('sync.pid')
with open(pidfile, 'wb') as file_obj:
file_obj.write('{0}'.format(os.getpid()))
Preferences.current_prefs().last_sync = datetime.now()
base.BaseModel._meta.database.create_tables([
root.Root,
list.List,
task.Task,
user.User,
hashtag.Hashtag,
reminder.Reminder
], safe=True)
# Perform a query that requires the latest schema; if it fails due to a
    # mismatched schema, delete the old database and re-sync
try:
task.Task.select().where(task.Task.recurrence_count > 0).count()
hashtag.Hashtag.select().where(hashtag.Hashtag.tag == '').count()
except OperationalError:
base.BaseModel._meta.database.close()
workflow().clear_data(lambda f: 'wunderlist.db' in f)
# Make sure that this sync does not try to wait until its own process
# finishes
sync(background=True)
return
first_sync = False
try:
root.Root.get()
except root.Root.DoesNotExist:
first_sync = True
root.Root.sync(background=background)
if background:
if first_sync:
notify('Initial sync has completed', 'All of your tasks are now available for browsing')
# If executed manually, this will pass on to the post notification action
    print('Sync completed successfully')
return True
def background_sync():
from workflow.background import run_in_background
task_id = 'sync'
# Only runs if another sync is not already in progress
run_in_background(task_id, [
'/usr/bin/env',
'python',
workflow().workflowfile('alfred-wunderlist-workflow.py'),
'pref sync background',
'--commit'
])
def background_sync_if_necessary(seconds=30):
last_sync = Preferences.current_prefs().last_sync
# Avoid syncing on every keystroke, background_sync will also prevent
# multiple concurrent syncs
if last_sync is None or (datetime.now() - last_sync).total_seconds() > seconds:
background_sync()
```
#### File: user.workflow.F455B544-D4E3-402B-B987-8D3EA582A111/wunderlist/util.py
```python
from datetime import date, datetime, timedelta
import logging
from workflow import Workflow
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
_workflow = None
_update_settings = None
def workflow():
global _workflow, _update_settings
if _workflow is None:
version = '0.7.0'
_workflow = Workflow(
capture_args=False,
update_settings={
'github_slug': 'idpaterson/alfred-wunderlist-workflow',
'version': version,
# Check for updates daily
# TODO: check less frequently as the workflow becomes more
# stable
'frequency': 1,
# Always download pre-release updates if a prerelease is
# currently installed
'prerelease': '-' in version
}
)
# Avoid default logger output configuration
_workflow.logger = logging.getLogger('workflow')
return _workflow
def parsedatetime_calendar():
from parsedatetime import Calendar, Constants
return Calendar(parsedatetime_constants())
def parsedatetime_constants():
from parsedatetime import Constants
from wunderlist.models.preferences import Preferences
loc = Preferences.current_prefs().date_locale or user_locale()
return Constants(loc)
def user_locale():
import locale
loc = locale.getlocale(locale.LC_TIME)[0]
if not loc:
# In case the LC_* environment variables are misconfigured, catch
# an exception that may be thrown
try:
loc = locale.getdefaultlocale()[0]
except IndexError:
loc = 'en_US'
return loc
def format_time(time, format):
c = parsedatetime_constants()
expr = c.locale.timeFormats[format]
expr = (expr
.replace('HH', '%H')
.replace('h', '%I')
.replace('mm', '%M')
.replace('ss', '%S')
.replace('a', '%p')
.replace('z', '%Z')
.replace('v', '%z'))
return time.strftime(expr).lstrip('0')
def short_relative_formatted_date(dt):
d = dt.date() if isinstance(dt, datetime) else dt
today = date.today()
# Mar 3, 2016
date_format = '%b %d, %Y'
if d == today:
return 'today'
if d == today + timedelta(days=1):
return 'tomorrow'
elif d == today - timedelta(days=1):
return 'yesterday'
elif d.year == today.year:
# Wed, Mar 3
date_format = '%a, %b %d'
return dt.strftime(date_format)
def relaunch_alfred(command='wl'):
import subprocess
alfred_major_version = workflow().alfred_version.tuple[0]
subprocess.call([
'/usr/bin/env', 'osascript', '-l', 'JavaScript',
'bin/launch_alfred.scpt', command, str(alfred_major_version)
])
def utc_to_local(utc_dt):
import calendar
# get integer timestamp to avoid precision lost
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
return local_dt.replace(microsecond=utc_dt.microsecond)
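# Usage sketch (not part of the original module): short_relative_formatted_date
# collapses nearby dates to words and otherwise falls back to a short format, e.g.
#   short_relative_formatted_date(date.today())                       -> 'today'
#   short_relative_formatted_date(date.today() + timedelta(days=1))   -> 'tomorrow'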
``` |
{
"source": "jlhall/ImplAlgos",
"score": 4
} |
#### File: py/data_structures/binary_tree.py
```python
class Node(object):
"""Node class for use in binary tree"""
def __init__(self, value, left=None, right=None):
super(Node, self).__init__()
self.value = value
self.left = left
self.right = right
class Tree(object):
"""Tree class for use in binary tree"""
def __init__(self):
super(Tree, self).__init__()
self.root = None
    def add(self, value):
        if self.root is None:
            self.root = Node(value)
            return
        curr = self.root
        while curr is not None:
            if value < curr.value and curr.left is None:
                curr.left = Node(value)
                return
            elif value > curr.value and curr.right is None:
                curr.right = Node(value)
                return
            elif value < curr.value:
                curr = curr.left
            elif value > curr.value:
                curr = curr.right
            else:
                # value is already in the tree; ignore duplicates
                return
```
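A minimal usage sketch for the `Tree` and `Node` classes above (assuming the insert ordering shown in `add`): values smaller than the root go left, larger values go right.

```python
t = Tree()
for v in (5, 3, 8):
    t.add(v)

assert t.root.value == 5
assert t.root.left.value == 3
assert t.root.right.value == 8
```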
#### File: py/sorting_algos/bubble_sort.py
```python
import random
n = random.randint(1, 1000)
ab = []
for i in range(n):
ab.append(random.randint(1, n))
def bub_sort(a):
for i in range(len(a)-1, 0, -1):
for j in range(i):
if a[j] > a[j+1]:
a[j], a[j+1] = a[j+1], a[j]
return a
print(bub_sort(ab))
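# e.g. bub_sort([3, 1, 2]) returns [1, 2, 3]; the largest remaining element
# bubbles to the end of the unsorted region on each outer pass.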
``` |
{
"source": "JLHasson/coremltools",
"score": 2
} |
#### File: nnssa/coreml/ssa_converter.py
```python
import numpy as np
from warnings import warn
from six import string_types as _string_types
from coremltools.models import datatypes
from coremltools.proto import NeuralNetwork_pb2, Model_pb2
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models.neural_network.flexible_shape_utils import set_multiarray_ndshape_range
from collections import Iterable
import coremltools
from ..commons import builtins
from ..commons.basic_graph_ops import topsort, check_connections
from .graph_pass import *
try:
import shapes
except ImportError:
from . import shapes
DEBUG = False
def _is_scalar(type_):
if type_ is None:
return False
result = builtins.is_int(type_) or builtins.is_float(type_) or builtins.is_bool(type_)
if builtins.is_tensor(type_) and (len(type_.get_shape()) == 0):
result = True
return result
def ssa_convert(ssa,
top_func='main',
inputs=None,
outputs=None,
image_input_names=None,
image_format=None,
is_bgr=False,
red_bias=0.0,
green_bias=0.0,
blue_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
predicted_probabilities_output='',
add_custom_layers=False,
custom_conversion_functions=None,
custom_shape_functions=None,
optional_inputs=None
):
"""
Convert NNSSA into Core ML spec.
ssa : NetworkEnsemble
Required parameter
NNSSA to be converted to CoreML spec.
top_func : str or 'main'
Function entry point
inputs : dict of str -> list/tuple or None
Input features of CoreML specs. Must be a dictionary with
name as key and shape as value {name: shape},
where name is the input's name, shape is the
shape of the feature tensor. The shape must be static - all
dimensions of shape should be a positive integer.
When not provided, SSA converter will treat all input nodes
in top level NNSSA as inputs.
outputs : list of str or None
Output features of CoreML specs. Must be a list of [name].
When not provided, SSA converter will treat all output nodes
in top level NNSSA as outputs.
add_custom_layers : bool or False
If True, then `custom` layers will be added to the model in place
for unsupported ops.
Parameters for these custom layers should be filled manually by editing the mlmodel
or the 'custom_conversion_functions' argument can be used to do the same during the process of conversion
custom_conversion_functions : dict of str -> function or empty dict
Specify custom function to be used for conversion for given op. User can override existing conversion
function and provide their own custom implementation to convert certain ops. Dictionary key must be string
specifying Op name or Op type and value must be a function implementation available in current context.
        If the user provides two separate functions for a node name and a node type, the function tied to the
        node name will be used, since a function tied to a node type is more generic than one tied to a node name.
        The custom_conversion_functions option is different from add_custom_layers.
Both options can be used in conjunction in which case, custom function will be invoked for provided ops and
custom layer will be added for ops with no respective conversion function. This option gives finer control to user.
One use case could be to modify input attributes or certain graph properties before calling existing conversion function.
Note that, It is custom conversion function's responsibility to add respective Core ML layer into builder (coremltools's NeuralNetworkBuilder)
custom_shape_functions : dict of str -> functions or empty dict
Specify custom function to compute `output` shape given `input` shape for given custom operator
This is required for new converter path, which maintains and propagates shapes while converting operators.
image_format: str
Optional and valid if image_input_names is also set. Specify either 'NCHW' or 'NHWC' to set or
override the image format. If not set, tries to use hints from the graph which may be present in convolution or
other image-specific layers. Ultimately defaults to NHWC.
"""
if not custom_conversion_functions:
custom_conversion_functions = dict()
if not custom_shape_functions:
custom_shape_functions = dict()
if not optional_inputs:
optional_inputs = list()
if outputs is not None:
ssa.extract_subgraph(outputs, name=top_func)
if DEBUG:
import graphviz
dot_string = ssa.get_dot_string(annotation=True, name_and_op_style=True, highlight_debug_nodes=[])
graphviz.Source(dot_string).view(filename='/tmp/ssa')
# apply passes on the ssa, prior to conversion
# note: ideally order of passes should not matter, however, might be few special cases
# fuse_batch_to_space_or_space_to_batch needs to be applied before transform_nhwc_to_nchw
passes = [
constant_weight_link_removal,
onehot_matmul_to_embedding,
fuse_layer_norm,
fuse_gelu,
fuse_batch_to_space_or_space_to_batch,
fuse_bias_add,
transform_nhwc_to_nchw,
remove_identity,
remove_no_ops_and_shift_control_dependencies,
remove_single_isolated_node,
fuse_batch_norm,
spatial_reduce_to_global_pool,
fuse_pad_into_conv,
remove_oneway_split,
remove_noneffective_transpose,
remove_noneffective_reshape
]
for p in passes:
p(ssa)
if DEBUG:
import graphviz
dot_string = ssa.get_dot_string(annotation=True, name_and_op_style=True, highlight_debug_nodes=[])
graphviz.Source(dot_string).view(filename='/tmp/ssa_after_passes')
for f in list(ssa.functions.values()):
check_connections(f.graph)
# Set classifier flag
is_classifier = class_labels is not None
neural_network_type = 'classifier' if is_classifier else None
converter = SSAConverter(ssa,
top_func=top_func,
inputs=inputs,
outputs=outputs,
neural_network_type=neural_network_type,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions,
custom_shape_functions=custom_shape_functions,
optional_inputs=optional_inputs)
converter.convert()
builder = converter._get_builder(func=top_func)
# Add image input identifier
if image_input_names is not None and isinstance(
image_input_names, _string_types):
image_input_names = [image_input_names]
# Add classifier classes (if applicable)
if is_classifier:
classes = []
classes_in = class_labels
if isinstance(classes_in, _string_types): # string
import os
if not os.path.isfile(classes_in):
raise ValueError("Path to class labels (%s) does not exist." % \
classes_in)
with open(classes_in, 'r') as f:
classes = f.read()
classes = classes.splitlines()
elif type(classes_in) is list: # list[int or str]
classes = classes_in
else:
raise ValueError('Class labels must be a list of integers / strings,' \
' or a file path')
if predicted_feature_name is not None:
builder.set_class_labels(
classes, predicted_feature_name=predicted_feature_name,
prediction_blob=predicted_probabilities_output)
else:
builder.set_class_labels(classes)
detected_image_format = ssa.get_image_format()
if image_format and detected_image_format and image_format != detected_image_format:
warn('[SSAConverter] Detected image format different from input.'
'Detected: {} Input: {}'.format(detected_image_format, image_format))
image_format = image_format or detected_image_format or 'NHWC'
# Set pre-processing parameters
builder.set_pre_processing_parameters(image_input_names=image_input_names,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
image_format=image_format)
mlmodel_spec = converter.get_spec()
# Required if an output node produces multiple outputs
# Generate new output features
modified_output_features_list = []
for idx, output_feature in enumerate(mlmodel_spec.description.output):
if output_feature.name in converter.op_tensor_map:
atype = mlmodel_spec.description.output[idx].type
for aname in converter.op_tensor_map[output_feature.name]:
new_feature = Model_pb2.FeatureDescription()
new_feature.name = aname
new_feature.type.CopyFrom(atype)
if aname not in [feature.name for feature in modified_output_features_list]:
modified_output_features_list.append(new_feature)
else:
modified_output_features_list.append(output_feature)
# delete the existing output feature
mlmodel_spec.description.ClearField('output')
# creating new output features description
mlmodel_spec.description.output.extend(modified_output_features_list)
# MLModel passes
mlmodel_passes = [remove_disconnected_layers,
remove_redundant_transposes,
]
for p in mlmodel_passes:
p(mlmodel_spec)
if DEBUG:
coremltools.models.utils.save_spec(mlmodel_spec, '/tmp/model_from_spec.mlmodel')
return mlmodel_spec
class SSAConverter(object):
def __init__(self,
net_ensemble, # type: NetworkEnsemble
top_func='main', # type: str
inputs=None, # type: Dict[str, tuple]
outputs=None, # type: List[str]
neural_network_type=None, # type: str
add_custom_layers=False, # type: bool
custom_conversion_functions={}, # type: Dict[Text, Any]
custom_shape_functions={}, # type: Dict[Text, Any]
optional_inputs=[] # type: List[str]
):
self.net_ensemble = net_ensemble
self.top_func = top_func # string indicating the top level function
if self.top_func not in self.net_ensemble.functions:
raise ValueError(
'Top level function %s not in the NetworkEnsemble Provided' % self.top_func)
# get top level inputs and outputs to instantiate spec
self.net_ensemble.functions[top_func].find_inputs_and_outputs()
top_input_names = list(map(str, self.net_ensemble.functions[top_func].inputs))
top_output_names = list(map(str, self.net_ensemble.functions[top_func].outputs))
top_ssa = self.net_ensemble.functions[top_func]
# custom conversion functions
self.custom_conversion_functions = custom_conversion_functions
self.add_custom_layers = add_custom_layers
self.custom_shape_functions = custom_shape_functions
# find_inputs_and_outputs() generates a list of required inputs, which
# may not be supplied by inputs. We need to make sure that the
        # user-supplied input names and shapes are consistent with the NNSSA.
top_input_shapes = []
for name in top_input_names:
node = top_ssa.graph[name]
shape = self._get_tensor_shape_from_type(node.datatype)
if shape is None and inputs is None:
raise ValueError(
'NNSSA input "%s" has non-static shape %s, please provide in argument "inputs"'
% (name, str(shape)))
if inputs is not None:
if name not in inputs:
raise ValueError(
'Input "%s" is required by SSAConverter, but not passed in argument "inputs"' % name)
if shapes.is_static_shape(inputs[name]) and not shapes.is_a_shape_of(inputs[name], shape):
raise ValueError(
'Input "%s" expects a shape compatible to %s, but is given %s' %
(name, str(shape), inputs[name]))
                # Now the shape is known; use it to build top_input_shapes
shape = inputs[name] if inputs[name] else [1, ]
top_input_shapes.append(shape)
top_input_types = []
is_input_optional = [True if name in optional_inputs else False for name in top_input_names]
is_input_dynamic = [True if not shapes.is_static_shape(shape) else False for shape in top_input_shapes]
for idx, dims in enumerate(top_input_shapes):
if is_input_dynamic[idx]:
static_shape = [dim_size if dim_size > 0 else 1 for dim_size in dims]
else:
static_shape = dims
top_input_types.append(datatypes.Array(*static_shape))
top_input_features = list(zip(top_input_names, top_input_types))
# TODO - verify outputs
if outputs is not None:
top_output_features = []
for name in outputs:
if name in self.net_ensemble.variables.keys(): # Variable/States are optional inputs & outputs to be added later
continue
elif name in top_output_names:
top_output_features.append((name, None))
else:
if len(top_output_names) == 1:
raise ValueError('Output "{}" is not an output node in the source graph. Do you mean "{}"?'
.format(name, top_output_names[0]))
else:
raise ValueError('Output "%s" is not an output node in the source graph.' % name)
else:
top_output_features = list(zip(top_output_names, [None] * len(top_output_names)))
self.top_builder = NeuralNetworkBuilder(input_features=top_input_features,
output_features=top_output_features,
disable_rank5_shape_mapping=True,
mode=neural_network_type,
use_float_arraytype=True)
self.spec = self.top_builder.spec
for idx, input in enumerate(self.spec.description.input):
if is_input_dynamic[idx]:
input_name = top_input_names[idx]
dynamic_shape = top_input_shapes[idx]
lower_bounds, upper_bounds = [], []
for dim_size in dynamic_shape:
if dim_size > 0:
lower_bounds.append(dim_size)
upper_bounds.append(dim_size)
else:
lower_bounds.append(1)
upper_bounds.append(-1)
set_multiarray_ndshape_range(self.spec, input_name, lower_bounds=lower_bounds, upper_bounds=upper_bounds)
if is_input_optional[idx]:
self.spec.description.input[idx].type.isOptional = True
self.CONVERT_FUNCTION_MAP = {
'Abs': self._convert_unary_common,
'Add': self._convert_binary,
'AddV2': self._convert_binary,
'AddN': self._convert_addn,
'All': self._convert_reduction,
'Any': self._convert_reduction,
'ArgMax': self._convert_argmax,
'ArgMin': self._convert_argmin,
'AvgPool': self._convert_avgpool,
'BatchMatMul': self._convert_batched_mat_mul,
'BatchNorm': self._convert_batchnorm,
'BatchToSpaceND': self._convert_batch_to_space_nd,
'BiasAdd': self._convert_binary_broadcastable,
'Cast': self._convert_cast,
'Ceil': self._convert_unary_common,
'ClipByValue': self._convert_clip,
'Concat': self._convert_concat_nd,
'ConcatV2': self._convert_concat_nd,
'Const': self._convert_const,
'Conv2D': self._convert_conv2d,
'Conv2DBackpropInput': self._convert_conv2d_transpose,
'Cos': self._convert_unary_trigonometric,
'DepthToSpace': self._convert_reorganize_data,
'DepthwiseConv2dNative': self._convert_conv2d,
'Elu': self._convert_unary_activation,
'Embedding': self._convert_embedding,
'Equal': self._convert_binary_broadcastable,
'Exp': self._convert_unary_common,
'ExpandDims': self._convert_expand_dims,
'Fill': self._convert_fill,
'Floor': self._convert_unary_common,
'FloorDiv': self._convert_binary_broadcastable,
'FloorMod': self._convert_floor_mod,
'Gather': self._convert_gather,
'GatherNd': self._convert_gather_nd,
'GeLU': self._convert_gelu,
'Greater': self._convert_binary_broadcastable,
'GreaterEqual': self._convert_binary_broadcastable,
'Identity': self._convert_identity,
'LRN': self._convert_lrn,
'LSTMBlock': self._convert_lstm_block_cell,
'LayerNormalization': self._convert_layer_normalization,
'LeakyRelu': self._convert_unary_activation,
'Less': self._convert_binary_broadcastable,
'LessEqual': self._convert_binary_broadcastable,
'Log': self._convert_unary_common,
'LogSoftmax': self._convert_unary_log_softmax,
'LogicalAnd': self._convert_binary_broadcastable,
'LogicalNot': self._convert_unary_logical_not,
'LogicalOr': self._convert_binary_broadcastable,
'MatMul': self._convert_batched_mat_mul,
'MatrixBandPart': self._convert_matrix_band_part,
'Max': self._convert_reduction,
'MaxPool': self._convert_maxpool,
'Maximum': self._convert_binary_broadcastable,
'Mean': self._convert_reduction,
'Min': self._convert_reduction,
'Minimum': self._convert_binary_broadcastable,
'MirrorPad': self._convert_mirror_pad,
'Mul': self._convert_binary,
'Neg': self._convert_unary_neg,
'NotEqual': self._convert_binary_broadcastable,
'Pack': self._convert_pack,
'Pad': self._convert_constant_pad,
'PadV2': self._convert_constant_pad,
'Placeholder': self._convert_input,
'Pow': self._convert_binary_broadcastable,
'Prod': self._convert_reduction,
'Range': self._convert_range,
'RealDiv': self._convert_binary,
'Reciprocal': self._convert_unary_inverse,
'Relu': self._convert_unary_activation,
'Relu6': self._convert_unary_activation_relu6,
'Reshape': self._convert_reshape,
'ResizeBilinear': self._convert_resize_bilinear,
'ResizeNearestNeighbor': self._convert_resize_nearest_neighbor,
'ReverseSequence': self._convert_reverse_sequence,
'ReverseV2': self._convert_reverse,
'Round': self._convert_unary_common,
'Rsqrt': self._convert_unary_common,
'ScatterNd': self._convert_scatter_nd,
'SelectMask': self._convert_select,
'Shape': self._convert_shape,
'Sigmoid': self._convert_unary_activation,
'Sign': self._convert_unary_common,
'Sin': self._convert_unary_trigonometric,
'Size': self._convert_size,
'Selu': self._convert_selu,
'Slice': self._convert_slice,
'Softmax': self._convert_softmax,
'SpaceToBatchND': self._convert_space_to_batch_nd,
'SpaceToDepth': self._convert_reorganize_data,
'Split': self._convert_split,
'SplitV': self._convert_split,
'Sqrt': self._convert_unary_common,
'Square': self._convert_unary_square,
'SquaredDifference': self._convert_squared_difference,
'Squeeze': self._convert_squeeze,
'StridedSlice': self._convert_slice,
'Sub': self._convert_binary,
'Sum': self._convert_reduction,
'Softplus': self._convert_unary_activation,
'Tan': self._convert_unary_trigonometric,
'Tanh': self._convert_unary_activation,
'TensorArrayGatherV3': self._convert_tensorarray_gather,
'TensorArrayReadV3': self._convert_tensorarray_read,
'TensorArrayScatterV3': self._convert_array_scatter,
'TensorArraySizeV3': self._convert_tensorarray_size,
'TensorArrayV3': self._convert_tensorarray_alloc,
'TensorArrayWriteV3': self._convert_tensorarray_write,
'Tile': self._convert_tile,
'TopKV2': self._convert_topk,
'Transpose': self._convert_transpose,
'Unpack': self._convert_unpack,
'Where': self._convert_where,
'function_entry': self._convert_function,
'get_global': self._convert_get_global,
'get_tuple': self._convert_get_tuple,
'iff': self._convert_iff,
'make_tuple': self._convert_make_tuple,
'return': self._convert_return,
'set_global': self._convert_set_global,
'while': self._convert_while,
'ZerosLike': self._convert_zeros_like
}
# converter state variables
# func_stack stores a list of NNSSA function names
self.func_stack = [self.top_func]
# Theoretically, there should be a one-to-one mapping between
# SSA function and nn_spec, which is associated with a NeuralNetworkBuilder
self.func_builder_map = {self.top_func: self.top_builder}
# All the shapes of the tensor of CoreML str:shape
self.tensor_shapes = {
name: top_input_shapes[idx]
for idx, name in enumerate(top_input_names)
}
# Map for tensors generated by special ops (make_tuple, get_tuple, function, return, etc)
# and value is the list of node names that represent tensors
self.op_tensor_map = {}
# all variables/states are treated as both inputs & outputs.
for name, aVariable in self.net_ensemble.variables.items():
if _is_scalar(aVariable):
shape = [1, ]
else:
assert builtins.is_tensor(aVariable)
shape = list([int(i) if i and i > 0 else 1 for i in self._get_tensor_shape_from_type(aVariable)])
self.top_builder.add_optionals([(name + '__invar__', shape)], [(name + '__outvar__', shape)])
self.tensor_shapes[name + '__invar__'] = shape
def get_spec(self):
return self.spec
def print_function_nodes(self, func_name):
if func_name not in self.net_ensemble.functions:
raise ValueError('%s is not a function name in NetworkEnsemble' % func_name)
graph = self.net_ensemble.functions[func_name].graph
for name, node in graph.items():
if node.op == 'get_global':
print('%s (%s) var = %s' % (name, node.op, node.attr['variable']))
if node.op == 'set_global':
print('%s (%s) var = %s' % (name, node.op, node.attr['variable']))
def get_nnssa_inputs_outputs(self):
inputs, outputs, placeholder_defaults = self.net_ensemble._get_inputs_outputs()
print('Inputs: ')
for i in inputs:
print(i)
print('Outputs: ')
for o in outputs:
print(o)
print('Placeholders with default: ')
for p in placeholder_defaults:
print(p)
return inputs, outputs, placeholder_defaults
def convert(self):
""" Convert the NNSSA function on top of func_stack into NeuralNetworkSpec.
"""
func_name = self.func_stack[-1]
func = self.net_ensemble.functions[func_name]
print('[SSAConverter] Converting function %s ...' % func_name)
# Do a topological sort
restricted_graph = {}
function = self.net_ensemble.functions[func_name]
for k, v in function.graph.items():
if len(v.outputs) > 0 and all(
[function.graph[i].value is not None for i in v.outputs]):
continue
restricted_graph[k] = v
instruction_order = topsort(restricted_graph)
# Make a buffer between variable inputs
builder = self._get_builder()
for name, var in self.net_ensemble.variables.items():
layer = builder.add_copy(
name=name + '_copy',
input_name=name + '__invar__',
output_name=name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
# Convert operations one by one
for idx, node_name in enumerate(instruction_order):
node = func.graph[node_name]
op_type = node.op
custom_conversion_name = None
if node_name in self.custom_conversion_functions:
custom_conversion_name = node_name
elif op_type in self.custom_conversion_functions:
custom_conversion_name = op_type
# Set conversion function and message
conversion_message = ''
if custom_conversion_name is not None:
conversion_message = ' with custom conversion function'
elif op_type in self.CONVERT_FUNCTION_MAP:
convert_func = self.CONVERT_FUNCTION_MAP[op_type]
elif self.add_custom_layers:
# Add custom layer
convert_func = self._convert_custom_layer
conversion_message = ' with custom layer'
else:
raise NotImplementedError(
'[SSAConverter] Conversion for op %s not implemented, terminating...' % op_type)
print('[SSAConverter] [{}/{}] Converting op type: \'{}\', name: \'{}\'{}{}'.format(
idx + 1, len(instruction_order), op_type, node_name, conversion_message,
((', output_shape: ' + str(node.datatype.get_shape()) + '.') if builtins.is_tensor(node.datatype) else '.')))
# If custom conversion method is provided, use it
# Otherwise, invoke internal conversion method
if custom_conversion_name is not None:
self.custom_conversion_functions[custom_conversion_name](self, node)
else:
convert_func(node)
# Buffer the variable outputs: copy each variable back onto its '<name>__outvar__' output.
builder = self._get_builder()
for name, var in self.net_ensemble.variables.items():
layer = builder.add_copy(
name=name + '_copy_r',
input_name=name,
output_name=name + '__outvar__')
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _get_builder(self, func=None):
if func is None:
func = self.func_stack[-1]
return self.func_builder_map[func]
def _get_tensor_shape_from_type(self, type_):
if _is_scalar(type_):
shape = (1,)
elif builtins.is_tensor(type_):
shape = type_.get_shape()
elif builtins.is_list(type_):
element_shape = type_.T[0].get_shape()
for ashape in type_.T:
assert ashape.get_shape() == element_shape
shape = [-1] + list(element_shape)
else:
shape = None
return shape
def _get_input_tensors(self, node, inspect_shapes=True):
""" Get the input nodes, their names and types for a node.
There are three cases:
(1) (Tuple case) input is a tuple. In this case, expand that tuple input into a list of input tensors
(2) (Regular case) input is a node name. In this case just copy it.
(3) (Indexed tuple case) input is one element in a tuple. In this case it should be stored in op_tensor_map
"""
input_nodes, input_names, input_types = [], [], []
for name in node.inputs:
if name in self.op_tensor_map:
input_names.extend(self.op_tensor_map[name])
else:
input_names.append(name)
for name in input_names:
if name in self.net_ensemble.variables:
input_node, _ = self.__get_node_and_type_by_name(name + "/read")
input_type = self.net_ensemble.variables[name]
else:
input_node, input_type = self.__get_node_and_type_by_name(name)
assert input_node is not None
assert input_type is not None
input_nodes.append(input_node)
input_types.append(input_type)
if inspect_shapes:
self.__compare_propagated_and_inferred_shape(name, input_type)
return input_nodes, input_names, input_types
def __get_node_and_type_by_name(self, name):
for fname in self.func_stack[::-1]:
func = self.net_ensemble.functions[fname]
if name in func.graph:
node = func.graph[name]
return node, node.datatype
for node_name, output_names in self.op_tensor_map.items():
if name in output_names:
node, type_ = self.__get_node_and_type_by_name(node_name)
if builtins.is_tuple(type_):
Id = output_names.index(name)
type_ = node.datatype.T[Id]
return node, type_
return None, None
def __compare_propagated_and_inferred_shape(self, name, type_):
propagated_shape = tuple(self.tensor_shapes[name])
if _is_scalar(type_):
inferred_shape = (1,)
elif builtins.is_tensor(type_):
inferred_shape = type_.get_shape()
elif builtins.is_list(type_):
element_shape = type_.T[0].get_shape()
for ashape in type_.T:
assert ashape.get_shape() == element_shape
inferred_shape = [-1] + list(element_shape)
else:
raise ValueError('[SSAConverter] Failed to infer shape for tensor %s' % name)
mismatch = '[SSAConverter] Shape mismatch for {}: inferred {} vs. propagated {}.'.format(
name, inferred_shape, propagated_shape)
if len(propagated_shape) != len(inferred_shape):
raise ValueError(mismatch)
for pdim, idim in zip(propagated_shape, inferred_shape):
if pdim == -1 or idim == -1 or pdim == idim:
continue
raise ValueError(mismatch)
def _convert_input(self, node):
""" Convert an input node. For now, we may just need to skip it.
"""
pass
def _convert_const(self, node):
""" Convert a constant node.
"""
node_value = node.value
if node_value is None:
node_value = node.attr.get('value')
val = np.array(node_value.val)
if len(val.shape) == 0:
val = np.array([node_value.val])
builder = self._get_builder()
layer = builder.add_load_constant_nd(
name=node.name, output_name=node.name, constant_value=val, shape=val.shape)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_custom_layer(self, node):
""" Add custom layer
"""
params = NeuralNetwork_pb2.CustomLayerParams()
params.className = node.op
params.description = "Custom layer that corresponds to the TensorFlow op {}".format(node.op)
builder = self._get_builder()
layer = builder.add_custom(name=node.name,
input_names=node.inputs,
output_names=[node.name],
custom_proto_spec=params)
if node.op not in self.custom_shape_functions:
raise ValueError('Custom Shape Function for {} not provided!'.format(node.op))
shapes.propagate_single_layer(layer, self.tensor_shapes, custom_shape_function=self.custom_shape_functions[node.op])
def _convert_transpose(self, node):
""" Convert a transpose op.
"""
# permute dimensions are assumed to be a const
input_nodes, input_names, input_types = self._get_input_tensors(node)
dim = input_nodes[1].value.val if len(input_names) > 1 else node.attr.get('dim')
if dim is None:
raise ValueError('[SSAConverter] Cannot handle dynamic Transpose')
dim = list(dim)
builder = self._get_builder()
layer = builder.add_transpose(
name=node.name, axes=dim, input_name=input_names[0], output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_shape(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 1)
builder = self._get_builder()
layer = builder.add_get_shape(
name=node.name, input_name=input_names[0], output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
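# SELU decomposes into two Core ML layers:
#   selu(x) = scale * elu(x, alpha), with alpha ~= 1.6732632 and scale ~= 1.0507010,
# so an ELU activation is followed by an elementwise multiply by the scale constant.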
def _convert_selu(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 1)
builder = self._get_builder()
elu_output_name = node.name + '_elu'
builder.add_activation(node.name +'__elu__', 'ELU', input_names[0], elu_output_name,
params=1.6732632)
builder.add_elementwise(node.name,
input_names=elu_output_name,
output_name=node.name,
mode='MULTIPLY',
alpha=1.05070098)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
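# tf.size(x) == prod(shape(x)): emitted as a get_shape layer followed by a
# reduce_prod over all dimensions, yielding a single-element tensor.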
def _convert_size(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 1)
builder = self._get_builder()
layer = builder.add_get_shape(
name=node.name + "_shape", input_name=input_names[0], output_name=node.name + "_shape")
layer = builder.add_reduce_prod(
name=node.name,
input_name=node.name + "_shape",
output_name=node.name,
keepdims=True,
reduce_all=True)
self.tensor_shapes[node.name] = [1]
def _convert_slice(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
has_squeeze = 'squeeze' in node.attr and node.attr['squeeze']
axes = node.attr.get('squeeze')
if _is_scalar(node.datatype):
output_shape = []
elif builtins.is_tensor(node.datatype):
output_shape = self._get_tensor_shape_from_type(node.datatype)
else:
output_shape = None
if has_squeeze:
if output_shape is None:
raise ValueError('[SSAConverter] Unable to determine output shapes for Slice')
if len(output_shape) == 0 and len(axes) == 1:
has_squeeze = False
slice_output_name = node.name + '_slice_' if has_squeeze else node.name
builder = self._get_builder()
rank = len(self._get_tensor_shape_from_type(input_types[0]))
begin_masks = [i in node.attr['begin_masks'] for i in range(rank)]
end_masks = [i in node.attr['end_masks'] for i in range(rank)]
if 'slice' not in node.attr:
assert node.attr["new_axis_mask"] == 0
assert len(input_names) >= 4
layer = builder.add_slice_dynamic(name=slice_output_name,
input_names=input_names[:4],
output_name=slice_output_name,
begin_masks=begin_masks,
end_masks=end_masks)
if not has_squeeze and output_shape:
self.tensor_shapes[node.name] = output_shape
else:
shapes.propagate_single_layer(layer, self.tensor_shapes)
else:
# For simple RNN, node.attr always has a 'slice'
# This means slicing is always static
# each slice is [begin, end, step]
slices = node.attr['slice']
begin_indices, end_indices, strides = [], [], []
for s in slices:
begin_indices.append(s[0])
end_indices.append(s[1])
strides.append(s[2])
layer = builder.add_slice_static(
name=slice_output_name,
input_name=input_names[0],
output_name=slice_output_name,
begin_ids=begin_indices,
end_ids=end_indices,
strides=strides,
begin_masks=begin_masks,
end_masks=end_masks)
shapes.propagate_single_layer(layer, self.tensor_shapes)
if has_squeeze:
input_shape = self._get_tensor_shape_from_type(input_types[0])
input_rank = len(input_shape)
squeeze_all = (input_rank == len(axes))
layer = builder.add_squeeze(
name=node.name,
input_name=slice_output_name,
output_name=node.name,
axes=axes if not squeeze_all else None,
squeeze_all=squeeze_all)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_range(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) != 3:
raise ValueError(
'CoreML NeuralNetwork range layer must have 3 inputs: start, end and step')
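# Core ML's dynamic range layer takes its inputs as (end, start, step), whereas
# TF's Range op provides (start, limit, delta); reorder accordingly.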
input_names = [input_names[1], input_names[0], input_names[2]]
builder = self._get_builder()
layer = builder.add_range_dynamic(name=node.name, output_name=node.name, input_names=input_names)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_tensorarray_alloc(self, node):
# TensorArray is a list of tensors, it will be treated as a rank+1
# tensor when converted. The shape information is stored at two
# different places - node input specifies the length of the list
# while the node's datatype stores the shape of each tensor allocated.
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 1)
element_shape = node.datatype.T[0].get_shape()
if (not node.attr.get('identical_element_shapes', True) or
not all([atype.get_shape() == element_shape for atype in node.datatype.T])):
raise ValueError(
'[SSAConverter] TensorArray allocation cannot handle arrays '
'with tensors of various shapes.')
has_static_element_shape = all([dim > 0 for dim in element_shape])
if input_nodes[0].op == 'Const':
length = input_nodes[0].value.val
array_size = length if length > 0 else 1
elif 'size' in node.attr and isinstance(node.attr['size'], int):
array_size = node.attr['size']
else:
array_size = None
# Simpler case: No dynamic shape
if array_size is not None and has_static_element_shape:
array_shape = [array_size] + list(element_shape)
layer = self._get_builder().add_load_constant_nd(
name=node.name,
output_name=node.name,
constant_value=np.zeros(array_shape, dtype='float'),
shape=array_shape)
shapes.propagate_single_layer(layer, self.tensor_shapes)
elif has_static_element_shape:
# Load element shape into network
builder = self._get_builder()
if element_shape:
node_es_name = node.name + '__element_shape'
layer = builder.add_load_constant_nd(
name=node_es_name,
output_name=node_es_name,
constant_value=np.array(element_shape, dtype='float'),
shape=[len(element_shape)])
shapes.propagate_single_layer(layer, self.tensor_shapes)
# Concatenate list length (the input, should be a constant vector of size 1) with element shape
node_arr_shape_name = node.name + '__arr_shape'
layer = builder.add_concat_nd(
name=node_arr_shape_name,
input_names=input_names + [node_es_name],
output_name=node_arr_shape_name,
axis=0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
else:
node_arr_shape_name = input_names[0]
# Now allocate required shape
layer = builder.add_fill_dynamic(
name=node.name, input_name=node_arr_shape_name, output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
# Overwrite the output shape with fixed element shape
self.tensor_shapes[node.name][1:] = element_shape
layer.outputTensor[0].dimValue[1:] = element_shape
else:
raise ValueError(
'[SSAConverter] TensorArray allocation cannot determine element shapes statically'
)
def _convert_array_scatter(self, node):
# NNSSA input order: indices, value, array
# CoreML input order: container (array), indices, slices (value)
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) != 3:
raise ValueError('Scatter only accepts 3 inputs')
indices, value, array = input_names
layer = self._get_builder().add_scatter(
name=node.name, input_names=[array, indices, value], output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_make_tuple(self, node):
# make_tuple aggregates a list of SSA nodes (which also stand for their outputs).
# Recording the input names under the make_tuple node's name is enough for later lookups.
if node.name in self.op_tensor_map:
raise ValueError('make_tuple node %s should not be visited twice.' % node.name)
input_nodes, input_names, input_types = self._get_input_tensors(node)
self.op_tensor_map[node.name] = input_names
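# A 'while' node is lowered onto a Core ML loop layer: the condition and body
# functions are converted recursively into the loop's conditionNetwork and
# bodyNetwork, and the body is finished with copy layers that write the returned
# loop variables back onto the loop inputs for the next iteration.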
def _convert_while(self, node):
# In Core ML, loop and branch networks are designed to have no explicit inputs/outputs,
# since these are neither necessary nor clearly defined there.
# The 'while' node must take exactly one input, which is a make_tuple.
assert (len(node.inputs) == 1)
current_graph = self.net_ensemble.functions[self.func_stack[-1]].graph
assert (current_graph[node.inputs[0]].op == 'make_tuple')
input_nodes, input_names, input_types = self._get_input_tensors(node)
self.op_tensor_map[node.name] = input_names
builder_top = self._get_builder()
while_layer = builder_top.add_loop(name=node.name)
loop_param = while_layer.loop
loop_param.maxLoopIterations = 0
# Both body function and condition function share the same inputs (args) of the loop
# convert the condition function
if 'cond_function' in node.attr:
if not loop_param.HasField('conditionNetwork'):
loop_param.condition.MergeFromString(b'')
cond_func_name = node.attr['cond_function']
# TODO - need to find cond_var name
self.func_stack.append(cond_func_name)
self.func_builder_map[cond_func_name] = NeuralNetworkBuilder(
nn_spec=loop_param.conditionNetwork, disable_rank5_shape_mapping=True)
self.op_tensor_map[cond_func_name] = input_names
self.convert()
cond_func = self.net_ensemble.functions[cond_func_name]
ret_node_name = cond_func.outputs[0]
loop_param.conditionVar = cond_func.graph[ret_node_name].inputs[0]
self.func_stack.pop()
else:
raise ValueError('Unable to determine condition function in the loop')
# convert the body function
if 'body_function' not in node.attr:
raise ValueError('Unable to determine body function in the loop')
if not loop_param.HasField('bodyNetwork'):
loop_param.bodyNetwork.MergeFromString(b'')
body_func_name = node.attr['body_function']
self.func_stack.append(body_func_name)
self.func_builder_map[body_func_name] = NeuralNetworkBuilder(
nn_spec=loop_param.bodyNetwork, disable_rank5_shape_mapping=True)
self.op_tensor_map[body_func_name] = input_names
self.convert()
# The body function should re-write variables when it returns.
body_func = self.net_ensemble.functions[body_func_name]
loop_var_tuple_name = None
for k, v in body_func.graph.items():
# k is name, v is node
if v.op == 'make_tuple' and body_func.graph[v.outputs[0]].op == 'return':
loop_var_tuple_name = k
break
loop_var_names = self.op_tensor_map[loop_var_tuple_name]
assert len(loop_var_names) == len(input_names)
# Loop body should have the same input and output
builder_body = self._get_builder()
for src, dst in zip(loop_var_names, input_names):
# loop variables may be passed as an input to while op but unused.
if src == dst:
continue
layer = builder_body.add_copy(
name='copy_' + src + '_' + dst, input_name=src, output_name=dst)
shapes.propagate_single_layer(layer, self.tensor_shapes)
# Pop back to the function that contains the while loop
self.func_stack.pop()
def _convert_function(self, node):
# Function node is the entry point of a function
pass
def _convert_get_tuple(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
self.op_tensor_map[node.name] = [input_names[node.attr['index']]] if node.attr['index'] < len(input_names) else []
def _convert_get_global(self, node):
input_name = node.attr["variable"]
self.op_tensor_map[node.name] = [input_name]
def _convert_set_global(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
output_name = node.attr["variable"]
builder = self._get_builder()
if len(node.outputs) > 0:
self.op_tensor_map[node.name] = [input_names[0]]
if input_nodes[0].op == "Const" and input_nodes[0].value.val.size == 0:
return
if output_name != input_names[0]:
layer = builder.add_copy(name=node.name,
input_name=input_names[0],
output_name=output_name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_return(self, node):
# When converting the body function of a loop, the returned values are copied back onto the body function's inputs in _convert_while, so nothing needs to be emitted here.
pass
def _convert_unary_logical_not(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_logical(
name=node.name,
input_names=input_names,
output_name=node.name,
mode='NOT')
shapes.propagate_single_layer(layer, self.tensor_shapes)
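# FloorMod is built from existing layers using the identity
#   floor_mod(a, b) = a - floor_div(a, b) * b,
# with an extra rounding step for integer inputs since Core ML computes in float.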
def _convert_floor_mod(self, node):
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
a, b = input_names
a_div_b = node.name + "_floor_div"
floor_a = node.name + "_floor_a"
if builtins.is_int(node.attr['T']):
round_a = node.name + "_round_a"
round_b = node.name + "_round_b"
layer = self._get_builder().add_round(name=round_a,
input_name=a,
output_name=round_a)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_round(name=round_b,
input_name=b,
output_name=round_b)
shapes.propagate_single_layer(layer, self.tensor_shapes)
a, b = round_a, round_b
layer = self._get_builder().add_floor_div_broadcastable(
name=a_div_b, input_names=[a, b], output_name=a_div_b)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_multiply_broadcastable(
name=floor_a, input_names=[a_div_b, b], output_name=floor_a)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_subtract_broadcastable(
name=node.name, input_names=[a, floor_a], output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
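# SquaredDifference(a, b) = (a - b)^2: a broadcastable subtraction followed by a
# unary 'power' layer with alpha=2.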
def _convert_squared_difference(self, node):
assert (len(node.inputs) == 2)
input_nodes, input_names, input_types = self._get_input_tensors(node)
sub_node_name = node.name + '_sub_'
layer = self._get_builder().add_subtract_broadcastable(
name=sub_node_name, input_names=input_names, output_name=sub_node_name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_unary(
name=node.name, input_name=sub_node_name, output_name=node.name, mode='power', alpha=2.0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_select(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 3)
cond_name, true_name, false_name = input_names
if "expand_dims" in node.attr:
axes = node.attr["expand_dims"]
cond_output_name = node.name + '_expanded'
layer = self._get_builder().add_expand_dims(
name=cond_output_name, input_name=cond_name, output_name=cond_output_name, axes=axes)
shapes.propagate_single_layer(layer, self.tensor_shapes)
cond_name = cond_output_name
layer = self._get_builder().add_where_broadcastable(
name=node.name,
input_names=[cond_name, true_name, false_name],
output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_where(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) == 3:
self._convert_select(node)
else:
assert len(input_names) == 1
layer = self._get_builder().add_where_nonzero(name=node.name,
input_name=input_names[0],
output_name=node.name)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_softmax(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
axis = -1 if 'axis' not in node.attr else node.attr['axis']
layer = self._get_builder().add_softmax_nd(
name=node.name, input_name=input_names[0], output_name=node.name, axis=axis)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_tensorarray_read(self, node):
# TensorArrayReadV3 slices an element from TensorArray, which in NNSSA is a list.
# This is equivalent to array gather
input_nodes, input_names, input_types = self._get_input_tensors(node)
slice_output_name = node.name + '_slice_'
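# add_gather expects [data, indices]; the node's inputs arrive in the opposite
# order, hence the reversed input list below.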
layer = self._get_builder().add_gather(
name=node.name + '_gather_',
input_names=input_names[::-1],
output_name=slice_output_name,
axis=0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
# tensorarray_read should generate only 1 slice, so adding a squeeze should be enough
layer = self._get_builder().add_squeeze(
name=node.name + '_squeeze_',
input_name=slice_output_name,
output_name=node.name,
axes=[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_tensorarray_write(self, node):
"""def TensorArrayWrite(index, value, array):
array[index] = value
return array
"""
# node.inputs = ['index', 'value', 'array']
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 3)
index_name, value_name, array_name = input_names
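# If the array can grow dynamically, first compare the write index against the
# current length; the branch below appends a zero-filled element of the array's
# element shape so the scatter afterwards can write past the old end.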
if 'dynamic_size' in input_nodes[-1].attr:
builder = self._get_builder()
layer = builder.add_get_shape(
name=array_name + '_full_shape',
input_name=array_name,
output_name=array_name + '_full_shape')
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_slice_static(
name=array_name + '_length',
input_name=array_name + '_full_shape',
output_name=array_name + '_length',
begin_ids=[0],
end_ids=[1],
begin_masks=[False],
end_masks=[False],
strides=[1])
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_slice_static(
name=array_name + '_element_shape',
input_name=array_name + '_full_shape',
output_name=array_name + '_element_shape',
begin_ids=[1],
end_ids=[1],
begin_masks=[False],
end_masks=[True],
strides=[1])
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_greater_than(
name=array_name + "_is_growing",
input_names=[index_name, array_name + '_length'],
output_name=array_name + "_is_growing",
use_greater_than_equal=True
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_branch(
name=array_name + "_condition",
input_name=array_name + "_is_growing")
ifbranch = NeuralNetworkBuilder(nn_spec=layer.branch.ifBranch,
disable_rank5_shape_mapping=True)
layer = ifbranch.add_fill_dynamic(
name=array_name + "_alloc",
input_name=array_name + '_element_shape',
output_name=array_name + "_alloc",
value=0.0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = ifbranch.add_expand_dims(
name=array_name + "_new_element",
input_name=array_name + "_alloc",
output_name=array_name + "_new_element",
axes=[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = ifbranch.add_concat_nd(
name=array_name + "_updated",
input_names=[array_name, array_name + "_new_element"],
output_name=array_name + "_updated",
axis=0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = ifbranch.add_copy(
name=array_name + '_assign',
input_name=array_name + "_updated",
output_name=array_name
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
values_name = node.name + '_expanded'
layer = self._get_builder().add_expand_dims(
name=values_name, input_name=value_name, output_name=values_name, axes=[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
# 3 inputs: [Scatter target, indices, scatter source]
layer = self._get_builder().add_scatter(
name=node.name,
input_names=[array_name, index_name, values_name],
output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_addn(self, node):
# TODO: Support single value addn
# Blocked by a bug in coremltools
if len(node.inputs) <= 1:
raise ValueError("Only supports two or more inputs for add_n operation.")
input_nodes, input_names, input_types = self._get_input_tensors(node)
prev_name = input_names[0]
for i in range(1,len(input_names)):
node_name = node.name + '_' + str(i)
output_name = node.name if i == len(input_names) -1 else node_name
layer = self._get_builder().add_elementwise(
name=node_name, input_names=[prev_name, input_names[i]],
output_name=output_name, mode='ADD')
shapes.propagate_single_layer(layer, self.tensor_shapes)
prev_name = node_name
def _convert_concat_nd(self, node):
assert len(node.inputs) > 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
axis = node.attr.get('axis')
if axis is None:
axis = input_nodes[-1].value.val if node.op == 'ConcatV2' else input_nodes[0].value.val
if axis is None:
raise NotImplementedError('[SSAConverter] Dynamic concatenation is not supported')
input_names = input_names[:-1] if node.op == 'ConcatV2' else input_names[1:]
input_types = input_types if node.op == 'ConcatV2' else input_types[1:]
input_names = [name for i, name in enumerate(input_names) if self._get_tensor_shape_from_type(input_types[i])[axis] != 0]
if len(input_names) == 1:
self.op_tensor_map[node.name] = input_names
return
if node.attr.get('data_format', None) == 'NHWC_format_inserted' and (axis == 1 or axis == -3):
layer = self._get_builder().add_elementwise(node.name, input_names, node.name, 'CONCAT')
else:
layer = self._get_builder().add_concat_nd(
name=node.name, input_names=input_names, output_name=node.name, axis=axis)
shapes.propagate_single_layer(layer, self.tensor_shapes)
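# MatMul / BatchMatMul: when the second operand is a constant it is folded into
# the layer weights (transposed first if transpose_b is set); otherwise both
# operands are passed to add_batched_mat_mul as runtime inputs.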
def _convert_batched_mat_mul(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
weight, bias = None, None
if len(input_names) == 1:
weight = node.attr.get('W', node.attr.get('W_const'))
bias = node.attr.get('bias')
elif len(input_names) == 2 and input_nodes[1].op == 'Const':
input_names = [input_names[0]]
weight = input_nodes[1].value.val
bias = node.attr.get('bias')
transpose_a = node.attr.get('adj_x', False) or node.attr.get('transpose_a', False)
transpose_b = node.attr.get('adj_y', False) or node.attr.get('transpose_b', False)
if len(input_names) == 1 and transpose_b and weight is not None:
weight = weight.transpose((1, 0))
n_rows = 0 if weight is None else weight.shape[0]
n_cols = 0 if weight is None else weight.shape[1]
builder = self._get_builder()
layer = builder.add_batched_mat_mul(
name=node.name,
input_names=input_names,
output_name=node.name,
W=weight, # (batched_mat_mul requires Cin, Cout)
weight_matrix_rows=n_rows,
weight_matrix_columns=n_cols,
bias=bias,
transpose_a=transpose_a,
transpose_b=transpose_b)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_split(self, node):
# Only handles static splits
axis = node.attr['split_dim']
split = node.attr['split']
split = [size for size in split if size != 0]
num_splits = len(split)
has_equal_splits = all([size == split[0] for size in split])
input_nodes, input_names, input_types = self._get_input_tensors(node)
if num_splits == 1:
if node.name in [feature.name for feature in self.get_spec().description.output]:
layer = self._get_builder().add_activation(
name=node.name,
non_linearity='LINEAR',
input_name=input_names[-1],
output_name=node.name,
params=(1.0, 0.0))
shapes.propagate_single_layer(layer, self.tensor_shapes)
else:
self.op_tensor_map[node.name] = [input_names[-1]]
return
# Split outputs form a tuple; expand it into a list of per-slice tensor names.
output_names = [(node.name + '_' + str(i)) for i in range(num_splits)]
if node.name in self.op_tensor_map:
raise ValueError(
'[SSAConverter] split node %s should not be visited twice.' % node.name)
self.op_tensor_map[node.name] = output_names
tensor_id = -1 if node.op == 'Split' else 0
if has_equal_splits:
layer = self._get_builder().add_split_nd(
name=node.name,
input_name=input_names[tensor_id],
output_names=output_names,
axis=axis,
num_splits=num_splits)
else:
layer = self._get_builder().add_split_nd(
name=node.name,
input_name=input_names[tensor_id],
output_names=output_names,
axis=axis,
split_sizes=list(split))
if not has_equal_splits:
for i, name in enumerate(output_names):
self.tensor_shapes[name] = self._get_tensor_shape_from_type(node.datatype.T[i])
else:
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_identity(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
if node.name in [feature.name for feature in self.get_spec().description.output]:
layer = self._get_builder().add_activation(
name=node.name,
non_linearity='LINEAR',
input_name=input_names[0],
output_name=node.name,
params=(1.0, 0.0))
shapes.propagate_single_layer(layer, self.tensor_shapes)
else:
self.op_tensor_map[node.name] = [input_names[-1]]
def _convert_tensorarray_size(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 1)
builder = self._get_builder()
layer = builder.add_get_shape(
name=node.name + '_full_shape',
input_name=input_names[0],
output_name=node.name + '_full_shape')
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_slice_static(
name=node.name,
input_name=node.name + '_full_shape',
output_name=node.name,
begin_ids=[0],
end_ids=[1],
begin_masks=[False],
end_masks=[False],
strides=[1])
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_tensorarray_gather(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 2)
layer = self._get_builder().add_gather(
name=node.name, input_names=input_names[::-1], output_name=node.name, axis=0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_pack(self, node):
axis = node.attr.get('axis')
axis = axis if axis else 0
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) == 1:
if _is_scalar(input_types[0]):  # skip/identity op in this case
self.op_tensor_map[node.name] = input_names
else:
layer = self._get_builder().add_expand_dims(
name=node.name, input_name=input_names[0], output_name=node.name, axes=[0])
else:
if all([_is_scalar(input_type) for input_type in input_types]):
layer = self._get_builder().add_concat_nd(
name=node.name, input_names=input_names, output_name=node.name, axis=axis)
else:
layer = self._get_builder().add_stack(
name=node.name, input_names=input_names, output_name=node.name, axis=axis)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_unpack(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
output_names = [(node.name + '_' + str(i) + '_') for i in range(len(node.datatype.T))]
self.op_tensor_map[node.name] = output_names
num_splits = node.attr['num']
axis = int(node.attr['axis'])
interm_output_names = [name + '_unsqueezed_' for name in output_names]
layer = self._get_builder().add_split_nd(
name=node.name, input_name=input_names[0], output_names=interm_output_names, axis=axis,
num_splits=num_splits)
shapes.propagate_single_layer(layer, self.tensor_shapes)
for in_name, out_name in zip(interm_output_names, output_names):
layer = self._get_builder().add_squeeze(
name=out_name, input_name=in_name, output_name=out_name, axes=[0])
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_gather(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
# NNSSA: [u'encoder/Variable/read', u'Placeholder', u'encoder/embedding_lookup/axis']
# Core ML: given two inputs, 'data' and 'indices', gather slices of 'data' along the given axis.
axis = node.attr['axis']
layer = self._get_builder().add_gather(
name=node.name, input_names=input_names[0:2], output_name=node.name, axis=axis)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_gather_nd(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_gather_nd(
name=node.name,
input_names=input_names,
output_name=node.name
)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_scatter_nd(self, node):
assert len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
indices, updates, shape = input_names
if input_nodes[2].value:
output_shape = input_nodes[2].value.val
layer = self._get_builder().add_fill_static(
name=node.name + '_tmp',
output_name=node.name + '_tmp',
output_shape=output_shape,
)
else:
layer = self._get_builder().add_fill_dynamic(
name=node.name + '_tmp',
input_name= shape,
output_name=node.name + '_tmp'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_scatter_nd(
name=node.name,
input_names=[node.name + '_tmp', indices, updates],
output_name=node.name,
mode='ADD'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_unary_square(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_elementwise(
name=node.name, input_names=input_names * 2, output_name=node.name, mode='MULTIPLY')
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_unary_neg(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_elementwise(
name=node.name, input_names=[input_names[0]], output_name=node.name, mode='MULTIPLY', alpha=-1.0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_conv2d(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
weight = None
bias = None
if len(input_names) == 1:
weight = node.attr.get('W', node.attr.get('W_const'))
bias = node.attr.get('bias')
elif len(input_names) == 2:
input_names = [input_names[0]]
if input_nodes[1].op == 'Const':
weight = input_nodes[1].value.val
bias = node.attr.get('bias')
if weight is None:
raise NotImplementedError(
'[SSAConverter] Dynamic weights in convolution not implemented')
dilations_factors = node.attr.get('dilations', [1, 1, 1, 1])
assert len(weight.shape) == 4, 'Conv2d: weight parameter not rank 4'
data_format = node.attr.get('data_format', 'NHWC')
conv_input_name = input_names[0]
conv_output_name = node.name
builder = self._get_builder()
if data_format == 'NHWC' or data_format == 'NHWC_format_inserted':
stride_height = node.attr.get('strides', [1, 1, 1, 1])[1]
stride_width = node.attr.get('strides', [1, 1, 1, 1])[2]
else:
stride_height = node.attr.get('strides', [1, 1, 1, 1])[-2]
stride_width = node.attr.get('strides', [1, 1, 1, 1])[-1]
border_mode = node.attr.get('padding').lower()
groups = 1
kernel_height, kernel_width, kernel_channels, output_channels = weight.shape
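# Depthwise convolution maps onto a grouped convolution: each input channel is
# its own group and the kernel is reshaped so that
# output_channels = kernel_channels * depth_multiplier.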
if node.op == 'DepthwiseConv2dNative':
depth_multiplier = weight.shape[3]
weight = np.reshape(weight,
(kernel_height, kernel_width, 1, kernel_channels * depth_multiplier))
output_channels = kernel_channels * depth_multiplier
groups = kernel_channels
kernel_channels = 1
pad_h = node.attr.get('pad_h', [0, 0])
pad_w = node.attr.get('pad_w', [0, 0])
paddings_before = node.attr.get('_paddings_before', None)
if paddings_before:
layer = builder.add_padding(
name=node.name + '_paddings_before',
left=paddings_before[0],
right=paddings_before[1],
top=paddings_before[2],
bottom=paddings_before[3],
value=0,
input_name=conv_input_name,
output_name=node.name + '_paddings_before'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
builder.add_convolution(
name=conv_output_name,
kernel_channels=kernel_channels,
output_channels=output_channels,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
border_mode=border_mode,
groups=groups,
W=weight,
b=bias,
has_bias=(bias is not None),
is_deconv=False,
output_shape=None,
input_name=conv_input_name if not paddings_before else node.name + '_paddings_before',
output_name=conv_output_name,
dilation_factors=dilations_factors,
padding_bottom=pad_h[0],
padding_top=pad_h[1],
padding_left=pad_w[0],
padding_right=pad_w[1]
)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_pool(self, node, layer_type):
input_nodes, input_names, input_types = self._get_input_tensors(node)
data_format = node.attr.get('data_format', 'NHWC')
kernel_sizes = node.attr.get('ksize', [1, 1, 1, 1])
stride_sizes = node.attr.get('strides', [1, 1, 1, 1])
padding_type = node.attr.get('padding')
global_pooling = node.attr.get('global_pooling', False)
if data_format == 'NHWC' or data_format == 'NHWC_format_inserted':
kernel_height = kernel_sizes[1]
kernel_width = kernel_sizes[2]
stride_height = stride_sizes[1]
stride_width = stride_sizes[2]
else:
kernel_height = kernel_sizes[-2]
kernel_width = kernel_sizes[-1]
stride_height = stride_sizes[-2]
stride_width = stride_sizes[-1]
self._get_builder().add_pooling(
name=node.name,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
layer_type=layer_type,
padding_type=padding_type,
input_name=input_names[0],
output_name=node.name,
exclude_pad_area=True,
is_global=global_pooling
)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_maxpool(self, node):
self._convert_pool(node, 'MAX')
def _convert_avgpool(self, node):
self._convert_pool(node, 'AVERAGE')
def _convert_reshape(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
if _is_scalar(node.datatype) and self._get_tensor_shape_from_type(input_types[0]) == (1,): # skip/identity op in that case
self.op_tensor_map[node.name] = [input_names[0]]
elif self._get_tensor_shape_from_type(input_types[0]) == self._get_tensor_shape_from_type(node.datatype) \
and sum([i < 0 for i in self._get_tensor_shape_from_type(node.datatype)]) <= 1:
# in this case reshape is not changing the shape
self.op_tensor_map[node.name] = [input_names[0]]
elif (builtins.is_tensor(node.datatype) and
sum([i < 0 for i in self._get_tensor_shape_from_type(node.datatype)]) <= 1):
output_shape = self._get_tensor_shape_from_type(node.datatype)
layer = self._get_builder().add_reshape_static(
name=node.name,
input_name=input_names[0],
output_name=node.name,
output_shape=output_shape)
shapes.propagate_single_layer(layer, self.tensor_shapes)
else:
layer = self._get_builder().add_reshape_dynamic(
name=node.name, input_names=input_names, output_name=node.name)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_matrix_band_part(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
assert (len(input_names) == 3)
assert all([x.op == 'Const' for x in input_nodes[-2:]])
lower = input_nodes[1].value.val
upper = input_nodes[2].value.val
builder = self._get_builder()
builder.add_matrix_band_part(
name = node.name,
input_name= input_names[0],
output_name=node.name,
num_lower=lower,
num_upper=upper)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_argmax(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
axis = node.attr['reduction_indices'][0]
layer = self._get_builder().add_argmax(
name=node.name,
input_name=input_names[0],
output_name=node.name,
axis=axis,
keepdims=node.attr.get("keep_dims", False))
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_argmin(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
axis = node.attr['reduction_indices'][0]
layer = self._get_builder().add_argmin(
name=node.name,
input_name=input_names[0],
output_name=node.name,
axis=axis,
keepdims=node.attr.get("keep_dims", False))
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_reverse(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
reverse_axes = input_nodes[1].value.val
rank = len(self.tensor_shapes[input_names[0]])
reverse_dim = [False] * rank
for axis in reverse_axes:
reverse_dim[axis] = True
layer = self._get_builder().add_reverse(
name=node.name, input_name=input_names[0], output_name=node.name, reverse_dim=reverse_dim)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_expand_dims(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
if _is_scalar(input_types[0]): # skip/identity op in that case
input_nodes[0].datatype = builtins.tensor(input_types[0], (1,))
self.op_tensor_map[node.name] = [input_names[0]]
if len(input_names) == 2 and input_nodes[1].value.val is None:
raise NotImplementedError("[SSAConverter] Cannot handle dynamic expandDims")
axes = input_nodes[1].value.val
axes = list(axes) if isinstance(axes, Iterable) else [axes]
layer = self._get_builder().add_expand_dims(
name=node.name, input_name=input_names[0], output_name=node.name, axes=axes)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_squeeze(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
axes = node.attr["squeeze_dims"]
layer = self._get_builder().add_squeeze(
name=node.name,
input_name=input_names[0],
output_name=node.name,
axes=axes)
shapes.propagate_single_layer(layer, self.tensor_shapes)
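# Core ML NN tensors are floating point, so Cast is approximated with a round
# layer; this matches casts to integer types but would alter float-to-float casts.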
def _convert_cast(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_round(
name=node.name,
input_name=input_names[0],
output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_reverse_sequence(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
batch_axis = node.attr['batch_dim']
seq_axis = node.attr['seq_dim']
layer = self._get_builder().add_reverse_sequence(
name=node.name,
input_names=input_names,
output_name=node.name,
batch_axis=batch_axis,
seq_axis=seq_axis)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_embedding(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
weight = None
if len(input_names) == 1:
weight = node.attr.get('W')
elif len(input_names) == 2 and input_nodes[1].op == 'Const':
weight = input_nodes[1].value.val # (batch, depth, out_channels)
if weight is None:
raise ValueError('[SSAConverter] Unable to handle dynamic embedding')
out_channels = weight.shape[-1]
depth = node.attr['depth']
weight = weight.reshape([depth, out_channels]).transpose((1, 0))
expanddim_name = node.name + '_expandim_'
layer = self._get_builder().add_expand_dims(
name=expanddim_name, input_name=input_names[0], output_name=expanddim_name, axes=[-1])
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_embedding_nd(
name=node.name,
input_name=expanddim_name,
output_name=node.name,
vocab_size=depth,
embedding_size=out_channels,
W=weight)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_tile(self, node):
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
reps = input_nodes[1].value.val
layer = self._get_builder().add_tile(
name=node.name,
input_name=input_names[0],
output_name=node.name,
reps=reps
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_lstm_block_cell(self, node):
assert len(node.inputs) == 5
input_nodes, input_names, input_types = self._get_input_tensors(node)
x, w_name, b_name, h_prev, c_prev = input_names
weight = input_nodes[1].value.val
bias = input_nodes[2].value.val
builder = self._get_builder()
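# TF's LSTMBlockCell packs the gate weights in (i, g, f, o) order (g = cell
# input), while the Core ML LSTM expects (i, f, o, g); the helper below reorders
# the weight blocks accordingly.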
def igfo_to_ifog(data):
i, g, f, o = np.split(data, 4, axis=-1)
return np.concatenate([i, f, o, g], axis=-1)
hidden_size = weight.shape[-1] // 4
input_size = weight.shape[0] - hidden_size
W_h_fw = weight[input_size:, :4 * hidden_size]
W_h_fw = igfo_to_ifog(W_h_fw)
W_h_fw = np.transpose(W_h_fw, [1, 0])
W_h_fw = np.ascontiguousarray(W_h_fw)
W_h_fw = np.split(W_h_fw, 4, axis=0)
W_x_fw = weight[:input_size, :4 * hidden_size]
W_x_fw = igfo_to_ifog(W_x_fw)
W_x_fw = np.transpose(W_x_fw, [1, 0])
W_x_fw = np.ascontiguousarray(W_x_fw)
W_x_fw = np.split(W_x_fw, 4, axis=0)
b_fw = bias[:4 * hidden_size]
b_fw = igfo_to_ifog(b_fw)
b_fw = np.split(b_fw, 4, axis=-1)
forget_bias = node.attr.get('forget_bias')
has_forget_bias = forget_bias and forget_bias != 0.0
if has_forget_bias:
b_fw[1] += forget_bias
layer = builder.add_expand_dims(
name=node.name + '_in_expand',
input_name=x,
output_name=node.name + '_in_expand',
axes=[-1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_expand_dims(
name=node.name + '_h_prev_expand',
input_name=h_prev,
output_name=node.name + '_h_prev_expand',
axes=[0, -1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_expand_dims(
name=node.name + '_c_prev_expand',
input_name=c_prev,
output_name=node.name + '_c_prev_expand',
axes=[0, -1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_unilstm(
name=node.name + '_lstm',
W_h=W_h_fw,
W_x=W_x_fw,
b=b_fw,
hidden_size=hidden_size,
input_size=input_size,
input_names=[
node.name + '_in_expand',
node.name + '_h_prev_expand',
node.name + '_c_prev_expand'
],
output_names=[
node.name + '_lstm_out',
node.name + '_lstm_h',
node.name + '_lstm_c',
],
forget_bias=has_forget_bias,
output_all=True,
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_squeeze(
name=node.name + '_out',
input_name=node.name + '_lstm_out',
output_name=node.name + '_out',
axes=[-1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_copy(
name=node.name + '_temp_h',
input_name=node.name + '_lstm_out',
output_name=node.name + '_temp_h'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
# Workaround: the Core ML LSTM layer only emits its states at the last sequence step, so broadcast the cell state to the shape of the full output.
layer = builder.add_broadcast_to_like(
name=node.name + '_temp_c',
input_names=[node.name + '_lstm_c', node.name + '_lstm_out'],
output_name=node.name + '_temp_c',
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_squeeze(
name=node.name + '_h',
input_name=node.name + '_temp_h',
output_name=node.name + '_h',
axes=[-1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_squeeze(
name=node.name + '_c',
input_name=node.name + '_temp_c',
output_name=node.name + '_c',
axes=[-1, -2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
self.op_tensor_map[node.name] = [
node.name + '_out', node.name + '_h', node.name + '_c'
]
def _convert_constant_pad(self, node):
# operator Pad has 2 inputs, PadV2 has 3 inputs
assert len(node.inputs) == 2 or len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
constant_value = 0
if len(node.inputs) == 3:
constant_value = input_nodes[2].value.val
if constant_value == -np.inf:
INT_MIN = -np.iinfo(np.int64).max - 1
constant_value = float(INT_MIN)
if constant_value == np.inf:
INT_MAX = np.iinfo(np.int64).max
constant_value = float(INT_MAX)
# this layer takes at most 2 inputs
input_names = input_names[:2]
layer = self._get_builder().add_constant_pad(
name=node.name,
input_names=input_names,
output_name=node.name,
value=constant_value
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_mirror_pad(self, node):
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
paddings = input_nodes[1].value.val # rank 4, nhwc
left, right = paddings[2][0], paddings[2][1]
top, bottom = paddings[1][0], paddings[1][1]
if node.attr.get('mode', '').lower() == 'symmetric':
warn('[SSAConverter] Warning: Symmetric MirrorPad is not supported '
'but can be approximated with non-symmetric padding in some '
'cases. Conversion will continue, but expect some loss '
'of model accuracy.')
builder = self._get_builder()
layer = builder.add_padding(
name=node.name,
left=left,
right=right,
top=top,
bottom=bottom,
input_name=input_names[0],
output_name=node.name,
padding_type='reflection'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_topk(self, node):
assert len(node.inputs) == 2
if node.attr.get('sorted') is False:
raise NotImplementedError('sorted should be set to True.')
input_nodes, input_names, input_types = self._get_input_tensors(node)
k = input_nodes[1].value.val
output_names = [(node.name + '_' + str(i)) for i in range(2)]
layer = self._get_builder().add_topk(
name=node.name,
input_names=[input_names[0]],
output_names=output_names,
k=k,
axis=-1
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
self.op_tensor_map[node.name] = output_names
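# log_softmax(x) = x - logsumexp(x, axis): a reduce_logsumexp with keepdims=True
# followed by a broadcastable subtraction.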
def _convert_unary_log_softmax(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
axis = -1 if 'axis' not in node.attr else node.attr['axis']
layer = self._get_builder().add_reduce_logsumexp(
name=node.name + "_logsumexp",
input_name=input_names[0],
output_name=node.name + "_logsumexp",
axes=[axis],
keepdims=True,
reduce_all=False
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = self._get_builder().add_subtract_broadcastable(
name=node.name,
input_names=input_names + [node.name + "_logsumexp"],
output_name=node.name
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_unary_inverse(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_unary(
name=node.name,
input_name=input_names[0],
output_name=node.name,
mode='inverse'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_batchnorm(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
if 'gamma' not in node.attr or 'beta' not in node.attr:
raise ValueError('BatchNorm node must have attributes \'gamma\' and \'beta\'')
gamma = node.attr.get('gamma')
num_channels = len(gamma)
beta = node.attr.get('beta')
mean = node.attr.get('mean', np.zeros((num_channels,)))
variance = node.attr.get('variance', np.ones((num_channels,)))
layer = self._get_builder().add_batchnorm(
name=node.name,
channels=num_channels,
gamma=gamma,
beta=beta,
mean=mean,
variance=variance,
input_name=input_names[0],
output_name=node.name
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_unary_common(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
op = node.op.lower() # type of the unary operator
if op in ['sqrt', 'rsqrt', 'exp', 'log', 'abs']:
layer = self._get_builder().add_unary(
name=node.name, input_name=input_names[0], output_name=node.name, mode=op)
else:
# same function name for TensorFlow and Core ML
func = getattr(self._get_builder(), 'add_' + op)
layer = func(name=node.name, input_name=input_names[0], output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_unary_trigonometric(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
op = node.op.lower() # type of the unary operator
# assumes TensorFlow and Core ML use the same op name
func = getattr(self._get_builder(), 'add_' + op)
layer = func(name=node.name, input_name=input_names[0], output_name=node.name)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_unary_activation(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
op = node.op.upper() # type of the unary operator
params = None
if op in ['LEAKYRELU']:
params = ([node.attr['alpha']])
elif op in ['ELU']:
params = 1.0
layer = self._get_builder().add_activation(
name=node.name,
input_name=input_names[0],
output_name=node.name,
non_linearity=op,
params=params
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
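# ReLU6(x) = min(max(x, 0), 6): a RELU activation followed by a clip layer with
# max_value=6.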
def _convert_unary_activation_relu6(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
builder = self._get_builder()
layer = builder.add_activation(
name=node.name + '_relu',
input_name=input_names[0],
output_name=node.name + '_relu',
non_linearity='RELU',
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_clip(
name=node.name,
input_name=node.name + '_relu',
output_name=node.name,
max_value=6.0
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
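# GELU is emitted with Core ML's tanh approximation:
#   gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))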
def _convert_gelu(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
# Core ML has 3 modes: EXACT, TANH_APPROXIMATION, SIGMOID_APPROXIMATION
layer = self._get_builder().add_gelu(
name=node.name,
input_name=input_names[0],
output_name=node.name,
mode='TANH_APPROXIMATION')
output_shape = self._get_tensor_shape_from_type(node.datatype)
shapes.propagate_single_layer(layer, self.tensor_shapes,
output_shapes=[output_shape])
def _convert_reduction(self, node):
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) == 2:
axes = np.array(input_nodes[1].value.val).flatten()
reduction_indices = list(axes) if isinstance(axes, Iterable) else [axes]
elif 'reduction_indices' in node.attr:
reduction_indices = node.attr['reduction_indices']
else:
reduction_indices = node.attr['axis']
if 'keep_dims' in node.attr:
keepdims = node.attr['keep_dims']
else:
keepdims = node.attr['keepdims']
op = node.op.lower() # type of the unary operator
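# Boolean reductions are mapped onto arithmetic ones (assuming 0/1 inputs):
# reduce_all becomes a product and reduce_any becomes a sum.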
if op in ['all', 'any']:
op = 'prod' if op == 'all' else 'sum'
func = getattr(self._get_builder(), 'add_reduce_' + op)
layer = func(
name=node.name,
input_name=input_names[0],
output_name=node.name,
axes=reduction_indices,
keepdims=keepdims,
reduce_all=not reduction_indices
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_resize_bilinear(self, node):
# In TF, ResizeBilinear requires channel-last image axis order
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) == 2 and input_nodes[1].op == 'Const':
target_size = input_nodes[1].value.val
else:
raise ValueError('[SSAConverter] Unable to determine target size '
'for ResizeBilinear')
mode = 'STRICT_ALIGN_ENDPOINTS_MODE' if node.attr.get(
'align_corners', False) else 'UPSAMPLE_MODE'
builder = self._get_builder()
layer = builder.add_resize_bilinear(
name=node.name,
input_name=input_names[0],
output_name=node.name,
target_height=target_size[0],
target_width=target_size[1],
mode=mode)
output_shape = self._get_tensor_shape_from_type(node.datatype)
shapes.propagate_single_layer(layer, self.tensor_shapes,
output_shapes=[output_shape])
def _convert_resize_nearest_neighbor(self, node):
# In TF, ResizeNearestNeighbor requires channel-last image axis order
# During conversion, NNSSA's output shape should have been modified
# to NCHW in transform_nhwc_to_nchw()
input_nodes, input_names, input_types = self._get_input_tensors(node)
if len(input_names) == 2 and input_nodes[1].op == 'Const':
target_size = input_nodes[1].value.val
else:
raise ValueError('[SSAConverter] Unable to determine target size '
'for ResizeNearestNeighbor')
try:
input_shape = self._get_tensor_shape_from_type(input_types[0])
except Exception:
input_shape = None
if input_shape is None or len(input_shape) != 4:
raise ValueError('[SSAConverter] ResizeNearestNeighbor has invalid '
'input shape {}'.format(input_shape))
if target_size[0] < input_shape[2] and target_size[1] < input_shape[3]:
self._convert_resize_bilinear(node)
elif target_size[0] > input_shape[2] and target_size[1] > input_shape[3]:
if (target_size[0] % input_shape[2] > 0 or
target_size[1] % input_shape[3] > 0):
raise ValueError('[SSAConverter] Unsupported fractional '
'nearest-neighbor upsampling')
scaling_factor_h = int(target_size[0] / input_shape[2])
scaling_factor_w = int(target_size[1] / input_shape[3])
if scaling_factor_h <= 0 or scaling_factor_w <= 0:
raise ValueError('[SSAConverter] Invalid scaling factor.')
if node.attr.get('align_corners', False) is True:
raise ValueError('[SSAConverter] Core ML does not support '
'ResizeNearestNeighbor with align_corners.')
builder = self._get_builder()
layer = builder.add_upsample(
name=node.name,
scaling_factor_h=scaling_factor_h,
scaling_factor_w=scaling_factor_w,
input_name=input_names[0],
output_name=node.name,
mode='NN')
output_shape = self._get_tensor_shape_from_type(node.datatype)
shapes.propagate_single_layer(layer, self.tensor_shapes,
output_shapes=[output_shape])
else:
raise NotImplementedError("[SSAConverter] Unsupported resizing option.")
def _convert_layer_normalization(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
input_name = input_names[0]
builder = self._get_builder()
gamma = node.attr['gamma']
beta = node.attr['beta']
axes = node.attr['axes']
epsilon = node.attr['epsilon']
input_shape = list(input_types[0].get_shape())
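# Fast path: for rank-2/3 inputs normalized over the last axis, layer norm is
# expressed as reshape -> MVN (mean/variance normalization) -> scale with
# gamma/beta -> reshape back; the generic layer_normalization layer in the else
# branch handles the remaining cases.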
if (len(input_shape) in [2, 3] and len(axes) == 1 and \
axes[0] == len(input_shape) - 1):
# Performance enhancement for some models with layer-norm
builder.add_reshape_static(name=input_name + '_reshape',
input_name=input_name,
output_name=input_name + '_reshape',
output_shape=input_shape + [1, 1])
builder.add_mvn(name=input_name + '_mvn',
input_name=input_name + '_reshape',
output_name=input_name + '_mvn', across_channels=True,
normalize_variance=True, epsilon=epsilon)
builder.add_scale(name=node.name + '_5d',
input_name=input_name + '_mvn',
output_name=node.name + '_5d', W=gamma, b=beta, has_bias=True,
shape_scale=[len(gamma)], shape_bias=[len(beta)])
builder.add_reshape_static(name=node.name,
input_name=node.name + '_5d',
output_name=node.name,
output_shape=input_shape)
else:
# General implementation
input_shape = input_types[0].get_shape()
rdims = len(axes)
normalized_shape = node.datatype.get_shape()[-rdims:]
if gamma.shape != normalized_shape:
gamma = np.zeros(normalized_shape) + gamma
if beta.shape != normalized_shape:
beta = np.zeros(normalized_shape) + beta
builder.add_layer_normalization(node.name, input_name, node.name,
normalized_shape, gamma, beta, eps=1e-5)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(
node.datatype)
def _convert_binary(self, node):
"""
Convert binary operator
- Attempts to add elementwise operator if possible
- Otherwise, inserts broadcastable operator
"""
def _is_elementwise_scalar_check(input_type):
"""
Checks if element is scalar
- A scalar
- 0-D tensor
- 1-D tensor with only one element
"""
if _is_scalar(input_type):
return True
shape = input_type.get_shape()
if builtins.is_tensor(input_type) and len(shape) == 1 and shape[0] == 1:
return True
return False
# CoreML's elementwise operator has limited broadcasting support
# Check if first shape can be broadcasted to second shape
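# e.g. shape_0=(1, 1, 1) broadcasts against shape_1=(8, 4, 5), but shape_0=(1, 4, 1) does not pass this check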
def _is_broadcastable_shape(shape_0, shape_1):
assert (len(shape_0) > 0 and len(shape_1) > 0)
if shape_0[0] != 1 and shape_0[0] != shape_1[0]:
return False
if shape_0[1:] == [1] * (len(shape_0)-1):
return True
return False
def _convert_binary_elementwise(node):
"""
Adds binary elementwise operator
- Returns True if successful
- Otherwise returns False
"""
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
builder = self._get_builder()
elementwise_support = {'add', 'addv2', 'sub', 'mul', 'realdiv'}
op = node.op.lower()
if op not in elementwise_support:
return False
# If either input shape is dynamic, the elementwise operator cannot be used
for _input in input_types:
if -1 in self._get_tensor_shape_from_type(_input):
return False
alpha = None
inputs = []
if input_nodes[1].op == 'Const' and _is_elementwise_scalar_check(input_types[1]):
# Note: alpha comes from the second input, which is a scalar constant
alpha = input_nodes[1].value.val
inputs = [input_names[0]]
elif input_nodes[0].op == 'Const' and _is_elementwise_scalar_check(input_types[0]):
# Note: alpha comes from the first input, which is a scalar constant
alpha = input_nodes[0].value.val
inputs = [input_names[1]]
else:
# If neither input is a scalar, make sure the shapes match
# If either input is not a tensor, use the broadcastable layer instead
if not (builtins.is_tensor(input_types[0]) and builtins.is_tensor(input_types[1])):
return False
shape_0 = list(input_types[0].get_shape())
shape_1 = list(input_types[1].get_shape())
# Make sure neither input is rank-0
if len(shape_0) == 0 or len(shape_1) == 0:
return False
if _is_broadcastable_shape(shape_0, shape_1) or _is_broadcastable_shape(shape_1, shape_0):
pass
# NOTE: Special case: one input has extra leading dims of size 1 but is otherwise the same shape,
# e.g. (1, 4, 5) and (4, 5): in this case we can expand the second
# input to make it equivalent to (1, 4, 5)
elif abs(len(shape_0) - len(shape_1)) > 0:
small_index = 1 if len(shape_0) > len(shape_1) else 0
# Swap the shapes so that shape_0 is the lower-rank one, to infer the axis information
if small_index == 1:
shape_0, shape_1 = shape_1, shape_0
same_shape_index = len(shape_1) - len(shape_0)
shape_temp = [1] * same_shape_index + shape_0
if shape_temp != shape_1:
return False
# Expand the lower-rank input so the elementwise operator can be used
layer = builder.add_expand_dims(name=node.name+'_'+input_names[small_index]+'_'+'_expand_dims',
input_name=input_names[small_index],
output_name=input_names[small_index]+'_expanded',
axes=list(range(same_shape_index)))
shapes.propagate_single_layer(layer, self.tensor_shapes)
input_names[small_index] += '_expanded'
elif shape_0 != shape_1:
return False
inputs = input_names
# Div can only be simulated here when the divisor is the scalar alpha;
# with two tensor inputs, fall back to the broadcastable operator
if op == 'realdiv' and alpha is None:
return False
if op == 'realdiv':
# Invert alpha to simulate DIV using the MUL operator
if alpha is None:
raise ValueError("Incorrect configuration!! Alpha not provided for Elementwise Div operator")
alpha = 1 / float(alpha)
elif op == 'sub':
if alpha and inputs[0] == input_names[0]:
alpha = -alpha
else:
neg_index = 1
if alpha:
neg_index = 0
layer = builder.add_elementwise(name=node.name+'_'+inputs[neg_index]+'_neg',
input_names=[inputs[neg_index]],
output_name=inputs[neg_index]+'_neg',
mode='MULTIPLY',
alpha=-1.0)
inputs[neg_index] += '_neg'
shapes.propagate_single_layer(layer, self.tensor_shapes)
# map certain ops to different but equivalent ops
mapping_op = {'ADDV2':'ADD', 'SUB':'ADD', 'REALDIV':'MULTIPLY', 'MUL':'MULTIPLY'}
op = op.upper()
op = mapping_op.get(op, op)
layer = builder.add_elementwise(name=node.name,
input_names=inputs,
output_name=node.name,
mode=op,
alpha=alpha)
shapes.propagate_single_layer(layer, self.tensor_shapes)
return True
# Try to add Elementwise operator if possible,
# If configuration not supported, insert broadcastable operator instead
if not _convert_binary_elementwise(node):
self._convert_binary_broadcastable(node)
def _convert_binary_broadcastable(self, node):
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
builder = self._get_builder()
op = node.op.lower() # type of the binary operator
compare_greater_ops = {'greater', 'greaterequal'}
compare_equal_ops = {'equal', 'notequal'}
compare_less_ops = {'less', 'lessequal'}
logical_ops = {'logicaland': 'AND', 'logicalor': 'OR'}
math_ops = {'sub': 'subtract', 'mul': 'multiply', 'realdiv': 'divide',
'floordiv': 'floor_div', 'maximum': 'max', 'minimum': 'min',
'biasadd': 'add', 'pow': 'pow', 'addv2': 'add'}
if op in compare_greater_ops:
layer = builder.add_greater_than(
name=node.name,
input_names=input_names,
output_name=node.name,
use_greater_than_equal='equal' in op
)
elif op in compare_equal_ops:
op = 'not_equal' if op == 'notequal' else op
func = getattr(builder, 'add_' + op)
layer = func(
name=node.name,
input_names=input_names,
output_name=node.name
)
elif op in compare_less_ops:
layer = builder.add_less_than(
name=node.name,
input_names=input_names,
output_name=node.name,
use_less_than_equal='equal' in op
)
elif op in logical_ops.keys():
layer = self._get_builder().add_logical(
name=node.name,
input_names=input_names,
output_name=node.name,
mode=logical_ops[op]
)
elif op in math_ops.keys():
func = getattr(builder, 'add_' + math_ops[op] + '_broadcastable')
layer = func(
name=node.name,
input_names=input_names,
output_name=node.name
)
else: # same function name for TensorFlow and Core ML
func = getattr(builder, 'add_' + op + '_broadcastable')
layer = func(
name=node.name,
input_names=input_names,
output_name=node.name
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_fill(self, node):
assert len(node.inputs) == 2
input_nodes, input_names, input_types = self._get_input_tensors(node)
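# Input 0 supplies the output shape at runtime; input 1 must be a constant scalar fill value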
value = input_nodes[1].value.val
layer = self._get_builder().add_fill_dynamic(name=node.name,
input_name=input_names[0],
output_name=node.name,
value=value)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_iff(self, node):
assert len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
layer = self._get_builder().add_branch(name=node.name,
input_name=input_names[0])
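# Each branch is an identity (LINEAR activation with scale 1.0, bias 0.0) that forwards the selected input to this node's output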
ifbranch = NeuralNetworkBuilder(nn_spec=layer.branch.ifBranch,
disable_rank5_shape_mapping=True)
ifbranch.add_activation(name=node.name + "_if_",
non_linearity='LINEAR',
input_name=input_names[1],
output_name=node.name,
params=(1.0, 0.0))
elsebranch = NeuralNetworkBuilder(nn_spec=layer.branch.elseBranch,
disable_rank5_shape_mapping=True)
elsebranch.add_activation(name=node.name + "_else_",
non_linearity='LINEAR',
input_name=input_names[2],
output_name=node.name,
params=(1.0, 0.0))
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_reorganize_data(self, node):
assert len(node.inputs) == 1
input_nodes, input_names, input_types = self._get_input_tensors(node)
block_size = node.attr.get('block_size', 2)
if node.op == 'SpaceToDepth':
mode = 'SPACE_TO_DEPTH'
else: # node.op == 'DepthToSpace':
mode = 'DEPTH_TO_SPACE'
builder = self._get_builder()
layer = builder.add_reorganize_data(
name=node.name,
input_name=input_names[0],
output_name=node.name,
mode=mode,
block_size=block_size
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_space_to_batch_nd(self, node):
assert len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
block_shape = input_nodes[1].value.val
if len(block_shape.flatten()) != 2 or block_shape[0] != block_shape[1]:
raise NotImplementedError('non-equal block shape is not yet supported')
paddings = input_nodes[2].value.val
needs_paddings = any(paddings.flatten())
builder = self._get_builder()
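# Emulate SpaceToBatchND: transpose so the batch dim sits in the depth slot, optionally pad the
# spatial dims, run SPACE_TO_DEPTH, then transpose back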
layer = builder.add_transpose(
name=node.name + '_transpose1',
input_name=input_names[0],
output_name=node.name + '_transpose1',
axes=[3, 0, 1, 2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
if needs_paddings:
left, right = paddings[1][0], paddings[1][1]
top, bottom = paddings[0][0], paddings[0][1]
layer = builder.add_padding(
name=node.name + '_padding',
left=left,
right=right,
top=top,
bottom=bottom,
input_name=node.name + '_transpose1',
output_name=node.name + '_padding'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_reorganize_data(
name=node.name + '_reorganize',
input_name=node.name + '_transpose1' if not needs_paddings else node.name + '_padding',
output_name=node.name + '_reorganize',
mode='space_to_depth'.upper(),
block_size=block_shape[0]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_transpose(
name=node.name,
input_name=node.name + '_reorganize',
output_name=node.name,
axes=[1, 2, 3, 0]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_batch_to_space_nd(self, node):
assert len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
block_shape = input_nodes[1].value.val
if block_shape[0] != block_shape[1]:
raise NotImplementedError('non-equal block shape is not yet supported')
crops = input_nodes[2].value.val
needs_cropping = any(crops.flatten())
builder = self._get_builder()
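# Emulate BatchToSpaceND: transpose the batch dim into the depth slot, run DEPTH_TO_SPACE,
# optionally crop the spatial dims, then transpose back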
layer = builder.add_transpose(
name=node.name + '_transpose1',
input_name=input_names[0],
output_name=node.name + '_transpose1',
axes=[3, 0, 1, 2]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_reorganize_data(
name=node.name + '_reorganize',
input_name=node.name + '_transpose1',
output_name=node.name + '_reorganize',
mode='depth_to_space'.upper(),
block_size=block_shape[0]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
if needs_cropping:
left, right = crops[1][0], crops[1][1]
top, bottom = crops[0][0], crops[0][1]
layer = builder.add_crop(
name=node.name + '_cropping',
left=left,
right=right,
top=top,
bottom=bottom,
offset=0,
input_names=[node.name + '_reorganize'],
output_name=node.name + '_cropping'
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
layer = builder.add_transpose(
name=node.name,
input_name=node.name + '_reorganize' if not needs_cropping else node.name + '_cropping',
output_name=node.name,
axes=[1, 2, 3, 0]
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_conv2d_transpose(self, node):
assert len(node.inputs) == 3
input_nodes, input_names, input_types = self._get_input_tensors(node)
input_name = input_names[2]
weight = input_nodes[1].value.val
bias = node.attr.get('bias')
strides = node.attr.get('strides')
border_mode = node.attr.get('padding').lower()
stride_height = strides[1]
stride_width = strides[2]
kernel_channels = input_types[-1].get_shape()[1]
output_channels = node.datatype.get_shape()[1]
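# TF's conv2d_transpose filter layout is [H, W, C_out, C_in]; the last two axes are swapped
# before the weights are handed to the Core ML builder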
self._get_builder().add_convolution(
name=node.name,
kernel_channels=kernel_channels,
output_channels=output_channels,
height=weight.shape[0],
width=weight.shape[1],
stride_height=stride_height,
stride_width=stride_width,
border_mode=border_mode,
groups=1,
W=np.transpose(weight, (0, 1, 3, 2)),
b=bias,
has_bias=(bias is not None),
is_deconv=True,
output_shape=None,
input_name=input_name,
output_name=node.name
)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_lrn(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
alpha = node.attr.get('alpha')
beta = node.attr.get('beta')
bias = node.attr.get('bias')
depth_radius = node.attr.get('depth_radius')
n_channels = self._get_tensor_shape_from_type(input_types[-1])[-1]
if node.attr.get('data_format') == 'NHWC_format_inserted':
n_channels = self._get_tensor_shape_from_type(input_types[-1])[1]
layer = self._get_builder().add_lrn(
name=node.name,
input_name=input_names[0],
output_name=node.name,
alpha=alpha * n_channels,
beta=beta,
local_size=depth_radius,
k=bias
)
shapes.propagate_single_layer(layer, self.tensor_shapes)
def _convert_clip(self, node):
input_nodes, input_names, input_types = self._get_input_tensors(node)
min_value = input_nodes[1].value.val
max_value = input_nodes[2].value.val
layer = self._get_builder().add_clip(name=node.name,
input_name=input_names[0],
output_name=node.name,
min_value=min_value,
max_value=max_value)
self.tensor_shapes[node.name] = self._get_tensor_shape_from_type(node.datatype)
def _convert_zeros_like(self, node):
""" Convert a ZerosLike node.
"""
input_nodes, input_names, input_types = self._get_input_tensors(node)
shape = input_types[0].get_shape()
builder = self._get_builder()
if -1 not in shape:
# We can use fill static or load constant as shape is known
val = np.zeros(shape)
if len(shape) == 0:
val = np.array([0])
layer = builder.add_load_constant_nd(
name=node.name, output_name=node.name, constant_value=val, shape=val.shape)
else:
# Insert dynamic zeros like
layer = builder.add_fill_like(
name=node.name, input_name=input_names[0], output_name=node.name, value=0.0)
shapes.propagate_single_layer(layer, self.tensor_shapes)
``` |
{
"source": "jlhbaseball15/DIVA",
"score": 2
} |
#### File: manuals/processes/sample_process.py
```python
from sprokit.pipeline import process
from kwiver.kwiver_process import KwiverProcess
class ClassifierProcess(KwiverProcess):
def __init__(self, conf):
KwiverProcess.__init__(self, conf)
# declare configuration
self.add_config_trait("model_file", "model_file",
'dummy.model', 'Model file for the classifier')
self.declare_config_using_trait('model_file')
# set up flags
required = process.PortFlags()
required.add(self.flag_required)
optional = process.PortFlags()
# declare ports
self.declare_input_port_using_trait('image', required)
self.declare_input_port_using_trait('file_name', optional)
self.declare_output_port_using_trait('double_vector', required)
def _configure(self):
# Configure the process
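# NOTE: Classifier is assumed to be importable from elsewhere in the package; it is not defined in this sample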
self.classifier = Classifier(self.config_value("model_file"))
def _step(self):
# Step Function for the process
img_container = self.grab_input_using_trait('image')
video_name = self.grab_input_using_trait('file_name')
# Classify the image
class_score = self.classifier.classify(img_container.image())
# Push results to port
self.push_to_port_using_trait('double_vector', class_score)
def __sprokit_register__():
from sprokit.pipeline import process_factory
module_name = 'python:kwiver.ClassifierSample'
if process_factory.is_process_module_loaded(module_name):
return
process_factory.add_process('ClassifierSample', 'Dummy Classifier',
ClassifierProcess)
process_factory.mark_process_module_as_loaded(module_name)
``` |
{
"source": "jlhbaseball15/nmt_chinese_to_english",
"score": 3
} |
#### File: jlhbaseball15/nmt_chinese_to_english/data_preprocessor.py
```python
import numpy as np
import os
import gzip
import pickle
from IPython import embed
import xml.etree.ElementTree as ET
class CorpusFileMapping:
def __init__(self, english_filename, chinese_filename, sentence_mappings):
self.english_filename = english_filename
self.chinese_filename = chinese_filename
self.sentence_mappings = sentence_mappings
class Sentence:
def __init__(self, sentence, tag):
self.tag = tag
self.sentence = sentence
class DatasetProcessor:
def __init__(self):
self.ChineseDictionary = {}
self.EnglishDictionary = {}
self.EnglishDataset = []
self.ChineseDataset = []
def CreateDataset(self, filename, saveDictionary=True, saveDataset=True):
sentence_mappings = self.read_sentence_mapping(filename)
self.ProcessSentenceMappings(sentence_mappings)
if saveDictionary:
self.save_dictionaries()
def LoadCorpusFiles(self, filename):
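# Stub: corpus-file discovery is not implemented here; empty lists are returned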
english_corpus_files = []
chinese_corpus_files = []
return english_corpus_files, chinese_corpus_files
def CloseCorpusFiles(self, files):
for f in files:
f.close()
def ProcessSentenceMappings(self, file_mappings, saveDatasets=True):
dataset_count = 0
for i, fm in enumerate(file_mappings):
print "Processing " + fm.english_filename + " and " + fm.chinese_filename
english_data = self.ProcessCorpusFile(fm.english_filename, 'English')
chinese_data = self.ProcessCorpusFile(fm.chinese_filename, 'Chinese')
english_data, chinese_data = self.AlignDatasets(english_data, chinese_data, fm.sentence_mappings)
print "Aligned " + fm.english_filename + " and " + fm.chinese_filename
self.EnglishDataset.extend(english_data)
self.ChineseDataset.extend(chinese_data)
if i % 25 == 24:
if saveDatasets:
print "Saving Dataset" + str(dataset_count)
self.saveDatasets(dataset_count)
dataset_count += 1
self.EnglishDataset = []
self.ChineseDataset = []
self.saveDatasets(dataset_count)
def read_sentence_mapping(self, xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
file_maps = []
for linkGroup in root:
english_file = linkGroup.attrib['fromDoc']
chinese_file = linkGroup.attrib['toDoc']
sentence_mappings = []
for link in linkGroup:
mapping = self.processXMLMapping(link.attrib['xtargets'])
sentence_mappings.append(mapping)
file_map = CorpusFileMapping(english_file, chinese_file, sentence_mappings)
file_maps.append(file_map)
return file_maps
def AlignDatasets(self, english_data, chinese_data, sentence_mappings):
edata = []
cdata = []
for sm in sentence_mappings:
english = []
for i in sm[0]:
try:
english.extend(english_data[i - 1])
except:
print len(english_data)
print i
chinese = []
for i in sm[1]:
chinese.extend(chinese_data[i - 1])
edata.append(english)
cdata.append(chinese)
return edata, cdata
def processXMLMapping(self, link_attrib):
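# xtargets appears to have the form '<english ids>;<chinese ids>', e.g. '1 2;3' aligns English
# sentences 1 and 2 with Chinese sentence 3; an empty side is mapped to [-1]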
english_chinese_split = link_attrib.split(';')
for s in range(len(english_chinese_split)):
if english_chinese_split[s] == '':
english_chinese_split[s] = '-1'
english_chinese_split[0] = map(int, english_chinese_split[0].split(' '))
english_chinese_split[1] = map(int, english_chinese_split[1].split(' '))
return english_chinese_split
# This will need to change for different XML structures; for our data set it splits and tokenizes the sentences
def ProcessCorpusFile(self, filename, language):
with gzip.open(filename, 'rb') as f:
tree = ET.parse(f)
data = []
root = tree.getroot()
f.close()
for child in root:
sentence = []
for token in child:
if (token.tag == 'w'):
text = token.text
if language == 'English':
text = self.fix_lower_l(text)
self.add_to_dictionary(text, language)
sentence.append(text)
sentence.append("</s>")
data.append(sentence)
return data
def fix_lower_l(self, text):
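# Heuristic: in tokens that are otherwise all uppercase, a lowercase 'l' is assumed to be a misread capital 'I' and is replaced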
if 'l' in text:
if text.replace('l', '') == text.replace('l', '').upper():
text = text.replace('l', 'I')
return text
def add_to_dictionary(self, word, language):
d = None
if language == 'English':
d = self.EnglishDictionary
elif language == 'Chinese':
d = self.ChineseDictionary
if word not in d.keys():
d[word] = len(d.keys())
def save_dictionaries(self):
with open('Chinese_Dictionary.pkl', 'wb') as f:
pickle.dump(self.ChineseDictionary, f, pickle.HIGHEST_PROTOCOL)
f.close()
with open('English_Dictionary.pkl', 'wb') as f:
pickle.dump(self.EnglishDictionary, f, pickle.HIGHEST_PROTOCOL)
f.close()
def saveDatasets(self, dataset_count):
e_filename = "pickle/english_dataset_" + str(dataset_count) + ".pkl"
c_filename = "pickle/chinese_dataset_" + str(dataset_count) + ".pkl"
e_file = open(e_filename, 'wb')
c_file = open(c_filename, 'wb')
pickle.dump(self.EnglishDataset, e_file)
pickle.dump(self.ChineseDataset, c_file)
e_file.close()
c_file.close()
def main():
dp = DatasetProcessor()
dp.CreateDataset('en-zh_cn.xml')
embed()
if __name__ == '__main__':
main()
```
#### File: jlhbaseball15/nmt_chinese_to_english/model.py
```python
import numpy as np
import pickle
from IPython import embed
class Weights:
def __init__(self, input_nodes, hidden_nodes, output_nodes, uniform):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# x weight values
self.wix = np.random.uniform(-uniform, uniform, (self.input_nodes, self.hidden_nodes))
self.wgx = np.random.uniform(-uniform, uniform, (self.input_nodes, self.hidden_nodes))
self.wfx = np.random.uniform(-uniform, uniform, (self.input_nodes, self.hidden_nodes))
self.wox = np.random.uniform(-uniform, uniform, (self.input_nodes, self.hidden_nodes))
# h weight values
self.wih = np.random.uniform(-uniform, uniform, (1, self.hidden_nodes))
self.wgh = np.random.uniform(-uniform, uniform, (1, self.hidden_nodes))
self.wfh = np.random.uniform(-uniform, uniform, (1, self.hidden_nodes))
self.woh = np.random.uniform(-uniform, uniform, (1, self.hidden_nodes))
# bias terms
self.bi = np.random.uniform(-uniform, uniform, (self.hidden_nodes, 1))
self.bg = np.random.uniform(-uniform, uniform, (self.hidden_nodes, 1))
self.bf = np.random.uniform(-uniform, uniform, (self.hidden_nodes, 1))
self.bo = np.random.uniform(-uniform, uniform, (self.hidden_nodes, 1))
# output weights
self.whv = np.random.uniform(-uniform, uniform, (self.hidden_nodes, self.output_nodes))
self.bv = np.random.uniform(-uniform, uniform, (self.output_nodes, 1))
class State:
def __init__(self, i, g, f, o, s, h, v):
self.i = i
self.g = g
self.f = f
self.o = o
self.s = s
self.h = h
self.v = v
class LSTM:
def __init__(self, input_nodes, hidden_nodes, output_nodes):
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
self.weights = Weights(input_nodes, hidden_nodes, output_nodes, 0.08)
def predict(self, x, initial_h=None):
b = False
if initial_h is not None:
h = initial_h
b = True
else:
h = np.zeros((self.hidden_nodes, 1))
s = np.zeros((self.hidden_nodes, 1))
v = np.zeros((self.output_nodes, 1))
state = State(s, s, s, s, s, h, v)
states = [state]
for j, token in enumerate(x):
try:
assert token.shape == (self.input_nodes, 1)
except AssertionError:
print token.shape
g = np.tanh(np.dot(self.weights.wgx.T, token) + self.weights.wgh.T*h + self.weights.bg)
i = self.sigmoid(np.dot(self.weights.wix.T, token) + self.weights.wih.T*h + self.weights.bi)
f = self.sigmoid(np.dot(self.weights.wfx.T, token) + self.weights.wfh.T*h + self.weights.bf)
o = self.sigmoid(np.dot(self.weights.wox.T, token) + self.weights.woh.T*h + self.weights.bo)
s = g*i + f*s
h = o*s
v = self.softmax(np.dot(self.weights.whv.T, h) + self.weights.bv)
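# When an initial hidden state is supplied (decoding mode), the predicted distribution is appended
# back onto the input sequence for up to 10 extra steps; this implicitly assumes output_nodes == input_nodes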
if b and j < 10:
x.append(v)
state = State(i, g, f, o, s, h, v)
states.append(state)
return states
def sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def softmax(self, x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
def save_weights(self, filename):
weights_file = open(filename, 'wb')
pickle.dump(self.weights, weights_file)
weights_file.close()
def load_weights(self, filename):
f = open(filename, 'rb')
self.weights = pickle.load(f)
f.close()
``` |
{
"source": "jlhbaseball15/object_store_downloader",
"score": 3
} |
#### File: object_store_downloader/cosio/upload.py
```python
import os
from .helper import is_file, to_absolute_path
def upload_to_object_store(local, bucket_name, remote, transfer_manager):
local = to_absolute_path(local)
if not os.path.exists(local):
print('Path does not exist')
return
try:
if is_file(local):
local_filename = os.path.basename(local)
remote = os.path.join(remote, local_filename)
upload_file(local, bucket_name, remote, transfer_manager)
else:
upload_directory(local, bucket_name, remote, transfer_manager)
except Exception as e:
print(e)
def upload_file(local_file, bucket_name, remote_file, transfer_manager):
print('Uploading file {} to bucket {} as {}'.format(
local_file, bucket_name, remote_file))
future = transfer_manager.upload(local_file, bucket_name, remote_file)
future.result()
def upload_directory(local_directory, bucket_name, remote_directory, transfer_manager):
print('Uploading local directory {} to bucket {} as {}'.format(
local_directory, bucket_name, remote_directory))
future = transfer_manager.upload_directory(local_directory, bucket_name, remote_directory)
future.result()
``` |
{
"source": "jlhcrawford/he-toolkit",
"score": 3
} |
#### File: logistic-regression/datasets/lr_base.py
```python
import numpy as np
# Sigmoid function
def sigmoid(x):
return 1. / (1 + np.exp(-x))
# 3-degree polynomial approximation of the sigmoid function, effective in the range [-5, 5]
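# Note: with these signs the polynomial tracks sigmoid(-x); the callers below (get_lgd_poly3, test_poly3) choose the argument sign accordingly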
def sigmoid_poly3(x):
return 0.5 - 1.20096 * (x / 8.) + 0.81562 * (x / 8.)**3
# 4-degree polynomial approximation of the log(sigmoid(x)) function, effective in the range [-5, 5]
def log_sig4(x):
return 0.000527 * x**4 - 0.0822 * x**2 + 0.5 * x - 0.78
# Remap labels y to {-1, 1} and compute z_i = y_i * x_i (row-wise product)
def get_z(X, y, add1=True):
if add1:
X_ = np.concatenate([np.ones((X.shape[0], 1)), X], axis=1)
else:
X_ = np.array(X)
y_ = 2 * y - 1
z = X_ * y_[:, None]
return np.array(z)
# Compute initial weight for logistic regression
def get_initweight(X, y, add1=True):
n = X.shape[0]
z = get_z(X, y, add1)
return np.sum(z, axis=0) / n
# get evaluation metrics (accuracy, f1 score, etc.)
def get_eval_metrics(actual, predicted):
tp = 0
tn = 0
fp = 0
fn = 0
for a, p in zip(actual, predicted):
if a == 1 and p == 1:
tp += 1
elif a == 1 and p == 0:
fn += 1
elif a == 0 and p == 1:
fp += 1
else:
tn += 1
acc = (tp + tn) / (tp + fp + tn + fn)
if tp + fp > 0:
precision = tp / (tp + fp) # correct 1s over predicted 1s
else:
precision = 0.
if tp + fn > 0:
recall = tp / (tp + fn) # correct 1s over actual 1s
else:
recall = 0.
if precision + recall == 0:
f1 = 0.
else:
f1 = 2 * (precision * recall) / (precision + recall)
return acc, precision, recall, f1
# Loss and gradient with the standard sigmoid
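# J(w) = (1/n) * sum_i log(1 + exp(-z_i.w));   dJ/dw = -(1/n) * sum_i sigmoid(-z_i.w) * z_i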
def get_lgd(X, y, w):
n = X.shape[0]
z = get_z(X, y)
zw = z @ w
# calculate loss
jw = np.sum(np.log(1 + np.exp(-zw))) / n
# calculate the gradient
dw = 1. / (1 + np.exp(zw))
dzw = z * dw[:, None]
djw = -np.sum(dzw, axis=0) / n
return jw, djw
# Loss and gradient with the poly3 sigmoid approximation
def get_lgd_poly3(X, y, w):
n = X.shape[0]
z = get_z(X, y)
zw = z @ w
# calculate loss
jw = np.sum(-log_sig4(zw)) / n
# calculate the gradient
dw = sigmoid_poly3(zw)
dzw = z * dw[:, None]
djw = -np.sum(dzw, axis=0) / n
return jw, djw
# test standard sigmoid
def test(X, y, w, add1=True):
if add1:
X_ = np.concatenate([np.ones((X.shape[0], 1)), X], axis=1)
else:
X_ = np.array(X)
y_ = np.array([])
for xi in X_:
xiw = np.inner(xi, w)
yi = sigmoid(xiw) #1./(1+np.exp(-xiw))
if yi > 0.5:
yi = 1
else:
yi = 0
y_ = np.append(y_, yi)
acc, _, recall, f1 = get_eval_metrics(y, y_)
return np.array(y_), acc, recall, f1
# test poly3 sigmoid
def test_poly3(X, y, w, add1=True):
if add1:
X_ = np.concatenate([np.ones((X.shape[0], 1)), X], axis=1)
else:
X_ = np.array(X)
y_ = np.array([])
for xi in X_:
xiw = np.inner(xi, w)
yi = sigmoid_poly3(-xiw) #1./(1+np.exp(-xiw))
if yi > 0.5:
yi = 1
else:
yi = 0
y_ = np.append(y_, yi)
acc, _, recall, f1 = get_eval_metrics(y, y_)
return np.array(y_), acc, recall, f1
``` |
{
"source": "jlhitt1993/pytentiostat",
"score": 3
} |
#### File: GUI/code/GUI_load_config.py
```python
import sys
from PySide2 import QtGui
from PySide2.QtWidgets import QApplication, QWidget, QFileDialog
class Ui_Load(QWidget):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(640, 480)
file = self.openFileNameDialog()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../pics/icon_pytentiostat.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
return file
def setupUi_save(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(640, 480)
folder = self.saveFileDialog()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../pics/icon_pytentiostat.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
return folder
def openFileNameDialog(self):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
file, _ = QFileDialog.getOpenFileName(self, "Load config file", "","All Files (*);;Config Files (*config.yml)", options=options)
if file:
return file
def saveFileDialog(self):
folder = QFileDialog.getExistingDirectory(self, "Select directory")
if folder:
return folder+'/'
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Ui_Load()
sys.exit(app.exec_())
```
#### File: GUI/code/LSV_GUI.py
```python
from PySide2 import QtCore, QtGui, QtWidgets
## Local library
# GUI_function
from GUI_load_config import Ui_Load
from Adv_params_GUI import Ui_Adv_Params
class Ui_LSV(object):
def load_folder_name(self):
"""
Initializes the 'Load config file' window
Returns
------
string : the loaded filename
"""
self.window = QtWidgets.QWidget()
self.Load = Ui_Load()
return self.Load.setupUi_save(self.window)
def AP_window(self):
"""
Initializes the 'Advanced parameters' window
Returns
------
AP : the Ui_Adv_Params object
window : QtWidgets.QMainWindow object
"""
self.window = QtWidgets.QMainWindow()
self.AP = Ui_Adv_Params()
self.AP.setupUi(self.window)
self.window.show()
return self.AP,self.window
def setupUi(self, LSV):
"""
Initializes the LSV window
"""
LSV.setObjectName("LSV")
LSV.resize(800, 519)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
LSV.setPalette(palette)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../pics/icon_pytentiostat.ico"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
LSV.setWindowIcon(icon)
self.centralwidget = QtWidgets.QWidget(LSV)
self.centralwidget.setObjectName("centralwidget")
self.rest_time_label = QtWidgets.QLabel(self.centralwidget)
self.rest_time_label.setEnabled(True)
self.rest_time_label.setGeometry(QtCore.QRect(10, 220, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.rest_time_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.rest_time_label.setFont(font)
self.rest_time_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.rest_time_label.setAcceptDrops(False)
self.rest_time_label.setAutoFillBackground(True)
self.rest_time_label.setFrameShape(QtWidgets.QFrame.Box)
self.rest_time_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.rest_time_label.setLineWidth(1)
self.rest_time_label.setMidLineWidth(1)
self.rest_time_label.setScaledContents(False)
self.rest_time_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.rest_time_label.setObjectName("rest_time_label")
self.experiment_type_label = QtWidgets.QLabel(self.centralwidget)
self.experiment_type_label.setEnabled(True)
self.experiment_type_label.setGeometry(QtCore.QRect(10, 50, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.experiment_type_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_type_label.setFont(font)
self.experiment_type_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.experiment_type_label.setAcceptDrops(False)
self.experiment_type_label.setAutoFillBackground(True)
self.experiment_type_label.setFrameShape(QtWidgets.QFrame.Box)
self.experiment_type_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.experiment_type_label.setLineWidth(1)
self.experiment_type_label.setMidLineWidth(1)
self.experiment_type_label.setScaledContents(False)
self.experiment_type_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.experiment_type_label.setObjectName("experiment_type_label")
self.experiment_step_number = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_step_number.setGeometry(QtCore.QRect(190, 260, 61, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_step_number.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_step_number.setFont(font)
self.experiment_step_number.setText("")
self.experiment_step_number.setFrame(True)
self.experiment_step_number.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_step_number.setObjectName("experiment_step_number")
self.select_output_filepath_button = QtWidgets.QPushButton(self.centralwidget)
self.select_output_filepath_button.setGeometry(QtCore.QRect(10, 130, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(91, 166, 232))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(248, 221, 23))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 120, 215))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.select_output_filepath_button.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.select_output_filepath_button.setFont(font)
self.select_output_filepath_button.setAcceptDrops(False)
self.select_output_filepath_button.setWhatsThis("")
self.select_output_filepath_button.setAutoFillBackground(True)
self.select_output_filepath_button.setInputMethodHints(QtCore.Qt.ImhNone)
self.select_output_filepath_button.setAutoDefault(False)
self.select_output_filepath_button.setDefault(True)
self.select_output_filepath_button.setFlat(True)
self.select_output_filepath_button.setObjectName("select_output_filepath_button")
self.sweep_rate_label = QtWidgets.QLabel(self.centralwidget)
self.sweep_rate_label.setEnabled(True)
self.sweep_rate_label.setGeometry(QtCore.QRect(10, 380, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.sweep_rate_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.sweep_rate_label.setFont(font)
self.sweep_rate_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.sweep_rate_label.setAcceptDrops(False)
self.sweep_rate_label.setAutoFillBackground(True)
self.sweep_rate_label.setFrameShape(QtWidgets.QFrame.Box)
self.sweep_rate_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.sweep_rate_label.setLineWidth(1)
self.sweep_rate_label.setMidLineWidth(1)
self.sweep_rate_label.setScaledContents(False)
self.sweep_rate_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.sweep_rate_label.setObjectName("sweep_rate_label")
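# "End voltage" caption label: same blue Active/Inactive/Disabled palette,
# bold Arial font and sunken box frame as the other parameter captions.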
self.end_voltage_label = QtWidgets.QLabel(self.centralwidget)
self.end_voltage_label.setEnabled(True)
self.end_voltage_label.setGeometry(QtCore.QRect(10, 340, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.end_voltage_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.end_voltage_label.setFont(font)
self.end_voltage_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.end_voltage_label.setAcceptDrops(False)
self.end_voltage_label.setAutoFillBackground(True)
self.end_voltage_label.setFrameShape(QtWidgets.QFrame.Box)
self.end_voltage_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.end_voltage_label.setLineWidth(1)
self.end_voltage_label.setMidLineWidth(1)
self.end_voltage_label.setScaledContents(False)
self.end_voltage_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.end_voltage_label.setObjectName("end_voltage_label")
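# "General parameters" section header label: red scheme (rgb(170, 0, 0)
# background, white text) that visually separates section headers from the
# blue per-field captions.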
self.general_parameters_label = QtWidgets.QLabel(self.centralwidget)
self.general_parameters_label.setEnabled(True)
self.general_parameters_label.setGeometry(QtCore.QRect(10, 10, 191, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.general_parameters_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.general_parameters_label.setFont(font)
self.general_parameters_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.general_parameters_label.setAcceptDrops(False)
self.general_parameters_label.setAutoFillBackground(True)
self.general_parameters_label.setFrameShape(QtWidgets.QFrame.Box)
self.general_parameters_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.general_parameters_label.setLineWidth(1)
self.general_parameters_label.setMidLineWidth(1)
self.general_parameters_label.setScaledContents(False)
self.general_parameters_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.general_parameters_label.setObjectName("general_parameters_label")
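# Line edit for the experiment end voltage. Only the Shadow colour role is
# overridden in its palette; everything else keeps the default styling.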
self.experiment_end_voltage = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_end_voltage.setGeometry(QtCore.QRect(190, 340, 61, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_end_voltage.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_end_voltage.setFont(font)
self.experiment_end_voltage.setText("")
self.experiment_end_voltage.setFrame(True)
self.experiment_end_voltage.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_end_voltage.setObjectName("experiment_end_voltage")
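# Read-only line edit that appears to display the selected experiment type for
# verification; the neutral rgb(240, 240, 240) background and frameless look
# make it read as a display field rather than an input.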
self.experiment_type_verify = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_type_verify.setGeometry(QtCore.QRect(190, 50, 211, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_type_verify.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_type_verify.setFont(font)
self.experiment_type_verify.setStyleSheet("background-color: rgb(240, 240, 240);")
self.experiment_type_verify.setFrame(False)
self.experiment_type_verify.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_type_verify.setReadOnly(True)
self.experiment_type_verify.setObjectName("experiment_type_verify")
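# Line edit for the experiment start voltage (same minimal Shadow-only
# palette and centred bold Arial styling as the end-voltage field above).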
self.experiment_start_voltage = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_start_voltage.setGeometry(QtCore.QRect(190, 300, 61, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_start_voltage.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_start_voltage.setFont(font)
self.experiment_start_voltage.setText("")
self.experiment_start_voltage.setFrame(True)
self.experiment_start_voltage.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_start_voltage.setObjectName("experiment_start_voltage")
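# "Output filename" caption label, using the shared blue caption palette.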
self.output_filename_label = QtWidgets.QLabel(self.centralwidget)
self.output_filename_label.setEnabled(True)
self.output_filename_label.setGeometry(QtCore.QRect(10, 90, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.output_filename_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.output_filename_label.setFont(font)
self.output_filename_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.output_filename_label.setAcceptDrops(False)
self.output_filename_label.setAutoFillBackground(True)
self.output_filename_label.setFrameShape(QtWidgets.QFrame.Box)
self.output_filename_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.output_filename_label.setLineWidth(1)
self.output_filename_label.setMidLineWidth(1)
self.output_filename_label.setScaledContents(False)
self.output_filename_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.output_filename_label.setObjectName("output_filename_label")
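# Line edit where the user enters the output file name.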
self.experiment_file_name = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_file_name.setGeometry(QtCore.QRect(190, 90, 241, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_file_name.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_file_name.setFont(font)
self.experiment_file_name.setInputMethodHints(QtCore.Qt.ImhNone)
self.experiment_file_name.setText("")
self.experiment_file_name.setFrame(True)
self.experiment_file_name.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_file_name.setObjectName("experiment_file_name")
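# Caption label for the step-number field (blue caption palette, boxed frame).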
self.step_number_label = QtWidgets.QLabel(self.centralwidget)
self.step_number_label.setEnabled(True)
self.step_number_label.setGeometry(QtCore.QRect(10, 260, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.step_number_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.step_number_label.setFont(font)
self.step_number_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.step_number_label.setAcceptDrops(False)
self.step_number_label.setAutoFillBackground(True)
self.step_number_label.setFrameShape(QtWidgets.QFrame.Box)
self.step_number_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.step_number_label.setLineWidth(1)
self.step_number_label.setMidLineWidth(1)
self.step_number_label.setScaledContents(False)
self.step_number_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.step_number_label.setObjectName("step_number_label")
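# Line edit for the sweep-rate value.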
self.experiment_sweep_rate = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_sweep_rate.setGeometry(QtCore.QRect(190, 380, 61, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_sweep_rate.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_sweep_rate.setFont(font)
self.experiment_sweep_rate.setText("")
self.experiment_sweep_rate.setFrame(True)
self.experiment_sweep_rate.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_sweep_rate.setObjectName("experiment_sweep_rate")
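# "Experiment parameters" section header label, using the same red header
# scheme as the "General parameters" header above.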
self.experiment_parameters_label = QtWidgets.QLabel(self.centralwidget)
self.experiment_parameters_label.setEnabled(True)
self.experiment_parameters_label.setGeometry(QtCore.QRect(10, 180, 191, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.experiment_parameters_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_parameters_label.setFont(font)
self.experiment_parameters_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.experiment_parameters_label.setAcceptDrops(False)
self.experiment_parameters_label.setAutoFillBackground(True)
self.experiment_parameters_label.setFrameShape(QtWidgets.QFrame.Box)
self.experiment_parameters_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.experiment_parameters_label.setLineWidth(1)
self.experiment_parameters_label.setMidLineWidth(1)
self.experiment_parameters_label.setScaledContents(False)
self.experiment_parameters_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.experiment_parameters_label.setObjectName("experiment_parameters_label")
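# Read-only line edit that shows the experiment duration.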
self.experiment_duration = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_duration.setGeometry(QtCore.QRect(220, 430, 171, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_duration.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_duration.setFont(font)
self.experiment_duration.setText("")
self.experiment_duration.setFrame(True)
self.experiment_duration.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_duration.setReadOnly(True)
self.experiment_duration.setObjectName("experiment_duration")
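# Line edit for the output file path; uses a smaller 7 pt font, presumably so
# longer paths fit within the field.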
self.experiment_file_path = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_file_path.setGeometry(QtCore.QRect(190, 130, 241, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_file_path.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(7)
font.setBold(True)
font.setWeight(75)
self.experiment_file_path.setFont(font)
self.experiment_file_path.setText("")
self.experiment_file_path.setFrame(True)
self.experiment_file_path.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_file_path.setReadOnly(False)
self.experiment_file_path.setObjectName("experiment_file_path")
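# "Experiment preview" header label (red header scheme; its palette setup
# continues below).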
self.experiment_preview_label = QtWidgets.QLabel(self.centralwidget)
self.experiment_preview_label.setEnabled(True)
self.experiment_preview_label.setGeometry(QtCore.QRect(440, 10, 351, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.experiment_preview_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_preview_label.setFont(font)
self.experiment_preview_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.experiment_preview_label.setAcceptDrops(False)
self.experiment_preview_label.setAutoFillBackground(True)
self.experiment_preview_label.setFrameShape(QtWidgets.QFrame.Box)
self.experiment_preview_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.experiment_preview_label.setLineWidth(1)
self.experiment_preview_label.setMidLineWidth(1)
self.experiment_preview_label.setScaledContents(False)
self.experiment_preview_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.experiment_preview_label.setObjectName("experiment_preview_label")
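# Frameless line edit styled to match the window background, used as a static units label for the sweep rate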
self.sweep_rate_units_label = QtWidgets.QLineEdit(self.centralwidget)
self.sweep_rate_units_label.setGeometry(QtCore.QRect(260, 380, 41, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.sweep_rate_units_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.sweep_rate_units_label.setFont(font)
self.sweep_rate_units_label.setStatusTip("")
self.sweep_rate_units_label.setStyleSheet("background-color: rgb(240, 240, 240);")
self.sweep_rate_units_label.setFrame(False)
self.sweep_rate_units_label.setObjectName("sweep_rate_units_label")
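# Framed caption label for the experiment duration field (dark-red background palette)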
self.experiment_duration_label = QtWidgets.QLabel(self.centralwidget)
self.experiment_duration_label.setEnabled(True)
self.experiment_duration_label.setGeometry(QtCore.QRect(10, 430, 201, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(212, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(113, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(85, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.experiment_duration_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_duration_label.setFont(font)
self.experiment_duration_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.experiment_duration_label.setAcceptDrops(False)
self.experiment_duration_label.setAutoFillBackground(True)
self.experiment_duration_label.setFrameShape(QtWidgets.QFrame.Box)
self.experiment_duration_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.experiment_duration_label.setLineWidth(1)
self.experiment_duration_label.setMidLineWidth(1)
self.experiment_duration_label.setScaledContents(False)
self.experiment_duration_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.experiment_duration_label.setObjectName("experiment_duration_label")
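# Frameless line edits used as static unit labels (v_vs_label_1 and v_vs_label_2) beside the voltage-reference combo boxes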
self.v_vs_label_1 = QtWidgets.QLineEdit(self.centralwidget)
self.v_vs_label_1.setGeometry(QtCore.QRect(260, 300, 41, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.v_vs_label_1.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.v_vs_label_1.setFont(font)
self.v_vs_label_1.setStatusTip("")
self.v_vs_label_1.setStyleSheet("background-color: rgb(240, 240, 240);")
self.v_vs_label_1.setFrame(False)
self.v_vs_label_1.setObjectName("v_vs_label_1")
self.v_vs_label_2 = QtWidgets.QLineEdit(self.centralwidget)
self.v_vs_label_2.setGeometry(QtCore.QRect(260, 340, 41, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.v_vs_label_2.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.v_vs_label_2.setFont(font)
self.v_vs_label_2.setStatusTip("")
self.v_vs_label_2.setStyleSheet("background-color: rgb(240, 240, 240);")
self.v_vs_label_2.setFrame(False)
self.v_vs_label_2.setObjectName("v_vs_label_2")
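# Center-aligned line edit for the rest-time value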
self.experiment_rest_time = QtWidgets.QLineEdit(self.centralwidget)
self.experiment_rest_time.setGeometry(QtCore.QRect(190, 220, 61, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.experiment_rest_time.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.experiment_rest_time.setFont(font)
self.experiment_rest_time.setText("")
self.experiment_rest_time.setFrame(True)
self.experiment_rest_time.setAlignment(QtCore.Qt.AlignCenter)
self.experiment_rest_time.setObjectName("experiment_rest_time")
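# "Advanced parameters" push button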
self.advanced_parameters_button = QtWidgets.QPushButton(self.centralwidget)
self.advanced_parameters_button.setGeometry(QtCore.QRect(400, 430, 201, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(91, 166, 232))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(248, 221, 23))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 120, 215))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.advanced_parameters_button.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.advanced_parameters_button.setFont(font)
self.advanced_parameters_button.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.advanced_parameters_button.setAcceptDrops(False)
self.advanced_parameters_button.setWhatsThis("")
self.advanced_parameters_button.setAutoFillBackground(True)
self.advanced_parameters_button.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.advanced_parameters_button.setInputMethodHints(QtCore.Qt.ImhNone)
self.advanced_parameters_button.setAutoRepeatDelay(301)
self.advanced_parameters_button.setAutoRepeatInterval(96)
self.advanced_parameters_button.setAutoDefault(False)
self.advanced_parameters_button.setDefault(False)
self.advanced_parameters_button.setFlat(False)
self.advanced_parameters_button.setObjectName("advanced_parameters_button")
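# Framed caption label for the start-voltage field (blue background palette)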
self.start_voltage_label = QtWidgets.QLabel(self.centralwidget)
self.start_voltage_label.setEnabled(True)
self.start_voltage_label.setGeometry(QtCore.QRect(10, 300, 161, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(161, 188, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(84, 151, 213))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(44, 80, 114))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(33, 60, 85))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(67, 121, 171))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.start_voltage_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.start_voltage_label.setFont(font)
self.start_voltage_label.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.start_voltage_label.setAcceptDrops(False)
self.start_voltage_label.setAutoFillBackground(True)
self.start_voltage_label.setFrameShape(QtWidgets.QFrame.Box)
self.start_voltage_label.setFrameShadow(QtWidgets.QFrame.Sunken)
self.start_voltage_label.setLineWidth(1)
self.start_voltage_label.setMidLineWidth(1)
self.start_voltage_label.setScaledContents(False)
self.start_voltage_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.start_voltage_label.setObjectName("start_voltage_label")
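# Frameless line edit used as the static units label next to the rest-time field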
self.rest_time_units_label = QtWidgets.QLineEdit(self.centralwidget)
self.rest_time_units_label.setGeometry(QtCore.QRect(260, 220, 16, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
self.rest_time_units_label.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.rest_time_units_label.setFont(font)
self.rest_time_units_label.setStatusTip("")
self.rest_time_units_label.setStyleSheet("background-color: rgb(240, 240, 240);")
self.rest_time_units_label.setFrame(False)
self.rest_time_units_label.setObjectName("rest_time_units_label")
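# "Save experiment file" push button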
self.save_experiment_file_button = QtWidgets.QPushButton(self.centralwidget)
self.save_experiment_file_button.setGeometry(QtCore.QRect(610, 430, 181, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(91, 166, 232))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(248, 221, 23))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 120, 215))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.save_experiment_file_button.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.save_experiment_file_button.setFont(font)
self.save_experiment_file_button.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.save_experiment_file_button.setAcceptDrops(False)
self.save_experiment_file_button.setWhatsThis("")
self.save_experiment_file_button.setAutoFillBackground(True)
self.save_experiment_file_button.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.save_experiment_file_button.setInputMethodHints(QtCore.Qt.ImhNone)
self.save_experiment_file_button.setAutoRepeatDelay(301)
self.save_experiment_file_button.setAutoRepeatInterval(96)
self.save_experiment_file_button.setAutoDefault(False)
self.save_experiment_file_button.setDefault(False)
self.save_experiment_file_button.setFlat(False)
self.save_experiment_file_button.setObjectName("save_experiment_file_button")
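# "Generate preview" push button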
self.generate_preview_button = QtWidgets.QPushButton(self.centralwidget)
self.generate_preview_button.setGeometry(QtCore.QRect(610, 350, 181, 31))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(91, 166, 232))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(248, 221, 23))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.NoRole, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 120, 215))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Highlight, brush)
brush = QtGui.QBrush(QtGui.QColor(247, 217, 21))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Link, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.NoRole, brush)
self.generate_preview_button.setPalette(palette)
font = QtGui.QFont()
font.setFamily("Arial")
font.setBold(True)
font.setWeight(75)
self.generate_preview_button.setFont(font)
self.generate_preview_button.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.generate_preview_button.setAcceptDrops(False)
self.generate_preview_button.setWhatsThis("")
self.generate_preview_button.setAutoFillBackground(True)
self.generate_preview_button.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.generate_preview_button.setInputMethodHints(QtCore.Qt.ImhNone)
self.generate_preview_button.setAutoRepeatDelay(301)
self.generate_preview_button.setAutoRepeatInterval(96)
self.generate_preview_button.setAutoDefault(False)
self.generate_preview_button.setDefault(False)
self.generate_preview_button.setFlat(False)
self.generate_preview_button.setObjectName("generate_preview_button")
self.voltage_ref = QtWidgets.QComboBox(self.centralwidget)
self.voltage_ref.setGeometry(QtCore.QRect(310, 300, 101, 31))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.voltage_ref.setFont(font)
self.voltage_ref.setEditable(False)
self.voltage_ref.setInsertPolicy(QtWidgets.QComboBox.InsertAtBottom)
self.voltage_ref.setObjectName("voltage_ref")
self.voltage_ref.addItem("")
self.voltage_ref.addItem("")
self.voltage_ref_2 = QtWidgets.QComboBox(self.centralwidget)
self.voltage_ref_2.setGeometry(QtCore.QRect(310, 340, 101, 31))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.voltage_ref_2.setFont(font)
self.voltage_ref_2.setEditable(False)
self.voltage_ref_2.setInsertPolicy(QtWidgets.QComboBox.InsertAtBottom)
self.voltage_ref_2.setObjectName("voltage_ref_2")
self.voltage_ref_2.addItem("")
self.voltage_ref_2.addItem("")
self.plot_area = QtWidgets.QWidget(self.centralwidget)
self.plot_area.setGeometry(QtCore.QRect(440, 50, 351, 281))
self.plot_area.setStyleSheet("border: 1px solid black;\n"
"background-color: rgb(255, 255, 255);\n"
"\n"
"")
self.plot_area.setObjectName("plot_area")
LSV.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(LSV)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 18))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(240, 240, 240))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
self.menubar.setPalette(palette)
self.menubar.setObjectName("menubar")
LSV.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(LSV)
self.statusbar.setStyleSheet("background-color: rgb(228, 228, 228);")
self.statusbar.setObjectName("statusbar")
LSV.setStatusBar(self.statusbar)
self.V1 = QtWidgets.QAction(LSV)
self.V1.setCheckable(True)
self.V1.setObjectName("V1")
self.V2 = QtWidgets.QAction(LSV)
self.V2.setObjectName("V2")
self.retranslateUi(LSV)
QtCore.QMetaObject.connectSlotsByName(LSV)
def retranslateUi(self, LSV):
_translate = QtCore.QCoreApplication.translate
LSV.setWindowTitle(_translate("LSV", "LSV Experiment Creator"))
self.rest_time_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Rest Time</p></body></html>"))
self.experiment_type_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Experiment Type</p></body></html>"))
self.experiment_step_number.setStatusTip(_translate("LSV", "Number of steps between voltages over which the experiment will be conducted."))
self.select_output_filepath_button.setStatusTip(_translate("LSV", "Click this button to select the output filepath for your files."))
self.select_output_filepath_button.setText(_translate("LSV", "Output Filepath"))
self.sweep_rate_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Sweep Rate</p></body></html>"))
self.end_voltage_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">End Voltage</p></body></html>"))
self.general_parameters_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">General Parameters</p></body></html>"))
self.experiment_end_voltage.setStatusTip(_translate("LSV", "Voltage to end sweep at."))
self.experiment_type_verify.setStatusTip(_translate("LSV", "Type of experiment for this file."))
self.experiment_type_verify.setText(_translate("LSV", "Linear Sweep Voltammetry"))
self.experiment_start_voltage.setStatusTip(_translate("LSV", "Voltage to start sweep at."))
self.output_filename_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Output Filename</p></body></html>"))
self.experiment_file_name.setStatusTip(_translate("LSV", "Name that will be attached to the output files. Data will be output to _data.csv and an experiment file will be saved to _config.yml."))
self.step_number_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Step Number</p></body></html>"))
self.experiment_sweep_rate.setStatusTip(_translate("LSV", "Rate at which voltage is swept over."))
self.experiment_parameters_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Experiment Parameters</p></body></html>"))
self.experiment_duration.setStatusTip(_translate("LSV", "Estimated length of experiment in H:M:S."))
self.experiment_file_path.setStatusTip(_translate("LSV", "Path to which data and experiment files will be exported."))
self.experiment_preview_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Experiment Preview</p></body></html>"))
self.sweep_rate_units_label.setText(_translate("LSV", "mV/s"))
self.experiment_duration_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Experiment Duration</p></body></html>"))
self.v_vs_label_1.setText(_translate("LSV", "V vs."))
self.v_vs_label_2.setText(_translate("LSV", "V vs."))
self.experiment_rest_time.setStatusTip(_translate("LSV", "Length of time to wait before start voltage."))
self.advanced_parameters_button.setStatusTip(_translate("LSV", "Click this button to edit the hard-set parameters used for all experiments."))
self.advanced_parameters_button.setText(_translate("LSV", "Advanced Parameters"))
self.start_voltage_label.setText(_translate("LSV", "<html><head/><body><p align=\"center\">Start Voltage</p></body></html>"))
self.rest_time_units_label.setText(_translate("LSV", "s"))
self.save_experiment_file_button.setStatusTip(_translate("LSV", "Click this button to save a config file with the created experiment."))
self.save_experiment_file_button.setText(_translate("LSV", "Save Experiment File"))
self.generate_preview_button.setStatusTip(_translate("LSV", "Click this button to generate a preview of the experiment to be run."))
self.generate_preview_button.setText(_translate("LSV", "Generate Preview"))
self.voltage_ref.setCurrentText(_translate("LSV", "Vref"))
self.voltage_ref.setItemText(0, _translate("LSV", "Vref"))
self.voltage_ref.setItemText(1, _translate("LSV", "Vocv"))
self.voltage_ref_2.setCurrentText(_translate("LSV", "Vref"))
self.voltage_ref_2.setItemText(0, _translate("LSV", "Vref"))
self.voltage_ref_2.setItemText(1, _translate("LSV", "Vocv"))
self.V1.setText(_translate("LSV", "V1"))
self.V2.setText(_translate("LSV", "V2"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
LSV = QtWidgets.QMainWindow()
ui = Ui_LSV()
ui.setupUi(LSV)
LSV.show()
sys.exit(app.exec_())
``` |
{
"source": "jlhonora/rabbitmq-demo",
"score": 3
} |
#### File: jlhonora/rabbitmq-demo/client.py
```python
import os
import datetime
import sys
sys.path.append("..")
import puka
def get_queue_name():
return os.environ.get('RABBITMQ_QUEUE', 'default_queue')
def get_timestamp():
return datetime.datetime.utcnow().strftime("%H:%M:%S")
# Puka uses IPv6 addresses first, so we can't use localhost
# as address. Check https://github.com/majek/puka/issues/35
client = puka.Client("amqp://guest:[email protected]:5672/")
promise = client.connect()
client.wait(promise)
queue_name = get_queue_name()
args = {}
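# Per-queue message TTL in milliseconds: messages left unconsumed for 5 seconds are expired by the broker.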
args['x-message-ttl'] = 5000
promise = client.queue_declare(
queue=queue_name,
auto_delete=False,
exclusive=False,
durable=True,
arguments=args
)
client.wait(promise)
print " [*] Waiting for messages in %s. Press CTRL+C to quit." % queue_name
consume_promise = client.basic_consume(queue=queue_name, prefetch_count=1)
while True:
result = client.wait(consume_promise)
print "%s [python client] Received %r" % (get_timestamp(), result['body'])
client.basic_ack(result)
promise = client.close()
client.wait(promise)
``` |
{
"source": "jlhourENSAE/podcast-app",
"score": 3
} |
#### File: podcast-app/src/cli.py
```python
# NOTE: the original module docstring (the docopt usage spec) did not survive extraction;
# the spec below is an assumed reconstruction based on the keys read from `args` in main(),
# so that docopt(__doc__) has something to parse.
"""Podcast player command-line interface.
Usage:
    cli.py ls
    cli.py lastep <podcast_name>
    cli.py pastep <podcast_name>
    cli.py stop
"""
from docopt import docopt
import os
import sys
import yaml
import vlc
import time
from src.podcastClasses import Podcast, Episode
from src.playerOperations import stopListening, resumeListening, jumpTime
def main():
args = docopt(__doc__)
# Load podcast list :
config_file = 'subscriptions.yml'
with open(config_file, 'r') as stream:
config = yaml.safe_load(stream)
if args['ls']:
podcasts = list(config['subscriptions'].keys())
print('You are subscribed to the following podcasts :')
print(', \n'.join(podcasts))
if args['lastep']:
url = config['subscriptions'].get(args['<podcast_name>'])
if url is not None:
podcast = Podcast(url)
print(f'You have selected : {podcast.title}')
history = podcast.getLastEpisode()
# New episode
newEpisode = next(history)
newEpisode.displayInfos()
player = vlc.MediaPlayer(newEpisode.audioUrl)
resumeListening(newEpisode, player)
try:
while player.is_playing() == 1:
continue
stopListening(newEpisode, player)
except KeyboardInterrupt:
stopListening(newEpisode, player)
try:
sys.exit(0)
except SystemExit:
os._exit(0)
if args['pastep']:
url = config['subscriptions'].get(args['<podcast_name>'])
if args['stop']:
pass
if __name__=='__main__':
pass
``` |
{
"source": "jli0108/simplex",
"score": 3
} |
#### File: jli0108/simplex/revised_simplex.py
```python
import numpy as np
from numpy.linalg import inv
import sys
# -- Solve your LP problems with this simple code!!! -----
# -- Your LP must be in the following format: ------------
# -- Maximize/Minimize c^T x -----------------------------
# -- subject to Ax <= b ----------------------------------
# -- Modify if you want to maximize or minimize -----------------------
maximize : bool = True
# -- Modify if your problem is in standard or canonical form ----------
standard : bool = False
# -- Modify these arrays in the correct format ------------------------
c : np.ndarray = np.array([[7, 6, 5, -2, 3]])
A_B : np.ndarray = np.array([[1, 3, 5, -2, 2],
[4, 2, -2, 1, 1],
[2, 4, 4, -2, 5],
[3, 1, 2, -1, -2]])
b : np.ndarray = np.array([[4],
[3],
[5],
[1]])
assert A_B.shape[0] == b.shape[0]
assert c.shape[1] == A_B.shape[1]
def solve_LP(c : np.ndarray, A_B : np.ndarray, b : np.ndarray, maximize : bool, standard : bool) -> None:
if (standard):
solve_standard(c, A_B, b, maximize)
else:
solve_canonical(c, A_B, b, maximize)
# solves problem in canonical form
def solve_canonical(c : np.ndarray, A_B : np.ndarray, b : np.ndarray, maximize : bool) -> None:
if not maximize:
c = -c
optimal_cost, RHS, basic_variables, shadow_prices, A_B_inv = maximize_canonical(c, A_B, b)
print("----- Solution -----")
if maximize:
print("Max value of objective function:", optimal_cost)
else:
print("Min value of objective function:", -optimal_cost)
for i in range(basic_variables.shape[0]):
print(f"x{basic_variables[i]+1} = {RHS[i,0] : 0.7f}")
for i in range(shadow_prices.shape[1]):
print(f"Shadow price of x{basic_variables[i]+1}: {shadow_prices[0,i] : 0.7f}")
for i in range(RHS.shape[0]):
allowable_increase = None
allowable_decrease = None
for j in range(RHS.shape[0]):
if A_B_inv[j,i] > 0:
if allowable_decrease is None or allowable_decrease > RHS[i,0] / A_B_inv[j,i]:
allowable_decrease = RHS[i,0] / A_B_inv[j,i]
elif A_B_inv[j,i] < 0:
if allowable_increase is None or allowable_increase > - RHS[i,0] / A_B_inv[j,i]:
allowable_increase = - RHS[i,0] / A_B_inv[j,i]
if allowable_decrease is None:
allowable_decrease = np.inf
if allowable_increase is None:
allowable_increase = np.inf
print(f"Row {i+1}: Allowable decrease = {allowable_decrease : 0.001f}, Allowable increase = {allowable_increase : 0.001f}")
#print(A_B_inv)
print("Set all other variables to zero.")
def solve_standard(c, A_B, b, maximize):
sys.exit("Not implemented without tableaus yet. Use simplex.py")
# takes problem in standard form and solves phase 1 LP
def solve_phase_one(A_B, b):
sys.exit("Not implemented without tableaus yet. Use simplex.py")
# solves maximization problem in canonical form
# returns the final tableau and a list of the basis variables
def maximize_canonical(c_N, A_N, b):
#if b.min() < 0:
# sys.exit("Initial feasibility problem. Not implemented yet.")
m, n = A_N.shape
A_B = np.eye(m)
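# Flip the sign of any row with a negative right-hand side (and of its slack column) so the starting basis has b >= 0.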
for i in range(m):
if b[i] < 0:
A_N[i] *= -1
b[i,0] *= -1
A_B[i] *= -1
A = np.concatenate((A_N, np.eye(m)), axis=1)
#print(A)
# c_B is coefficients from original c (not getting reduced)
c_B = np.zeros((1,m))
c = np.concatenate((c_N, c_B), axis=1)
basic_variables = np.arange(n, m+n)
nonbasic_variables = np.arange(0, n)
A_B_inv = np.eye(m)
y = np.zeros((1,m))
b_bar = b
reduced_c_N = c_N
index_of_entering_variable = None
i = 0
# for first iteration, entering variable is equal to index
while i < n:# and index_of_entering_variable is None:
# Uncomment lines above and below to use Bland's rule
# (if so, we do not need to check if entering_variable is None)
if reduced_c_N[0,i] > 0 and (index_of_entering_variable is None or reduced_c_N[0,i] > reduced_c_N[0,index_of_entering_variable]):
# It should usually be faster if we take the largest coefficient
#if c_N[i] > 0 and (entering_variable is None or c_N[i] > c_N[entering_variable]):
index_of_entering_variable = i
i = i + 1
while index_of_entering_variable is not None:
reduced_col_of_A_N = np.dot(A_B_inv, A_N[:,index_of_entering_variable])
#print(A_B_inv)
#print(b_bar[:,0] / reduced_col_of_A_N)
index_of_leaving_variable = None
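# Minimum-ratio test: among rows with a positive entry in the entering column, pick the one minimizing b_bar[i] / reduced_col[i]; the comparison is done by cross-multiplication to avoid division.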
for i in range(m):
if (reduced_col_of_A_N[i] > 0 and
(index_of_leaving_variable is None or
b_bar[i,0] * reduced_col_of_A_N[index_of_leaving_variable] < b_bar[index_of_leaving_variable,0] * reduced_col_of_A_N[i])):
index_of_leaving_variable = i
if index_of_leaving_variable is None:
sys.exit("LP is unbounded")
entering_variable = nonbasic_variables[index_of_entering_variable]
leaving_variable = basic_variables[index_of_leaving_variable]
#print("basis",basic_variables+1)
#print("nonbasic",nonbasic_variables+1)
#print(f"x{entering_variable+1} enters, x{leaving_variable+1} leaves")
# variable enters the basis
basic_variables[index_of_leaving_variable] = entering_variable
nonbasic_variables[index_of_entering_variable] = leaving_variable
# A_B gets updated based on new basic variables
A_B[:,index_of_leaving_variable] = A[:,entering_variable]
A_N[:,index_of_entering_variable] = A[:,leaving_variable]
# c_B gets updated based on new basic variables
c_B[0,index_of_leaving_variable] = c[0,entering_variable]
c_N[0,index_of_entering_variable] = c[0,leaving_variable]
#print(A_B)
#if (np.linalg.det(A_B) == 0):
A_B_inv = inv(A_B)
y = np.dot(c_B, A_B_inv)
b_bar = np.dot(A_B_inv, b)
reduced_c_N = c_N - np.dot(y, A_N)
index_of_entering_variable = None
i = 0
#print(reduced_c_N)
# for first iteration, entering variable is equal to index
while i < n:# and index_of_entering_variable is None:
# Uncomment lines above and below to use Bland's rule
# (if so, we do not need to check if entering_variable is None)
if reduced_c_N[0,i] > 0 and (index_of_entering_variable is None or reduced_c_N[0,i] > reduced_c_N[0,index_of_entering_variable]):
# It should usually be faster if we take the largest coefficient
#if c_N[i] > 0 and (entering_variable is None or c_N[i] > c_N[entering_variable]):
index_of_entering_variable = i
i = i + 1
#print(index_of_leaving_variable)
return np.dot(y, b)[0,0], b_bar, basic_variables, y, A_B_inv
#solve_canonical(c, A_B, b, maximize)
#solve_standard(c, A_B, b, maximize)
solve_LP(c, A_B, b, maximize, standard)
``` |
{
"source": "jli0117/ehrMGAN",
"score": 2
} |
#### File: jli0117/ehrMGAN/Bilateral_lstm_class.py
```python
import tensorflow as tf
class Bilateral_LSTM_cell():
def __init__(self, input_dim, hidden_dim, scope_name):
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.scope_name = scope_name
def __call__(self, x, hidden_memory_tm1, hidden_memory_tm2):
## unstack hidden vectors and context vectors
previous_hidden_state, c_prev = tf.unstack(hidden_memory_tm1)
previous_hidden_state_, _ = tf.unstack(hidden_memory_tm2)
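# Each gate below mixes the current input x with the previous hidden states of both recurrent streams (tm1 and tm2), which is what makes the cell "bilateral".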
# Input Gate (Wi, Ui, Vi)
with tf.variable_scope(self.scope_name + "Input_gate", reuse=tf.AUTO_REUSE):
Wi = tf.get_variable(name='Wi', shape=[self.input_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Ui = tf.get_variable(name='Ui', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Vi = tf.get_variable(name='Vi', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
i = tf.sigmoid(
tf.matmul(x, Wi) +
tf.matmul(previous_hidden_state, Ui) +
tf.matmul(previous_hidden_state_, Vi)
)
# Forget gate (Wf, Uf, Vf)
with tf.variable_scope(self.scope_name + "Forget_gate", reuse=tf.AUTO_REUSE):
Wf = tf.get_variable(name='Wf', shape=[self.input_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Uf = tf.get_variable(name='Uf', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Vf = tf.get_variable(name='Vf', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
f = tf.sigmoid(
tf.matmul(x, Wf) +
tf.matmul(previous_hidden_state, Uf) +
tf.matmul(previous_hidden_state_, Vf)
)
# Output gate (Wo, Uo, Vo)
with tf.variable_scope(self.scope_name + "Output_gate", reuse=tf.AUTO_REUSE):
Wo = tf.get_variable(name='Wo', shape=[self.input_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Uo = tf.get_variable(name='Uo', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Vo = tf.get_variable(name='Vo', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
o = tf.sigmoid(
tf.matmul(x, Wo) +
tf.matmul(previous_hidden_state, Uo) +
tf.matmul(previous_hidden_state_, Vo)
)
# Updated part for new cell state (Wc, Uc, Vc)
with tf.variable_scope(self.scope_name + "Cell_gate", reuse=tf.AUTO_REUSE):
Wc = tf.get_variable(name='Wc', shape=[self.input_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Uc = tf.get_variable(name='Uc', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
Vc = tf.get_variable(name='Vc', shape=[self.hidden_dim, self.hidden_dim], initializer=tf.random_normal_initializer(mean=0, stddev=0.1))
c_ = tf.nn.tanh(
tf.matmul(x, Wc) +
tf.matmul(previous_hidden_state, Uc) +
tf.matmul(previous_hidden_state_, Vc)
)
# Final Memory cell
c = f * c_prev + i * c_
# Current Hidden state
current_hidden_state = o * tf.nn.tanh(c)
return current_hidden_state, tf.stack([current_hidden_state, c])
class MultilayerCells():
def __init__(self, cells):
self.cells = cells
def __call__(self, input, state, state_):
cur_inp = input
new_states = []
for i in range(len(self.cells)):
with tf.variable_scope("cell_%d" % i):
cell = self.cells[i]
cur_inp, new_state = cell(x=cur_inp, hidden_memory_tm1=state[i], hidden_memory_tm2=state_[i])
new_states.append(new_state)
return cur_inp, new_states
``` |
{
"source": "jli05/CS229-TimeSeries-LSTM",
"score": 3
} |
#### File: jli05/CS229-TimeSeries-LSTM/tspred_qtl.py
```python
import random
import sys
import argparse
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
def build_lstm_graph(n_features, n_targets, quantiles, burn_in,
num_units, input_keep_prob=1.0, output_keep_prob=1.0,
variable_scope='ts', dtype=tf.float32):
''' Build the symbolic graph for modeling the time series '''
# x, y are indexed by batch, time_step and feature
with tf.variable_scope(variable_scope):
x = tf.placeholder(dtype, [None, None, n_features], name='x')
y = tf.placeholder(dtype, [None, None, n_targets], name='y')
cell = tf.contrib.rnn.LSTMCell(num_units, use_peepholes=True)
dropout_cell = tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob,
output_keep_prob)
outputs, state = tf.nn.dynamic_rnn(dropout_cell, x, dtype=dtype)
w_fcst = tf.get_variable('w_fcst', [n_features + num_units,
len(quantiles) * n_targets])
b_fcst = tf.get_variable('b_fcst', [len(quantiles) * n_targets])
# Use the last n_targets elements in each output vector at
# each time step to match against y
# Features for linear forecast
features_ = tf.concat([tf.reshape(x, [-1, n_features]),
tf.reshape(outputs, [-1, num_units])], axis=1)
# Predicted quantiles
pred = tf.nn.xw_plus_b(features_, w_fcst, b_fcst)
# Transform into shape [n_samples, n_steps, n_quantiles * n_targets]
y_tiled = tf.tile(y, [1, 1, len(quantiles)])
pred = tf.reshape(pred, tf.shape(y_tiled))
# TODO: add penalty on LSTM weight matrices and w_fcst
theta = y_tiled[:, burn_in:, :] - pred[:, burn_in:, :]
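# Pinball (quantile) loss: q * theta when theta >= 0 and (q - 1) * theta when theta < 0, with one column per (quantile, target) pair.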
err = theta * np.repeat(quantiles, n_targets) - tf.minimum(theta, 0)
cost = tf.reduce_mean(tf.reshape(err, [-1, len(quantiles) * n_targets]),
axis=0)
cost = tf.reduce_mean(cost)
return {'x': x, 'y': y, 'pred': pred, 'cost': cost,
'lstm_state': state, 'lstm_outputs': outputs,
'lstm_weights': cell.weights,
'w_fcst': w_fcst, 'b_fcst': b_fcst}, cell
def train_lstm(sess, ts, y=None,
features_func=None, targets_func=None,
quantiles=[.5], burn_in=50,
batch_size=50, lr0=1e-5, lr_decay=(50, .99),
n_iter=500, valid_every=5, print_every=5,
variable_scope='ts', **kwargs):
''' Train LSTM for given features and targets functions '''
assert (y is not None or
((features_func is not None) and (targets_func is not None)))
# ts <num samples>-by-<length of every sample>
# Split ts into train, dev set; we'll only use ts_test once at the end
test_size = .1
if y is not None:
features, dev_features, targets, dev_targets = (
train_test_split(ts, y, test_size=test_size))
else:
ts_train, ts_dev = train_test_split(ts, test_size=test_size)
# Make features, targets for LSTM training
features = np.apply_along_axis(features_func, axis=1, arr=ts_train)
targets = np.apply_along_axis(targets_func, axis=1, arr=ts_train)
dev_features = np.apply_along_axis(features_func, axis=1, arr=ts_dev)
dev_targets = np.apply_along_axis(targets_func, axis=1, arr=ts_dev)
if features.ndim == 2:
features = features[:, :, None]
dev_features = dev_features[:, :, None]
if targets.ndim == 2:
targets = targets[:, :, None]
dev_targets = dev_targets[:, :, None]
n_features = features.shape[2]
n_targets = targets.shape[2]
# The burn-in period would be excluded from cost calculation
if np.isscalar(quantiles):
quantiles = [quantiles]
lstm, cell = build_lstm_graph(n_features, n_targets, quantiles, burn_in,
variable_scope=variable_scope, **kwargs)
# Initialise optimiser
with tf.variable_scope(variable_scope):
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(lr0, global_step,
lr_decay[0], lr_decay[1])
optimizer = (tf.train.MomentumOptimizer(learning_rate, momentum=.5)
.minimize(lstm['cost'], global_step=global_step))
# Begin training
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
variable_scope)
sess.run(tf.variables_initializer(var_list))
# Run minibatch SGD
# Break when Ctrl-C is pressed
try:
for i in range(n_iter):
msg = f'Iter {i}'
# Run SGD
batch = random.sample(range(features.shape[0]), batch_size)
_, cost = sess.run([optimizer, lstm['cost']],
feed_dict={lstm['x']: features[batch],
lstm['y']: targets[batch]})
msg += f' Train loss {cost:.4f}'
if i % valid_every == 0:
dict_ = {lstm['x']: dev_features, lstm['y']: dev_targets}
dev_cost = sess.run(lstm['cost'], feed_dict=dict_)
msg += f' Dev loss {dev_cost:.4f}'
if i % print_every == 0:
print(msg, file=sys.stderr)
except KeyboardInterrupt:
pass
return lstm, cell
def eval_ar(sess, lstm, ts_test, features_func, targets_func, burn_in):
''' Evaluate the AR model '''
# ts_test <num samples>-by-<num variables>
# -by-<length of every sample/series>
TS_WITH_NOISE = 0
TS_WITH_NO_NOISE = 1
x = ts_test[:, TS_WITH_NOISE, :].squeeze()
x_no_noise = ts_test[:, TS_WITH_NO_NOISE, :].squeeze()
features = np.apply_along_axis(features_func, axis=1, arr=x)
targets = np.apply_along_axis(targets_func, axis=1, arr=x)
targets_no_noise = np.apply_along_axis(targets_func, axis=1,
arr=x_no_noise)
if features.ndim == 2:
features = features[:, :, None]
if targets.ndim == 2:
targets = targets[:, :, None]
targets_no_noise = targets_no_noise[:, :, None]
dict_ = {lstm['x']: features, lstm['y']: targets}
cost, pred = sess.run([lstm['cost'], lstm['pred']], feed_dict=dict_)
# For simple feature and median quantile
cost_no_noise = mean_squared_error(targets_no_noise[:, burn_in:, 0],
pred[:, burn_in:, 0])
return cost, np.sqrt(cost_no_noise), pred
if __name__ == '__main__':
''' Command line interface
Usage:
seq 1 50 | xargs -I {} -P 3 python3 tspred_qtl.py simulation.npz simulation_test.npz >> out.csv
'''
# Parse command line
parser = argparse.ArgumentParser()
parser.add_argument('train_file')
parser.add_argument('test_file')
args = parser.parse_args()
# Read data
data = np.load(args.train_file)['data']
data_test = np.load(args.test_file)['data']
# Train
simple_features = lambda x: x[:-1]
moments_features = lambda x: np.column_stack([x[:-1], x[:-1] ** 2])
sess = tf.Session()
burn_in = 50
features_func = simple_features
res = train_lstm(sess, data[:, 0, :].squeeze() * 10,
features_func=features_func, targets_func=lambda x: x[1:],
quantiles=[.5], burn_in=burn_in,
batch_size=50, lr0=3e-3, lr_decay=(50, .99),
n_iter=300, num_units=10)
# Test
cost, cost_no_noise, pred = eval_ar(sess, res[0],
data_test * 10,
features_func,
lambda x: x[1:], burn_in)
pred_error = data_test[:, 1, 1:].squeeze() - pred.squeeze() / 10
print(' '.join([str(w) for w in pred_error.flat]))
``` |
{
"source": "jli113/mortar",
"score": 3
} |
#### File: mortar/python/load_csv.py
```python
import io
import csv
import requests
from requests.utils import quote
import sys
if len(sys.argv) != 2:
print("Usage: python load_csv.py <path to csv file>")
sys.exit(1)
def register(source, name, uri, btype, units):
d = {
'SourceName': source,
'Name': name,
'Units': units,
'BrickURI': uri,
'BrickClass': btype
}
resp = requests.post("http://localhost:5001/register_stream", json=d)
if not resp.ok:
print(resp.content)
# TODO: need to split the files by source!!
with open(sys.argv[1], 'r') as f:
with io.StringIO() as buf:
w = csv.writer(buf)
r = csv.DictReader(f)
registered = False
for row in r:
if not registered:
source = quote(row['site'])
name = quote(row['label'])
uri = quote(row['id'])
btype = quote(row['type'])
units = 'degF'
registered = True
w.writerow([row['time'], row['value']])
url = (f'http://localhost:5001/insert/csv?source={source}&name={name}'
f'&brick_uri={uri}&units={units}&brick_class={btype}'
'&apikey=f7851e93-5717-4921-a978-26c5c550e0a5')
print(url)
b = io.BytesIO(buf.getvalue().encode('utf8'))
resp = requests.post(url, data=b, headers={'Content-Type': 'text/csv'})
if not resp.ok:
print(resp.content)
```
#### File: pymortar/pymortar/application.py
```python
import toml
from functools import lru_cache
# would use cached_property but we need to be compliant down to python 3.7
class Application:
def __init__(self, filename, client):
self.spec = toml.load(open(filename))
self.queries = self.spec["queries"]
self.name = self.spec["name"]
self.client = client
@property
@lru_cache(maxsize=0)
def valid_sites(self):
return self.refresh_valid_sites()
def refresh_valid_sites(self):
df = self.client.qualify(self.queries).df
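# A site qualifies only if every query in the app spec succeeded for it (all columns True in its row).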
sites = list(df[df.all(axis=1)].index)
return sites
```
#### File: pymortar/pymortar/mortar_pb2_grpc.py
```python
import grpc
from . import mortar_pb2 as mortar__pb2
class MortarStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAPIKey = channel.unary_unary(
"/mortar.Mortar/GetAPIKey",
request_serializer=mortar__pb2.GetAPIKeyRequest.SerializeToString,
response_deserializer=mortar__pb2.APIKeyResponse.FromString,
)
self.Qualify = channel.unary_unary(
"/mortar.Mortar/Qualify",
request_serializer=mortar__pb2.QualifyRequest.SerializeToString,
response_deserializer=mortar__pb2.QualifyResponse.FromString,
)
self.Fetch = channel.unary_stream(
"/mortar.Mortar/Fetch",
request_serializer=mortar__pb2.FetchRequest.SerializeToString,
response_deserializer=mortar__pb2.FetchResponse.FromString,
)
class MortarServicer(object):
# missing associated documentation comment in .proto file
pass
def GetAPIKey(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Qualify(self, request, context):
"""identify which sites meet the requirements of the queries"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def Fetch(self, request, context):
"""pull data from Mortar"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_MortarServicer_to_server(servicer, server):
rpc_method_handlers = {
"GetAPIKey": grpc.unary_unary_rpc_method_handler(
servicer.GetAPIKey,
request_deserializer=mortar__pb2.GetAPIKeyRequest.FromString,
response_serializer=mortar__pb2.APIKeyResponse.SerializeToString,
),
"Qualify": grpc.unary_unary_rpc_method_handler(
servicer.Qualify,
request_deserializer=mortar__pb2.QualifyRequest.FromString,
response_serializer=mortar__pb2.QualifyResponse.SerializeToString,
),
"Fetch": grpc.unary_stream_rpc_method_handler(
servicer.Fetch,
request_deserializer=mortar__pb2.FetchRequest.FromString,
response_serializer=mortar__pb2.FetchResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"mortar.Mortar", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
``` |
{
"source": "jli755/colectica_api",
"score": 3
} |
#### File: jli755/colectica_api/get_mode_collection.py
```python
import colectica
from colectica import ColecticaObject
import api
import pandas as pd
import os
import numpy as np
def get_all_series(C):
"""
Get a list of all series
"""
all_series = C.general_search(C.item_code('Series'),'',MaxResults=0)['Results']
return all_series
def from_series_get_study(C, Agency, ID):
"""
From a series, get list of studies
"""
d = C.item_to_dict(Agency, ID)
return d['study']
def from_study_get_instrument(C, Agency, ID):
"""
From a study, get instrument and Mode of Data Collection
"""
d = C.item_to_dict(Agency, ID)
c = C.item_to_dict(d['Data Collection']['Agency'], d['Data Collection']['ID'])
name = c['Name']
mode_list = [c['CollectionEvent']['ModeOfCollection'][i]['TypeOfMode'] for i in range(len(c['CollectionEvent']['ModeOfCollection']))]
if 'InstrumentReferences' in c['Ref'].keys():
instrument_urn = c['Ref']['InstrumentReferences']
else:
instrument_urn = None
return name, instrument_urn, mode_list
def get_instruments_df(C):
"""
From series, return a dataframe of instrument/mode_list
"""
all_series = get_all_series(C)
df = pd.DataFrame(columns=['study_name', 'instrument_name', 'instrument_urn', 'data_collection_mode'])
for s in all_series:
# print("*****")
study_name = list(s['ItemName'].values())[0]
# print('series')
# print(s['AgencyId'], s['Identifier'])
all_studies = from_series_get_study(C, s['AgencyId'], s['Identifier'])
for st in all_studies:
# print("======")
# print('studies')
# print(st['Agency'], st['ID'])
name, instrument_urn, mode_list = from_study_get_instrument(C, st['Agency'], st['ID'])
df = df.append({'study_name': study_name,
'instrument_name': name,
'instrument_urn': instrument_urn,
'data_collection_mode': mode_list},
ignore_index=True)
lst_col = 'data_collection_mode'
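# Explode the list-valued mode column so each collection mode gets its own row.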
df_unlist = pd.DataFrame({col:np.repeat(df[col].values, df[lst_col].str.len())
for col in df.columns.difference([lst_col])}).assign(**{lst_col:np.concatenate(df[lst_col].values)})[df.columns.tolist()]
return df_unlist
def main():
outdir = 'output'
if not os.path.exists(outdir):
os.makedirs(outdir)
hostname = None
username = None
password = None
if not hostname:
hostname = input ("enter the url of the site: ")
if not username:
username = input("enter your username: ")
if not password:
password = input("enter your password: ")
C = ColecticaObject(hostname, username, password)
df = get_instruments_df(C)
df.to_csv(os.path.join(outdir, 'instrument_mode_data_collection.csv'), index=False, sep=';')
if __name__ == '__main__':
main()
```
#### File: jli755/colectica_api/get_questions.py
```python
import colectica
from colectica import ColecticaObject
import api
import pandas as pd
import os
import numpy as np
import json
def from_instrument_get_question_response(C, Agency, ID):
"""
From an instrument get all questions, all response
"""
df_instrument_set, instrument_info = C.item_info_set(Agency, ID)
df_question = df_instrument_set.loc[(df_instrument_set.ItemType == 'Question') , :]
question_df_list = []
codelist_df_list = []
response_df_list = []
for question_id in df_question['Identifier']:
# print(question_id)
df_question, df_response = C.get_question_all(Agency, question_id)
# store DataFrame in list
question_df_list.append(df_question)
if df_question['response_type'][0] == 'CodeList':
codelist_df_list.append(df_response)
else:
response_df_list.append(df_response)
df_question_all = pd.concat(question_df_list)
if codelist_df_list == []:
df_codelist_all = pd.DataFrame()
else:
df_codelist_all = pd.concat(codelist_df_list)
if response_df_list == []:
df_response_all = pd.DataFrame()
else:
df_response_all = pd.concat(response_df_list)
return instrument_info, df_question_all, df_codelist_all, df_response_all
def from_instrument_get_statement(C, Agency, ID):
"""
From an instrument get all Statement
"""
df_instrument_set, instrument_info = C.item_info_set(Agency, ID)
df_statement = df_instrument_set.loc[(df_instrument_set.ItemType == 'Statement') , :]
statement_df_list = []
for statement_id in df_statement['Identifier']:
dict_statement = C.item_to_dict(Agency, statement_id)
df_statement = pd.DataFrame([dict_statement], columns=dict_statement.keys())
statement_df_list.append(df_statement)
if not statement_df_list == []:
df_statement_all = pd.concat(statement_df_list)
else:
df_statement_all = pd.DataFrame(columns=['AgencyId', 'Version', 'Identifier', 'URN', 'SourceId', 'Instruction', 'Label', 'Literal'])
return df_statement_all
def main():
outdir = 'instrument'
if not os.path.exists(outdir):
os.makedirs(outdir)
hostname = None
username = None
password = None
if not hostname:
hostname = input ("enter the url of the site: ")
if not username:
username = input("enter your username: ")
if not password:
password = input("enter your password: ")
C = ColecticaObject(hostname, username, password)
# get all instruments
# L = C.general_search('f196cc07-9c99-4725-ad55-5b34f479cf7d', '', 0)
# print(L['TotalResults']) # 313
# json.dump(L, open(os.path.join(outdir, 'all_instrument.txt'),'w'))
L = json.load(open(os.path.join(outdir, 'all_instrument.txt')))
# print(L)
all_idx = np.array(range(L['TotalResults']))
# split into 10 chunks
chunks = np.array_split(all_idx, 10)
this_chunk = 9
for i in chunks[this_chunk]:
print(i)
Agency = L['Results'][i]['AgencyId']
ID = L['Results'][i]['Identifier']
Version = L['Results'][i]['Version']
instrument_name = '_'.join(' '.join(L['Results'][i]['ItemName'].values()).split(' '))
instrument_dir = os.path.join(outdir, instrument_name)
if not os.path.exists(instrument_dir):
os.makedirs(instrument_dir)
# From an instrument get all questions, all response, print to file
instrument_info, df_question_all, df_codelist_all, df_response_all = from_instrument_get_question_response(C, Agency, ID)
with open(os.path.join(instrument_dir, 'instrument.txt'), 'w') as f:
print(instrument_info, file=f)
df_question_all.to_csv(os.path.join(instrument_dir, 'question.csv'), index=False, sep='\t')
df_codelist_all.to_csv(os.path.join(instrument_dir, 'codelist.csv'), index=False, sep='\t')
df_response_all.to_csv(os.path.join(instrument_dir, 'response.csv'), index=False, sep='\t')
# From an instrument get all statements
df_statement_all = from_instrument_get_statement(C, Agency, ID)
df_statement_out = df_statement_all.loc[:, ['AgencyId', 'Version', 'Identifier', 'URN', 'SourceId', 'Instruction', 'Label', 'Literal']]
df_statement_out.to_csv(os.path.join(instrument_dir, 'statement.csv'), index=False, sep='\t')
if __name__ == '__main__':
main()
``` |
{
"source": "jliahut/02-Text-adventure",
"score": 3
} |
#### File: jliahut/02-Text-adventure/main.py
```python
import sys, os, json
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
# The game and item description files (in the same folder as this script)
game_file = 'zork.json'
item_file = 'items.json'
def load_files():
try:
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, game_file)) as json_file: game = json.load(json_file)
with open(os.path.join(__location__, item_file)) as json_file: items = json.load(json_file)
return (game,items)
except:
print("There was a problem reading either the game or item file.")
os._exit(1)
# The main function for the game
def main():
current = 'PIZZ1' # The starting location
end_game = ['GAME1'] # Any of the end-game locations
(game,items) = load_files()
# Add your code here
# run the main function
if __name__ == '__main__':
main()
``` |
{
"source": "jliang117/jliang117.github.io",
"score": 3
} |
#### File: _site/sampleSaleProject/scrape.py
```python
from bs4 import BeautifulSoup
import requests
import json
import os
class MyEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that leverages an object's `__json__()` method,
if available, to obtain its default JSON representation.
"""
def default(self, obj):
if hasattr(obj, '__json__'):
return obj.__json__()
return json.JSONEncoder.default(self, obj)
class MarkerData():
def __init__(self, title, latitude, longitude, description):
self.title = title
self.latitude = latitude
self.longitude = longitude
self.description = description
def __json__(self):
return{'title': self.title, 'latitude': self.latitude, 'longitude': self.longitude, 'description': self.description}
def saveScrapeToJson(filename):
"""
The site content looks like:
<p class="lead">Tuesday through Friday, 6/4-6/7,
<a href="https://www.260samplesale.com/" rel="noreferrer noopener" target="_blank">260 Sample Sale
</a>hosts
<a href="http://www.montblanc.com/en-us/home.html" rel="noreferrer noopener" target="_blank">Mont Blanc
</a>. Watches, writing instruments, leather accessories and jewelry will all be up to 80% off.
</p>
*usefulData*
</div>
<p>Mont Blanc – 260 Fifth Ave btw 28th & 29th – Tues-Thurs 9am-7pm, Fri 9am-12pm –
<a href="https://www.google.com/maps/place/260+Sample+Sale/@40.7451863,-73.9895217,17z/data=!3m1!4b1!4m5!3m4!1s0x89c259a63b66ffeb:0x85f1cf2e6ed1fa24!8m2!3d40.7451823!4d-73.987333"
rel="noreferrer noopener" target="_blank">Map
</a>
</p>:
"""
HEADERS = {'User-agent':'sample sale scraper - contact on twitter @j_liang_'}
URL = 'http://www.thechoosybeggar.com'
r = requests.get(URL, headers=HEADERS, timeout=10)
soup = BeautifulSoup(r.text, "html5lib")
# create markerList
markerDataList = []
marker_list = []
for sectionData in soup.find_all("div", class_="entry"):
paragraphs = sectionData.find_all('p')
if len(paragraphs) <= 1: # skip if just one paragraph
continue
usefulData = paragraphs[1]
markerText = usefulData.getText()
if "am" not in markerText: # skipping this entry if there's no date
continue
firstWord = markerText.split(" ")[0]
mapLink = usefulData.find('a').get('href')
mapUrlSplit = mapLink.split("/")
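# Google Maps place URLs embed the current view as a path segment of the form '@<lat>,<lng>,<zoom>'.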
for s in mapUrlSplit:
if s.startswith("@"):
latLong = s.split(",")
latitude = latLong[0][1:]
longitude = latLong[1]
marker_list.append("markers=size:large|label:" + firstWord +
"|color:0xFFFF00|" + latitude + "," + longitude + "|")
markerDataList.append(MarkerData(
firstWord, latitude, longitude, markerText))
with open(filename, 'w') as outfile:
json.dump(markerDataList, outfile, cls=MyEncoder)
saveScrapeToJson('pins.json')
``` |
{
"source": "jlianglab/CAiD",
"score": 2
} |
#### File: jlianglab/CAiD/trainer.py
```python
from utils import AverageMeter,ProgressMeter
import torch
import time
def train(train_loader, model, nce_criterion, mse_criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
nce_losses = AverageMeter('NCE Loss', ':.4e')
mse_losses = AverageMeter('MSE Loss', ':.4e')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, nce_losses,mse_losses,losses],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.mode.lower() == "id":
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
output, target = model(im_q=images[0], im_k=images[1])
loss = nce_criterion(output, target)
nce_losses.update(loss.item(), images[0].size(0))
losses.update(loss.item(), images[0].size(0))
elif args.mode.lower() == "caid":
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
images[2] = images[2].cuda(args.gpu, non_blocking=True)
output, target, rec_output = model(im_q=images[0], im_k=images[1])
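# CAiD couples instance discrimination (InfoNCE on output vs. target) with restoration (MSE between rec_output and images[2]).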
nce_loss = nce_criterion(output, target)
mse_loss = mse_criterion(rec_output, images[2])
loss = args.contrastive_weight * nce_loss + args.mse_weight * mse_loss
nce_losses.update(nce_loss.item(), images[0].size(0))
mse_losses.update(mse_loss.item(), images[0].size(0))
losses.update(loss.item(), images[0].size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, nce_criterion, mse_criterion, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
nce_losses = AverageMeter('NCE Loss', ':.4e')
mse_losses = AverageMeter('MSE Loss', ':.4e')
losses = AverageMeter('Loss', ':.4e')
progress = ProgressMeter(
len(val_loader),
[batch_time, data_time, nce_losses,mse_losses,losses],
prefix="Validation: ")
model.eval()
counter = torch.zeros((2,), device=torch.device(f'cuda:{args.rank}'))
end = time.time()
for i, (images) in enumerate(val_loader):
with torch.no_grad():
# measure data loading time
data_time.update(time.time() - end)
if args.mode.lower() == "id":
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
output, target = model(im_q=images[0], im_k=images[1])
loss = nce_criterion(output, target)
nce_losses.update(loss.item(), images[0].size(0))
losses.update(loss.item(), images[0].size(0))
elif args.mode.lower() == "caid":
if args.gpu is not None:
images[0] = images[0].cuda(args.gpu, non_blocking=True)
images[1] = images[1].cuda(args.gpu, non_blocking=True)
images[2] = images[2].cuda(args.gpu, non_blocking=True)
output, target, rec_output = model(im_q=images[0], im_k=images[1])
nce_loss = nce_criterion(output, target)
mse_loss = mse_criterion(rec_output, images[2])
loss = args.contrastive_weight * nce_loss + args.mse_weight * mse_loss
nce_losses.update(nce_loss.item(), images[0].size(0))
mse_losses.update(mse_loss.item(), images[0].size(0))
losses.update(loss.item(), images[0].size(0))
counter[0] += loss.item()
counter[1] += 1
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
return counter
``` |
{
"source": "jlianglab/TransVW",
"score": 2
} |
#### File: TransVW/self_discovery/utils.py
```python
import numpy as np
import SimpleITK as sitk
def resample_img(itk_image, out_spacing=[2.0, 2.0, 2.0], is_label=True):
# Resample images to 2mm spacing with SimpleITK
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
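# Choose the output grid so the physical extent (size * spacing) of the image is preserved at the new spacing.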
out_size = [
int(np.round(original_size[0] * (original_spacing[0] / out_spacing[0]))),
int(np.round(original_size[1] * (original_spacing[1] / out_spacing[1]))),
int(np.round(original_size[2] * (original_spacing[2] / out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
``` |
{
"source": "jlibovicky/asses-multilingual-bert",
"score": 3
} |
#### File: jlibovicky/asses-multilingual-bert/att_entropies_per_lng.py
```python
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from pytorch_pretrained_bert import BertTokenizer, BertModel
import logging
logging.basicConfig(level=logging.INFO)
def text_data_generator(path, tokenizer):
with open(path, 'r', encoding='utf-8') as f:
for line in f:
sentence = line.strip()
# 512 is the maximum input size of BERT
tokens = tokenizer.tokenize(sentence)
tokenized = ["[CLS]"] + tokens[:510] + ["[SEP]"]
token_ids = tokenizer.convert_tokens_to_ids(tokenized)
yield torch.tensor(token_ids)
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"bert_model",
choices=["bert-base-uncased", "bert-large-uncased", "bert-base-cased",
"bert-base-multilingual-cased", "bert-base-multilingual-uncased", "bert-base-chinese"],
help="Variant of pre-trained model.")
parser.add_argument(
"language_data", nargs="+", type=str,
help="Files with data, name of the file is language code.")
parser.add_argument("--num-threads", type=int, default=4)
parser.add_argument("--limit", type=int, default=10000)
args = parser.parse_args()
torch.set_num_threads(args.num_threads)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=False)
model = BertModel.from_pretrained(
args.bert_model,
output_attentions=True,
keep_multihead_output=True).to(device)
model.eval()
languages = []
entropies = []
with torch.no_grad():
for input_file in args.language_data:
lng_code = input_file.split("/")[-1][:-4]
print(f"Working on {lng_code}")
entropies_sums = None
sentence_count = 0
for sentence_tensor in text_data_generator(input_file, tokenizer):
sentence_count += 1
layer_attentions = model(sentence_tensor.unsqueeze(0))[0]
head_count = layer_attentions[0].shape[1]
if entropies_sums is None:
entropies_sums = np.zeros(
len(layer_attentions) * head_count)
head_id = 0
for att_matrices in layer_attentions:
for matrix in att_matrices.squeeze(0):
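# Shannon entropy of each attention row (a distribution over key positions), averaged over query positions; 1e-9 guards against log(0).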
entropy = -torch.mean((matrix * torch.log(matrix + 1e-9)).sum(1))
entropies_sums[head_id] += entropy.cpu().numpy()
head_id += 1
if sentence_count >= args.limit:
break
languages.append(lng_code)
entropies.append(entropies_sums / sentence_count)
for lng, entropy in zip(languages, entropies):
formatted_ent = "\t".join([f"{e:.5f}" for e in entropy])
print(f"{lng}\t{formatted_ent}")
if __name__ == "__main__":
main()
```
#### File: jlibovicky/asses-multilingual-bert/lang_id_embeddings.py
```python
import argparse
import numpy as np
from sklearn.linear_model import LogisticRegression
from utils import load_word_embeddings, mean_word_embedding
def load_dataset(txt_file, lng_file, all_embeddings, lng2idx):
representations = []
targets = []
with open(txt_file) as f_txt, open(lng_file) as f_lng:
for sentence, lng in zip(f_txt, f_lng):
lng = lng.strip()
vector = mean_word_embedding(
all_embeddings[lng], sentence.strip(), lng)
if vector.shape == tuple():
continue
representations.append(vector)
targets.append(lng2idx[lng])
return np.stack(representations), np.array(targets)
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"embeddings_prefix", type=str, help="Directory with word embeddings.")
parser.add_argument(
"languages", type=str,
help="File with a list of languages.")
parser.add_argument(
"train_data_txt", type=str, help="Training sentences.")
parser.add_argument(
"train_data_lng", type=str,
help="Language codes for training sentences.")
parser.add_argument(
"val_data_txt", type=str, help="Validation sentences.")
parser.add_argument(
"val_data_lng", type=str,
help="Language codes for validation sentences.")
parser.add_argument(
"test_data_txt", type=str, help="Test sentences.")
parser.add_argument(
"test_data_lng", type=str, help="Language codes for test sentences.")
parser.add_argument("--num-threads", type=int, default=4)
parser.add_argument(
"--save-model", type=str, help="Path where to save the best model.")
parser.add_argument(
"--save-centroids", type=str, help="Path to save language centroids.")
parser.add_argument(
"--test-output", type=str, default=None,
help="Output for example classification.")
parser.add_argument(
"--center-lng", default=False, action="store_true",
help="Center languages to be around coordinate origin.")
args = parser.parse_args()
with open(args.languages) as f_lang:
languages = [line.strip() for line in f_lang]
lng2idx = {lng: i for i, lng in enumerate(languages)}
print("Loading embeddings.")
all_embeddings = {
lng: load_word_embeddings(f"{args.embeddings_prefix}/{lng}.vec")
for lng in languages}
print("Loading training data.")
train_repr, train_tgt = load_dataset(
args.train_data_txt, args.train_data_lng, all_embeddings, lng2idx)
print("Loading test data.")
test_repr, test_tgt = load_dataset(
args.test_data_txt, args.test_data_lng, all_embeddings, lng2idx)
if args.center_lng:
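# Subtract each language's mean vector so the classifier cannot simply key on a per-language offset.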
centroids = np.stack([
np.mean(train_repr[train_tgt == i], axis=0)
for i in range(len(all_embeddings))])
train_repr = train_repr - centroids[train_tgt]
test_repr = test_repr - centroids[test_tgt]
model = LogisticRegression()
model.fit(train_repr, train_tgt)
test_prediction = model.predict(test_repr)
accuracy = np.mean(test_prediction == test_tgt)
print(accuracy)
if __name__ == "__main__":
main()
```
#### File: jlibovicky/asses-multilingual-bert/lang_id.py
```python
import argparse
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from utils import (
text_data_generator, batch_generator, get_repr_from_layer, load_bert)
logging.basicConfig(level=logging.INFO)
def lng_data_generator(path, lng2idx, epochs=1):
for _ in range(epochs):
with open(path, 'r', encoding='utf-8') as f_lang:
for line in f_lang:
lng = line.strip()
lng_id = lng2idx[lng]
yield torch.tensor(lng_id)
def get_centroids(
device, model, data, languages, labels, layer, tokenizer, mean_pool=False):
"""Get language centeroids based on labels."""
labels = torch.cat(labels).to(device)
text_repr = torch.cat([
get_repr_from_layer(model, d.to(device), layer,
tokenizer.pad_token_id, mean_pool=mean_pool)
for d in data])
centroids = torch.zeros((len(languages), text_repr.size(1)))
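# Mean representation per language; subtracted from sentence vectors later when --center-lng is set.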
for i, _ in enumerate(languages):
centroids[i] = text_repr[labels == i].mean(0)
return centroids
def load_and_batch_data(txt, lng, tokenizer, lng2idx, batch_size=32, epochs=1):
text_batches = batch_generator(
text_data_generator(
txt, tokenizer, epochs=epochs, max_len=110),
size=batch_size, tokenizer=tokenizer, padding=True)
lng_batches = batch_generator(
lng_data_generator(lng, lng2idx, epochs=epochs),
size=batch_size, tokenizer=None, padding=False)
return zip(text_batches, lng_batches)
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"bert_model", type=str, help="Variant of pre-trained model.")
parser.add_argument(
"layer", type=int,
help="Layer from of layer from which the representation is taken.")
parser.add_argument(
"languages", type=str,
help="File with a list of languages.")
parser.add_argument(
"train_data_txt", type=str, help="Training sentences.")
parser.add_argument(
"train_data_lng", type=str,
help="Language codes for training sentences.")
parser.add_argument(
"val_data_txt", type=str, help="Validation sentences.")
parser.add_argument(
"val_data_lng", type=str,
help="Language codes for validation sentences.")
parser.add_argument(
"test_data_txt", type=str, help="Test sentences.")
parser.add_argument(
"test_data_lng", type=str, help="Language codes for test sentences.")
parser.add_argument(
"--hidden", default=None, type=int,
help="Size of the hidden classification layer.")
parser.add_argument("--num-threads", type=int, default=4)
parser.add_argument(
"--save-model", type=str, help="Path where to save the best model.")
parser.add_argument(
"--save-centroids", type=str, help="Path to save language centroids.")
parser.add_argument(
"--test-output", type=str, default=None,
help="Output for example classification.")
parser.add_argument(
"--skip-tokenization", default=False, action="store_true",
help="Only split on spaces, skip wordpieces.")
parser.add_argument(
"--mean-pool", default=False, action="store_true",
help="If true, use mean-pooling instead of [CLS] vecotr.")
parser.add_argument(
"--center-lng", default=False, action="store_true",
help="Center languages to be around coordinate origin.")
args = parser.parse_args()
with open(args.languages) as f_lang:
languages = [line.strip() for line in f_lang]
lng2idx = {lng: i for i, lng in enumerate(languages)}
torch.set_num_threads(args.num_threads)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer, model, model_dim, _ = load_bert(
args.bert_model, device)
if args.layer < -1:
print("Layer index cannot be negative.")
exit(1)
num_layers = None
if hasattr(model.config, "num_hidden_layers"):
num_layers = model.config.num_hidden_layers
if hasattr(model.config, "n_layers"):
num_layers = model.config.n_layers
if args.layer >= num_layers:
print(f"Model only has {num_layers} layers, {args.layer} is too much.")
exit(1)
train_batches = load_and_batch_data(
args.train_data_txt, args.train_data_lng, tokenizer,
lng2idx, batch_size=32, epochs=1000)
print("Train data iterator initialized.")
centroids = None
if args.center_lng:
print("Estimating language centroids.")
with torch.no_grad():
texts, labels = [], []
for _, (txt, lab) in zip(range(100), train_batches):
texts.append(txt)
labels.append(lab)
centroids = get_centroids(
device, model, texts, languages, labels,
args.layer, tokenizer, mean_pool=args.mean_pool)
centroids = centroids.to(device)
if args.save_centroids:
torch.save(centroids.cpu(), args.save_centroids)
print("Loading validation data.")
val_batches_raw = list(load_and_batch_data(
args.val_data_txt, args.val_data_lng, tokenizer,
lng2idx, batch_size=32, epochs=1))
print("Validation data loaded in memory, pre-computing BERT.")
val_batches = []
with torch.no_grad():
for tokens, lng in val_batches_raw:
bert_features = get_repr_from_layer(
model, tokens.to(device), args.layer,
tokenizer.pad_token_id, args.mean_pool).cpu()
val_batches.append((bert_features, lng))
print("Loading test data.")
test_batches_raw = list(load_and_batch_data(
args.test_data_txt, args.test_data_lng, tokenizer,
lng2idx, batch_size=32, epochs=1))
print("Test data loaded in memory, pre-computing BERT.")
test_batches = []
with torch.no_grad():
for tokens, lng in test_batches_raw:
bert_features = get_repr_from_layer(
model, tokens.to(device), args.layer,
tokenizer.pad_token_id, args.mean_pool).cpu()
test_batches.append((bert_features, lng))
print()
test_accuracies = []
all_test_outputs = []
trained_models = []
for exp_no in range(5):
print(f"Starting experiment no {exp_no + 1}")
print(f"------------------------------------")
if args.hidden is None:
classifier = nn.Linear(model_dim, len(languages))
else:
classifier = nn.Sequential(
nn.Linear(model_dim, args.hidden),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(args.hidden, len(languages)))
classifier = classifier.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier.parameters(), lr=1e-3)
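        # Evaluates the classifier on pre-computed feature batches and returns
        # the average loss, the accuracy and the predicted language indices.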
def evaluate(data_batches):
classifier.eval()
with torch.no_grad():
running_val_loss = 0.
running_val_acc = 0.
val_count = 0
outputs = []
for bert_features, lng in data_batches:
bert_features, lng = (
bert_features.to(device), lng.to(device))
batch_size = bert_features.size(0)
if centroids is not None:
bert_features = bert_features - centroids[lng]
prediction = classifier(bert_features)
batch_loss = criterion(prediction, lng)
predicted_lng = prediction.max(-1)[1]
batch_accuracy = torch.sum((predicted_lng == lng).float())
running_val_loss += (
batch_size * batch_loss.cpu().numpy().tolist())
running_val_acc += batch_accuracy.cpu().numpy().tolist()
val_count += batch_size
outputs.extend(predicted_lng.cpu().numpy().tolist())
val_loss = running_val_loss / val_count
accuracy = running_val_acc / val_count
return val_loss, accuracy, outputs
best_accuracy = 0.0
no_improvement = 0
learning_rate_decreased = 0
learning_rate = 1e-3
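        # Training schedule: evaluate every 50 batches, halve the learning
        # rate after 5 evaluations without improvement, and stop once the
        # learning rate has been halved 5 times.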
for i, (sentences, lng) in enumerate(train_batches):
try:
classifier.train()
optimizer.zero_grad()
sentences, lng = sentences.to(device), lng.to(device)
bert_features = get_repr_from_layer(
model, sentences, args.layer, tokenizer.pad_token_id,
mean_pool=args.mean_pool)
if centroids is not None:
with torch.no_grad():
bert_features = bert_features - centroids[lng]
prediction = classifier(bert_features)
loss = criterion(prediction, lng)
loss.backward()
optimizer.step()
if i % 10 == 9:
print(f"loss: {loss.cpu().detach().numpy().tolist():5g}")
if i % 50 == 49:
print()
val_loss, accuracy, _ = evaluate(val_batches)
print("Validation: "
f"loss: {val_loss:5g}, "
f"accuracy: {accuracy:5g}")
if accuracy > best_accuracy:
best_accuracy = accuracy
no_improvement = 0
else:
no_improvement += 1
if no_improvement >= 5:
if learning_rate_decreased >= 5:
print(
"Learning rate decreased five times, ending.")
break
learning_rate /= 2
print(f"Decreasing learning rate to {learning_rate}.")
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
learning_rate_decreased += 1
no_improvement = 0
print()
except KeyboardInterrupt:
break
model.eval()
test_loss, test_accuracy, test_outputs = evaluate(test_batches)
print()
print("Testing:")
print(f"test loss: {test_loss:5g}, "
f"test accuracy: {test_accuracy:5g}")
test_accuracies.append(test_accuracy)
this_test_outputs = []
for lng_prediction in test_outputs:
this_test_outputs.append(languages[lng_prediction])
all_test_outputs.append(this_test_outputs)
trained_models.append(classifier.cpu())
print()
print("===============================================")
print("All experiments done.")
print("===============================================")
print(f"Mean test accuracy {np.mean(test_accuracies)}")
print(f"Mean test stdev {np.std(test_accuracies)}")
best_exp_id = np.argmax(test_accuracies)
print(f"Best test accuracy {max(test_accuracies)}")
if args.save_model:
torch.save(trained_models[best_exp_id], args.save_model)
if args.test_output is not None:
with open(args.test_output, 'w') as f_out:
for prediction in all_test_outputs[best_exp_id]:
print(prediction, file=f_out)
if __name__ == "__main__":
main()
```
#### File: jlibovicky/asses-multilingual-bert/qe_by_cosine_embeddings.py
```python
import argparse
import logging
import sys
import numpy as np
import torch
from qe_by_cosine import apply_sklearn_proj
from utils import load_word_embeddings, word_embeddings_for_file
logging.basicConfig(level=logging.INFO)
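# Removes the per-dimension mean over all sentences of one language, i.e.,
# centers that language's representations at the coordinate origin.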
def center(lng_repr):
return lng_repr - lng_repr.mean(0, keepdim=True)
def main():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument(
"src", type=str, help="Sentences in source language.")
parser.add_argument(
"mt", type=str, help="Sentences in the target language.")
parser.add_argument(
"src_emb", type=str, help="Source language word embeddings.")
parser.add_argument(
"mt_emb", type=str, help="Target language word embeddings.")
parser.add_argument(
"src_lng", type=str, help="Source language code.")
parser.add_argument(
"mt_lng", type=str, help="Target language code.")
parser.add_argument(
"--mean-pool", default=False, action="store_true",
help="If true, use mean-pooling instead of [CLS] vecotr.")
parser.add_argument(
"--center-lng", default=False, action="store_true",
help="If true, center representations first.")
parser.add_argument(
"--batch-size", type=int, default=32)
parser.add_argument(
"--src-proj", default=None, type=str,
help="Sklearn projection of the source language.")
parser.add_argument(
"--mt-proj", default=None, type=str,
help="Sklearn projection of the target language.")
parser.add_argument("--num-threads", type=int, default=4)
args = parser.parse_args()
    if args.center_lng and (
            args.src_proj is not None or args.mt_proj is not None):
print("You can either project or center "
"the representations, not both.", file=sys.stderr)
exit(1)
torch.set_num_threads(args.num_threads)
src_embeddings = load_word_embeddings(args.src_emb)
mt_embeddings = load_word_embeddings(args.mt_emb)
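    # Each sentence is turned into a single vector built from its pre-trained
    # word embeddings (via word_embeddings_for_file); shape: sentences x dim.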
src_repr = torch.from_numpy(np.stack(
word_embeddings_for_file(args.src, src_embeddings, args.src_lng)))
mt_repr = torch.from_numpy(np.stack(
word_embeddings_for_file(args.mt, mt_embeddings, args.mt_lng)))
if args.center_lng:
src_repr = center(src_repr)
mt_repr = center(mt_repr)
if args.src_proj is not None:
src_repr = apply_sklearn_proj(src_repr, args.src_proj)
if args.mt_proj is not None:
mt_repr = apply_sklearn_proj(mt_repr, args.mt_proj)
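    # The quality estimate is the cosine similarity between the source and
    # target sentence representations; one score per input line is printed.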
src_norm = (src_repr * src_repr).sum(1).sqrt()
mt_norm = (mt_repr * mt_repr).sum(1).sqrt()
cosine = (src_repr * mt_repr).sum(1) / src_norm / mt_norm
for num in cosine.cpu().detach().numpy():
print(num)
if __name__ == "__main__":
main()
```