repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
kaonashi-tyc/zi2zi | model/unet.py | 1 | 32928 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os
import time
from collections import namedtuple
from .ops import conv2d, deconv2d, lrelu, fc, batch_norm, init_embedding, conditional_instance_norm
from .dataset import TrainDataProvider, InjectDataProvider, NeverEndingLoopingProvider
from .utils import scale_back, merge, save_concat_images
# Auxiliary wrapper classes
# Used to save handles (important nodes in the computation graph) for later evaluation
LossHandle = namedtuple("LossHandle", ["d_loss", "g_loss", "const_loss", "l1_loss",
"category_loss", "cheat_loss", "tv_loss"])
InputHandle = namedtuple("InputHandle", ["real_data", "embedding_ids", "no_target_data", "no_target_ids"])
EvalHandle = namedtuple("EvalHandle", ["encoder", "generator", "target", "source", "embedding"])
SummaryHandle = namedtuple("SummaryHandle", ["d_merged", "g_merged"])
class UNet(object):
def __init__(self, experiment_dir=None, experiment_id=0, batch_size=16, input_width=256, output_width=256,
generator_dim=64, discriminator_dim=64, L1_penalty=100, Lconst_penalty=15, Ltv_penalty=0.0,
Lcategory_penalty=1.0, embedding_num=40, embedding_dim=128, input_filters=3, output_filters=3):
self.experiment_dir = experiment_dir
self.experiment_id = experiment_id
self.batch_size = batch_size
self.input_width = input_width
self.output_width = output_width
self.generator_dim = generator_dim
self.discriminator_dim = discriminator_dim
self.L1_penalty = L1_penalty
self.Lconst_penalty = Lconst_penalty
self.Ltv_penalty = Ltv_penalty
self.Lcategory_penalty = Lcategory_penalty
self.embedding_num = embedding_num
self.embedding_dim = embedding_dim
self.input_filters = input_filters
self.output_filters = output_filters
# init all the directories
self.sess = None
# experiment_dir is needed for training
if experiment_dir:
self.data_dir = os.path.join(self.experiment_dir, "data")
self.checkpoint_dir = os.path.join(self.experiment_dir, "checkpoint")
self.sample_dir = os.path.join(self.experiment_dir, "sample")
self.log_dir = os.path.join(self.experiment_dir, "logs")
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
print("create checkpoint directory")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
print("create log directory")
if not os.path.exists(self.sample_dir):
os.makedirs(self.sample_dir)
print("create sample directory")
def encoder(self, images, is_training, reuse=False):
with tf.variable_scope("generator"):
if reuse:
tf.get_variable_scope().reuse_variables()
encode_layers = dict()
def encode_layer(x, output_filters, layer):
act = lrelu(x)
conv = conv2d(act, output_filters=output_filters, scope="g_e%d_conv" % layer)
enc = batch_norm(conv, is_training, scope="g_e%d_bn" % layer)
encode_layers["e%d" % layer] = enc
return enc
e1 = conv2d(images, self.generator_dim, scope="g_e1_conv")
encode_layers["e1"] = e1
e2 = encode_layer(e1, self.generator_dim * 2, 2)
e3 = encode_layer(e2, self.generator_dim * 4, 3)
e4 = encode_layer(e3, self.generator_dim * 8, 4)
e5 = encode_layer(e4, self.generator_dim * 8, 5)
e6 = encode_layer(e5, self.generator_dim * 8, 6)
e7 = encode_layer(e6, self.generator_dim * 8, 7)
e8 = encode_layer(e7, self.generator_dim * 8, 8)
return e8, encode_layers
def decoder(self, encoded, encoding_layers, ids, inst_norm, is_training, reuse=False):
with tf.variable_scope("generator"):
if reuse:
tf.get_variable_scope().reuse_variables()
s = self.output_width
s2, s4, s8, s16, s32, s64, s128 = int(s / 2), int(s / 4), int(s / 8), int(s / 16), int(s / 32), int(
s / 64), int(s / 128)
def decode_layer(x, output_width, output_filters, layer, enc_layer, dropout=False, do_concat=True):
dec = deconv2d(tf.nn.relu(x), [self.batch_size, output_width,
output_width, output_filters], scope="g_d%d_deconv" % layer)
if layer != 8:
# IMPORTANT: no normalization for the last layer.
# Normalization on the other layers is very important, otherwise the GAN is unstable.
# Trying conditional instance normalization to
# overcome the fact that batch normalization offers
# different train/test statistics
if inst_norm:
dec = conditional_instance_norm(dec, ids, self.embedding_num, scope="g_d%d_inst_norm" % layer)
else:
dec = batch_norm(dec, is_training, scope="g_d%d_bn" % layer)
if dropout:
dec = tf.nn.dropout(dec, 0.5)
if do_concat:
dec = tf.concat([dec, enc_layer], 3)
return dec
d1 = decode_layer(encoded, s128, self.generator_dim * 8, layer=1, enc_layer=encoding_layers["e7"],
dropout=True)
d2 = decode_layer(d1, s64, self.generator_dim * 8, layer=2, enc_layer=encoding_layers["e6"], dropout=True)
d3 = decode_layer(d2, s32, self.generator_dim * 8, layer=3, enc_layer=encoding_layers["e5"], dropout=True)
d4 = decode_layer(d3, s16, self.generator_dim * 8, layer=4, enc_layer=encoding_layers["e4"])
d5 = decode_layer(d4, s8, self.generator_dim * 4, layer=5, enc_layer=encoding_layers["e3"])
d6 = decode_layer(d5, s4, self.generator_dim * 2, layer=6, enc_layer=encoding_layers["e2"])
d7 = decode_layer(d6, s2, self.generator_dim, layer=7, enc_layer=encoding_layers["e1"])
d8 = decode_layer(d7, s, self.output_filters, layer=8, enc_layer=None, do_concat=False)
output = tf.nn.tanh(d8) # scale to (-1, 1)
return output
def generator(self, images, embeddings, embedding_ids, inst_norm, is_training, reuse=False):
e8, enc_layers = self.encoder(images, is_training=is_training, reuse=reuse)
local_embeddings = tf.nn.embedding_lookup(embeddings, ids=embedding_ids)
local_embeddings = tf.reshape(local_embeddings, [self.batch_size, 1, 1, self.embedding_dim])
embedded = tf.concat([e8, local_embeddings], 3)
output = self.decoder(embedded, enc_layers, embedding_ids, inst_norm, is_training=is_training, reuse=reuse)
return output, e8
def discriminator(self, image, is_training, reuse=False):
with tf.variable_scope("discriminator"):
if reuse:
tf.get_variable_scope().reuse_variables()
h0 = lrelu(conv2d(image, self.discriminator_dim, scope="d_h0_conv"))
h1 = lrelu(batch_norm(conv2d(h0, self.discriminator_dim * 2, scope="d_h1_conv"),
is_training, scope="d_bn_1"))
h2 = lrelu(batch_norm(conv2d(h1, self.discriminator_dim * 4, scope="d_h2_conv"),
is_training, scope="d_bn_2"))
h3 = lrelu(batch_norm(conv2d(h2, self.discriminator_dim * 8, sh=1, sw=1, scope="d_h3_conv"),
is_training, scope="d_bn_3"))
# real or fake binary loss
fc1 = fc(tf.reshape(h3, [self.batch_size, -1]), 1, scope="d_fc1")
# category loss
fc2 = fc(tf.reshape(h3, [self.batch_size, -1]), self.embedding_num, scope="d_fc2")
return tf.nn.sigmoid(fc1), fc1, fc2
def build_model(self, is_training=True, inst_norm=False, no_target_source=False):
real_data = tf.placeholder(tf.float32,
[self.batch_size, self.input_width, self.input_width,
self.input_filters + self.output_filters],
name='real_A_and_B_images')
embedding_ids = tf.placeholder(tf.int64, shape=None, name="embedding_ids")
no_target_data = tf.placeholder(tf.float32,
[self.batch_size, self.input_width, self.input_width,
self.input_filters + self.output_filters],
name='no_target_A_and_B_images')
no_target_ids = tf.placeholder(tf.int64, shape=None, name="no_target_embedding_ids")
# target images
real_B = real_data[:, :, :, :self.input_filters]
# source images
real_A = real_data[:, :, :, self.input_filters:self.input_filters + self.output_filters]
embedding = init_embedding(self.embedding_num, self.embedding_dim)
fake_B, encoded_real_A = self.generator(real_A, embedding, embedding_ids, is_training=is_training,
inst_norm=inst_norm)
real_AB = tf.concat([real_A, real_B], 3)
fake_AB = tf.concat([real_A, fake_B], 3)
# Note it is not possible to set reuse flag back to False
# initialize all variables before setting reuse to True
real_D, real_D_logits, real_category_logits = self.discriminator(real_AB, is_training=is_training, reuse=False)
fake_D, fake_D_logits, fake_category_logits = self.discriminator(fake_AB, is_training=is_training, reuse=True)
# encoding constant loss
# this loss assumes that the generated image and the real image
# should reside in the same space and be close to each other
encoded_fake_B = self.encoder(fake_B, is_training, reuse=True)[0]
const_loss = (tf.reduce_mean(tf.square(encoded_real_A - encoded_fake_B))) * self.Lconst_penalty
# category loss
true_labels = tf.reshape(tf.one_hot(indices=embedding_ids, depth=self.embedding_num),
shape=[self.batch_size, self.embedding_num])
real_category_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real_category_logits,
labels=true_labels))
fake_category_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_category_logits,
labels=true_labels))
category_loss = self.Lcategory_penalty * (real_category_loss + fake_category_loss)
# binary real/fake loss
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=real_D_logits,
labels=tf.ones_like(real_D)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_D_logits,
labels=tf.zeros_like(fake_D)))
# L1 loss between real and generated images
l1_loss = self.L1_penalty * tf.reduce_mean(tf.abs(fake_B - real_B))
# total variation loss
width = self.output_width
tv_loss = (tf.nn.l2_loss(fake_B[:, 1:, :, :] - fake_B[:, :width - 1, :, :]) / width
+ tf.nn.l2_loss(fake_B[:, :, 1:, :] - fake_B[:, :, :width - 1, :]) / width) * self.Ltv_penalty
# maximize the chance that the generator fools the discriminator
cheat_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_D_logits,
labels=tf.ones_like(fake_D)))
d_loss = d_loss_real + d_loss_fake + category_loss / 2.0
g_loss = cheat_loss + l1_loss + self.Lcategory_penalty * fake_category_loss + const_loss + tv_loss
if no_target_source:
# no_target sources are examples that don't have corresponding target images
# however, except for the L1 loss, we can still compute the category, binary and constant losses with those examples
# this is useful when the discriminator gets saturated and d_loss drops to near zero
# such data can be used as an additional source of losses to break the saturation
no_target_A = no_target_data[:, :, :, self.input_filters:self.input_filters + self.output_filters]
no_target_B, encoded_no_target_A = self.generator(no_target_A, embedding, no_target_ids,
is_training=is_training,
inst_norm=inst_norm, reuse=True)
no_target_labels = tf.reshape(tf.one_hot(indices=no_target_ids, depth=self.embedding_num),
shape=[self.batch_size, self.embedding_num])
no_target_AB = tf.concat([no_target_A, no_target_B], 3)
no_target_D, no_target_D_logits, no_target_category_logits = self.discriminator(no_target_AB,
is_training=is_training,
reuse=True)
encoded_no_target_B = self.encoder(no_target_B, is_training, reuse=True)[0]
no_target_const_loss = tf.reduce_mean(
tf.square(encoded_no_target_A - encoded_no_target_B)) * self.Lconst_penalty
no_target_category_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=no_target_category_logits,
labels=no_target_labels)) * self.Lcategory_penalty
d_loss_no_target = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=no_target_D_logits,
labels=tf.zeros_like(
no_target_D)))
cheat_loss += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=no_target_D_logits,
labels=tf.ones_like(no_target_D)))
d_loss = d_loss_real + d_loss_fake + d_loss_no_target + (category_loss + no_target_category_loss) / 3.0
g_loss = cheat_loss / 2.0 + l1_loss + \
(self.Lcategory_penalty * fake_category_loss + no_target_category_loss) / 2.0 + \
(const_loss + no_target_const_loss) / 2.0 + tv_loss
d_loss_real_summary = tf.summary.scalar("d_loss_real", d_loss_real)
d_loss_fake_summary = tf.summary.scalar("d_loss_fake", d_loss_fake)
category_loss_summary = tf.summary.scalar("category_loss", category_loss)
cheat_loss_summary = tf.summary.scalar("cheat_loss", cheat_loss)
l1_loss_summary = tf.summary.scalar("l1_loss", l1_loss)
fake_category_loss_summary = tf.summary.scalar("fake_category_loss", fake_category_loss)
const_loss_summary = tf.summary.scalar("const_loss", const_loss)
d_loss_summary = tf.summary.scalar("d_loss", d_loss)
g_loss_summary = tf.summary.scalar("g_loss", g_loss)
tv_loss_summary = tf.summary.scalar("tv_loss", tv_loss)
d_merged_summary = tf.summary.merge([d_loss_real_summary, d_loss_fake_summary,
category_loss_summary, d_loss_summary])
g_merged_summary = tf.summary.merge([cheat_loss_summary, l1_loss_summary,
fake_category_loss_summary,
const_loss_summary,
g_loss_summary, tv_loss_summary])
# expose useful nodes in the graph as handles globally
input_handle = InputHandle(real_data=real_data,
embedding_ids=embedding_ids,
no_target_data=no_target_data,
no_target_ids=no_target_ids)
loss_handle = LossHandle(d_loss=d_loss,
g_loss=g_loss,
const_loss=const_loss,
l1_loss=l1_loss,
category_loss=category_loss,
cheat_loss=cheat_loss,
tv_loss=tv_loss)
eval_handle = EvalHandle(encoder=encoded_real_A,
generator=fake_B,
target=real_B,
source=real_A,
embedding=embedding)
summary_handle = SummaryHandle(d_merged=d_merged_summary,
g_merged=g_merged_summary)
# those operations will be shared, so we need
# to make them visible globally
setattr(self, "input_handle", input_handle)
setattr(self, "loss_handle", loss_handle)
setattr(self, "eval_handle", eval_handle)
setattr(self, "summary_handle", summary_handle)
def register_session(self, sess):
self.sess = sess
def retrieve_trainable_vars(self, freeze_encoder=False):
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'd_' in var.name]
g_vars = [var for var in t_vars if 'g_' in var.name]
if freeze_encoder:
# exclude encoder weights
print("freeze encoder weights")
g_vars = [var for var in g_vars if not ("g_e" in var.name)]
return g_vars, d_vars
def retrieve_generator_vars(self):
all_vars = tf.global_variables()
generate_vars = [var for var in all_vars if 'embedding' in var.name or "g_" in var.name]
return generate_vars
def retrieve_handles(self):
input_handle = getattr(self, "input_handle")
loss_handle = getattr(self, "loss_handle")
eval_handle = getattr(self, "eval_handle")
summary_handle = getattr(self, "summary_handle")
return input_handle, loss_handle, eval_handle, summary_handle
def get_model_id_and_dir(self):
model_id = "experiment_%d_batch_%d" % (self.experiment_id, self.batch_size)
model_dir = os.path.join(self.checkpoint_dir, model_id)
return model_id, model_dir
def checkpoint(self, saver, step):
model_name = "unet.model"
model_id, model_dir = self.get_model_id_and_dir()
if not os.path.exists(model_dir):
os.makedirs(model_dir)
saver.save(self.sess, os.path.join(model_dir, model_name), global_step=step)
def restore_model(self, saver, model_dir):
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt:
saver.restore(self.sess, ckpt.model_checkpoint_path)
print("restored model %s" % model_dir)
else:
print("fail to restore model %s" % model_dir)
def generate_fake_samples(self, input_images, embedding_ids):
input_handle, loss_handle, eval_handle, summary_handle = self.retrieve_handles()
fake_images, real_images, \
d_loss, g_loss, l1_loss = self.sess.run([eval_handle.generator,
eval_handle.target,
loss_handle.d_loss,
loss_handle.g_loss,
loss_handle.l1_loss],
feed_dict={
input_handle.real_data: input_images,
input_handle.embedding_ids: embedding_ids,
input_handle.no_target_data: input_images,
input_handle.no_target_ids: embedding_ids
})
return fake_images, real_images, d_loss, g_loss, l1_loss
def validate_model(self, val_iter, epoch, step):
labels, images = next(val_iter)
fake_imgs, real_imgs, d_loss, g_loss, l1_loss = self.generate_fake_samples(images, labels)
print("Sample: d_loss: %.5f, g_loss: %.5f, l1_loss: %.5f" % (d_loss, g_loss, l1_loss))
merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
merged_real_images = merge(scale_back(real_imgs), [self.batch_size, 1])
merged_pair = np.concatenate([merged_real_images, merged_fake_images], axis=1)
model_id, _ = self.get_model_id_and_dir()
model_sample_dir = os.path.join(self.sample_dir, model_id)
if not os.path.exists(model_sample_dir):
os.makedirs(model_sample_dir)
sample_img_path = os.path.join(model_sample_dir, "sample_%02d_%04d.png" % (epoch, step))
misc.imsave(sample_img_path, merged_pair)
def export_generator(self, save_dir, model_dir, model_name="gen_model"):
saver = tf.train.Saver()
self.restore_model(saver, model_dir)
gen_saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
gen_saver.save(self.sess, os.path.join(save_dir, model_name), global_step=0)
def infer(self, source_obj, embedding_ids, model_dir, save_dir):
source_provider = InjectDataProvider(source_obj)
if isinstance(embedding_ids, int) or len(embedding_ids) == 1:
embedding_id = embedding_ids if isinstance(embedding_ids, int) else embedding_ids[0]
source_iter = source_provider.get_single_embedding_iter(self.batch_size, embedding_id)
else:
source_iter = source_provider.get_random_embedding_iter(self.batch_size, embedding_ids)
tf.global_variables_initializer().run()
saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
self.restore_model(saver, model_dir)
def save_imgs(imgs, count):
p = os.path.join(save_dir, "inferred_%04d.png" % count)
save_concat_images(imgs, img_path=p)
print("generated images saved at %s" % p)
count = 0
batch_buffer = list()
for labels, source_imgs in source_iter:
fake_imgs = self.generate_fake_samples(source_imgs, labels)[0]
merged_fake_images = merge(scale_back(fake_imgs), [self.batch_size, 1])
batch_buffer.append(merged_fake_images)
if len(batch_buffer) == 10:
save_imgs(batch_buffer, count)
batch_buffer = list()
count += 1
if batch_buffer:
# last batch
save_imgs(batch_buffer, count)
def interpolate(self, source_obj, between, model_dir, save_dir, steps):
tf.global_variables_initializer().run()
saver = tf.train.Saver(var_list=self.retrieve_generator_vars())
self.restore_model(saver, model_dir)
# new interpolated dimension
new_x_dim = steps + 1
alphas = np.linspace(0.0, 1.0, new_x_dim)
def _interpolate_tensor(_tensor):
"""
Compute the interpolated tensor here
"""
x = _tensor[between[0]]
y = _tensor[between[1]]
interpolated = list()
for alpha in alphas:
interpolated.append(x * (1. - alpha) + alpha * y)
interpolated = np.asarray(interpolated, dtype=np.float32)
return interpolated
def filter_embedding_vars(var):
var_name = var.name
if var_name.find("embedding") != -1:
return True
if var_name.find("inst_norm/shift") != -1 or var_name.find("inst_norm/scale") != -1:
return True
return False
embedding_vars = filter(filter_embedding_vars, tf.trainable_variables())
# here comes the hack: we overwrite the original tensor
# with the interpolated ones. Note that the shape might differ.
# the snapshot below is used to restore the embedding at the end
embedding_snapshot = list()
for e_var in embedding_vars:
val = e_var.eval(session=self.sess)
embedding_snapshot.append((e_var, val))
t = _interpolate_tensor(val)
op = tf.assign(e_var, t, validate_shape=False)
print("overwrite %s tensor" % e_var.name, "old_shape ->", e_var.get_shape(), "new shape ->", t.shape)
self.sess.run(op)
source_provider = InjectDataProvider(source_obj)
input_handle, _, eval_handle, _ = self.retrieve_handles()
for step_idx in range(len(alphas)):
alpha = alphas[step_idx]
print("interpolate %d -> %.4f + %d -> %.4f" % (between[0], 1. - alpha, between[1], alpha))
source_iter = source_provider.get_single_embedding_iter(self.batch_size, 0)
batch_buffer = list()
count = 0
for _, source_imgs in source_iter:
count += 1
labels = [step_idx] * self.batch_size
generated, = self.sess.run([eval_handle.generator],
feed_dict={
input_handle.real_data: source_imgs,
input_handle.embedding_ids: labels
})
merged_fake_images = merge(scale_back(generated), [self.batch_size, 1])
batch_buffer.append(merged_fake_images)
if len(batch_buffer):
save_concat_images(batch_buffer,
os.path.join(save_dir, "frame_%02d_%02d_step_%02d.png" % (
between[0], between[1], step_idx)))
# restore the embedding variables
print("restore embedding values")
for var, val in embedding_snapshot:
op = tf.assign(var, val, validate_shape=False)
self.sess.run(op)
def train(self, lr=0.0002, epoch=100, schedule=10, resume=True, flip_labels=False,
freeze_encoder=False, fine_tune=None, sample_steps=50, checkpoint_steps=500):
g_vars, d_vars = self.retrieve_trainable_vars(freeze_encoder=freeze_encoder)
input_handle, loss_handle, _, summary_handle = self.retrieve_handles()
if not self.sess:
raise Exception("no session registered")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(loss_handle.d_loss, var_list=d_vars)
g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(loss_handle.g_loss, var_list=g_vars)
tf.global_variables_initializer().run()
real_data = input_handle.real_data
embedding_ids = input_handle.embedding_ids
no_target_data = input_handle.no_target_data
no_target_ids = input_handle.no_target_ids
# filter by one type of labels
data_provider = TrainDataProvider(self.data_dir, filter_by=fine_tune)
total_batches = data_provider.compute_total_batch_num(self.batch_size)
val_batch_iter = data_provider.get_val_iter(self.batch_size)
saver = tf.train.Saver(max_to_keep=3)
summary_writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
if resume:
_, model_dir = self.get_model_id_and_dir()
self.restore_model(saver, model_dir)
current_lr = lr
counter = 0
start_time = time.time()
for ei in range(epoch):
train_batch_iter = data_provider.get_train_iter(self.batch_size)
if (ei + 1) % schedule == 0:
update_lr = current_lr / 2.0
# minimum learning rate guarantee
update_lr = max(update_lr, 0.0002)
print("decay learning rate from %.5f to %.5f" % (current_lr, update_lr))
current_lr = update_lr
for bid, batch in enumerate(train_batch_iter):
counter += 1
labels, batch_images = batch
shuffled_ids = labels[:]
if flip_labels:
np.random.shuffle(shuffled_ids)
# Optimize D
_, batch_d_loss, d_summary = self.sess.run([d_optimizer, loss_handle.d_loss,
summary_handle.d_merged],
feed_dict={
real_data: batch_images,
embedding_ids: labels,
learning_rate: current_lr,
no_target_data: batch_images,
no_target_ids: shuffled_ids
})
# Optimize G
_, batch_g_loss = self.sess.run([g_optimizer, loss_handle.g_loss],
feed_dict={
real_data: batch_images,
embedding_ids: labels,
learning_rate: current_lr,
no_target_data: batch_images,
no_target_ids: shuffled_ids
})
# magic move to Optimize G again
# according to https://github.com/carpedm20/DCGAN-tensorflow
# collect all the losses along the way
_, batch_g_loss, category_loss, cheat_loss, \
const_loss, l1_loss, tv_loss, g_summary = self.sess.run([g_optimizer,
loss_handle.g_loss,
loss_handle.category_loss,
loss_handle.cheat_loss,
loss_handle.const_loss,
loss_handle.l1_loss,
loss_handle.tv_loss,
summary_handle.g_merged],
feed_dict={
real_data: batch_images,
embedding_ids: labels,
learning_rate: current_lr,
no_target_data: batch_images,
no_target_ids: shuffled_ids
})
passed = time.time() - start_time
log_format = "Epoch: [%2d], [%4d/%4d] time: %4.4f, d_loss: %.5f, g_loss: %.5f, " + \
"category_loss: %.5f, cheat_loss: %.5f, const_loss: %.5f, l1_loss: %.5f, tv_loss: %.5f"
print(log_format % (ei, bid, total_batches, passed, batch_d_loss, batch_g_loss,
category_loss, cheat_loss, const_loss, l1_loss, tv_loss))
summary_writer.add_summary(d_summary, counter)
summary_writer.add_summary(g_summary, counter)
if counter % sample_steps == 0:
# sample the current model states with val data
self.validate_model(val_batch_iter, ei, counter)
if counter % checkpoint_steps == 0:
print("Checkpoint: save checkpoint step %d" % counter)
self.checkpoint(saver, counter)
# save the last checkpoint
print("Checkpoint: last checkpoint step %d" % counter)
self.checkpoint(saver, counter)
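# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original zi2zi module). It only
# exercises the public methods defined above; the experiment directory,
# hyper-parameter values and the TensorFlow 1.x session setup are assumptions,
# and experiment_dir/data is assumed to already hold data in the layout
# TrainDataProvider expects.
def _example_training_run(experiment_dir="experiments/demo"):
    model = UNet(experiment_dir=experiment_dir, batch_size=16,
                 embedding_num=40, embedding_dim=128)
    model.build_model(is_training=True, inst_norm=False)
    with tf.Session() as sess:
        model.register_session(sess)
        model.train(lr=0.001, epoch=30, schedule=10, resume=False,
                    sample_steps=50, checkpoint_steps=500)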
| apache-2.0 | -3,483,903,955,147,367,400 | 53.88 | 120 | 0.528426 | false |
nathanhilbert/FPA_Core | openspending/views/admin.py | 1 | 1077 | import colander
from flask import Blueprint, render_template, request, redirect, Response
from flask.ext.login import current_user
from flask import current_app
from openspending.auth import require
from wtforms import Form, TextField, PasswordField, validators
from openspending.model import Dataset
from openspending.admin.helpers import LoadReport
blueprint = Blueprint('findadmin', __name__)
@blueprint.route('/findadmin/dataloader', methods=['GET'])
def dataloader():
    """ Render the login/registration page. """
    if not require.account.is_admin():
        return redirect("/login", code=302)
    return render_template('findadmin/index.html')


@blueprint.route('/findadmin/report')
def report():
    dataset_id = request.args.get("id", None)
    if not dataset_id:
        raise
    dataset = Dataset.by_id(dataset_id)
    if not dataset:
        raise
    lr = LoadReport(dataset)
    return Response(lr.get_output(),
                    mimetype='application/zip',
                    headers={'Content-Disposition': 'attachment;filename=%s.zip' % dataset.name})
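# Illustrative sketch (not part of the original module): mounting this
# blueprint on a Flask application. How FPA_Core actually wires its app
# factory is not shown in this file, so the function below is an assumption.
def _register_findadmin_example(app):
    app.register_blueprint(blueprint)
    return app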
| agpl-3.0 | 7,766,214,459,022,135,000 | 24.642857 | 90 | 0.704735 | false |
pingpan2013/sensor-box-project | sensor_project/genGraphs.py | 1 | 4044 | #!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd # used to convert datetime64 to datetime
import csv
import sys
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
class Gen_Graph:
    def __init__(self, _filename):
        self.filename = _filename
        self.data = []
        self.dtype = []

    def readData(self):
        '''Read the data from .csv file'''
        with open(self.filename, 'r') as file:
            reader = csv.reader(file)
            for row in reader:
                self.data.append(tuple(row))
        return self.data

    def genDtype(self):
        '''Get the data type, always put DATE in the last '''
        for i in xrange(len(self.data[0])):
            if i != len(self.data[0]) - 1:
                self.dtype.append((str(self.data[0][i]), '<f8'))
            else:
                self.dtype.append((self.data[0][i], '<M8[s]'))
        print "Data Type: " + str(self.dtype)
        print '=============================================================='

    def uniqueish_color(self):
        '''Randomly select a color'''
        return plt.cm.gist_ncar(np.random.random())

    def genGraph(self):
        '''Generate the graph with unique y axis'''
        self.genDtype()
        x = np.array(self.data[1:], dtype=self.dtype)
        np.save('./graph_data/data', x)
        t = np.load('./graph_data/data.npy').view(np.recarray)
        fig, ax = plt.subplots(1)

        '''Drawing multiple lines in one graph'''
        for label in self.data[0]:
            if label != 'Time':
                dtype = t['{0}'.format(label)]
                ax.plot(pd.to_datetime(t.Time), dtype)
        ax.set_xlabel(' date ')

        '''Formatting the date'''
        fig.autofmt_xdate()
        ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
        plt.title('Sensor Data Flow')

        '''Create the labels for different lines'''
        labels = list(self.data[0][:-1])
        plt.legend(labels, loc='lower left')
        plt.show()

    def genGraph_m(self):
        '''Generate the graph with multiple y axises'''
        self.genDtype()
        x = np.array(self.data[1:], dtype=self.dtype)
        np.save('./graph_data/data', x)
        t = np.load('./graph_data/data.npy').view(np.recarray)
        fig = plt.figure()
        fig.subplots_adjust(right=0.75)
        ax = fig.add_subplot(111)

        '''Drawing multiple lines with different y axises in one graph'''
        lines = []
        labels = list(self.data[0][:-1])
        for num in xrange(len(self.data[0]) - 1):
            label = labels[num]
            if num == 0:
                dtype = t['{0}'.format(label)]
                line1, = ax.plot(pd.to_datetime(t.Time), dtype, color=self.uniqueish_color())
                lines.append(line1)
                ax.set_ylabel(label)
                ax.set_xlabel('Date')
            elif label != 'Time':
                dtype = t['{0}'.format(label)]
                par = ax.twinx()
                line2, = par.plot(pd.to_datetime(t.Time), dtype, color=self.uniqueish_color())
                lines.append(line2)
                par.set_ylabel(label)

        '''Formatting the date'''
        fig.autofmt_xdate()
        ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
        plt.title('Sensor Data Flow')

        '''Create the labels for different lines'''
        ax.legend(lines, labels, loc='lower left')
        plt.draw()
        plt.show()


def main():
    if len(sys.argv) != 2:
        print "Error with the parameters! Please specify the file path!"
        sys.exit(2)

    filename = sys.argv[1]
    gg = Gen_Graph(filename)
    data = gg.readData()

    print "Original Data: "
    for i in data:
        print i
    print '=============================================================='

    gg.genGraph_m()
    print "Finished Drawing!"


if __name__ == "__main__":
    main()
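# Illustrative sketch (not part of the original script): build a tiny CSV in
# the layout the class expects -- numeric sensor columns first, a "Time"
# column last -- then plot it. The file name and column names are invented
# for this example, and an interactive matplotlib backend is assumed for
# plt.show().
def _demo_run(path='demo_sensor.csv'):
    import os
    rows = [
        ('Temperature', 'Humidity', 'Time'),
        ('21.5', '40.1', '2015-06-01 12:00:00'),
        ('22.0', '41.3', '2015-06-01 12:10:00'),
    ]
    with open(path, 'w') as f:
        csv.writer(f).writerows(rows)
    if not os.path.isdir('./graph_data'):
        os.makedirs('./graph_data')   # genGraph_m() saves a .npy file here
    gg = Gen_Graph(path)
    gg.readData()
    gg.genGraph_m()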
| gpl-3.0 | 6,884,106,537,588,109,000 | 30.348837 | 94 | 0.516815 | false |
opendatatrentino/ckan-api-client | ckan_api_client/tests/functional/client_lowlev/test_data_loss_on_update.py | 1 | 2614 | """
Here we test data loss when updating objects.
In a perfect case, we should just be able to update
things by passing in only the updates, but unfortunately
we need to attach a lot more stuff in order to prevent
them from being deleted.
"""
# ----------------------------------------------------------------------
# todo: write test to check updating "groups"
# ----------------------------------------------------------------------
# todo: write test to check updating "relationships"
# ----------------------------------------------------------------------
# todo: write test to check updating "resources"
# ----------------------------------------------------------------------
# todo: check that resources keep the same id upon update
# - create dataset with some resources
# - update dataset adding a resource and removing another
# - check that resources kept the same id based on URL
# - if not, we have to hack around this.. :(
# ----------------------------------------------------------------------
import copy
from .utils import check_dataset, clean_dataset
from ckan_api_client.tests.utils.diff import diff_mappings
from ckan_api_client.tests.utils.generate import generate_dataset
def test_data_loss_on_update(request, ckan_client_ll):
    """
    Check whether / which data gets lost if not passed back
    with the object during an update.

    Strategy:

    1. We create the dataset
    2. We retrieve the dataset and keep it for later
    3. We send an update
    4. We check that update affected only the desired keys
    """

    # --------------------------------------------------
    # Create a brand new dataset

    stage_1pre = generate_dataset()
    stage_1 = ckan_client_ll.post_dataset(stage_1pre)
    dataset_id = stage_1['id']
    check_dataset(stage_1pre, stage_1)

    # --------------------------------------------------
    # Make sure that the thing we created makes sense

    retrieved = ckan_client_ll.get_dataset(dataset_id)
    assert retrieved == stage_1
    check_dataset(stage_1pre, stage_1)

    # --------------------------------------------------
    # Try updating, then double-check

    stage_2pre = copy.deepcopy(retrieved)
    stage_2pre['title'] = 'My new dataset title'
    stage_2 = ckan_client_ll.put_dataset(stage_2pre)
    assert stage_2['title'] == 'My new dataset title'

    # Compare with previous stage
    diffs = diff_mappings(*map(clean_dataset, (stage_1, stage_2)))
    assert diffs['right'] == diffs['left'] == set()
    assert diffs['differing'] == set(['title'])
    check_dataset(stage_2pre, stage_2)
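def test_diff_mappings_shape_example():
    """
    Illustrative companion test (not in the original module), based only on
    how diff_mappings() is used above: 'left'/'right' are assumed to hold
    keys missing from one side, 'differing' the keys present on both sides
    with different values.
    """
    left = {'title': 'Old title', 'name': 'example'}
    right = {'title': 'New title', 'name': 'example'}
    diffs = diff_mappings(left, right)
    assert diffs['left'] == diffs['right'] == set()
    assert diffs['differing'] == set(['title'])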
| bsd-2-clause | -6,517,960,197,875,200,000 | 34.808219 | 72 | 0.552028 | false |
FederatedAI/FATE | python/federatedml/nn/hetero_nn/backend/tf_keras/data_generator.py | 1 | 1243 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
import numpy as np
class KerasSequenceData(tf.keras.utils.Sequence):
    def __init__(self, X, y=None):
        if X.shape[0] == 0:
            raise ValueError("Data is empty!")

        self.X = X
        if y is None:
            self.y = np.zeros(X.shape[0])
        else:
            self.y = y

    def __len__(self):
        return 1

    def __getitem__(self, idx):
        return self.X, self.y


class KerasSequenceDataConverter(object):
    @classmethod
    def convert_data(cls, x=None, y=None):
        return KerasSequenceData(x, y)
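# Illustrative usage sketch (not part of the original module): wrap numpy
# arrays in the Sequence defined above and pull the single batch it exposes.
# With a recent tf.keras the object can also be passed to Model.fit directly
# (older versions used fit_generator); that part is version dependent and is
# left out here.
def _example_usage():
    x = np.random.rand(8, 4).astype(np.float32)
    y = np.random.randint(0, 2, size=(8,)).astype(np.float32)
    data = KerasSequenceDataConverter.convert_data(x, y)
    assert len(data) == 1
    x_batch, y_batch = data[0]
    return x_batch.shape, y_batch.shape  # ((8, 4), (8,))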
| apache-2.0 | 3,918,844,614,413,614,600 | 24.367347 | 75 | 0.650845 | false |
michal-ruzicka/archivematica | src/archivematicaCommon/lib/databaseFunctions.py | 1 | 14914 | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaCommon
# @author Joseph Perry <[email protected]>
import os
import string
import sys
import uuid
sys.path.append("/usr/share/archivematica/dashboard")
from django.utils import timezone
from main.models import Derivation, Event, File, FileID, FPCommandOutput, Job, SIP, Task, Transfer, UnitVariable
def getUTCDate():
"""Returns a timezone-aware representation of the current datetime in UTC."""
return timezone.now()
def getDeciDate(date):
valid = "." + string.digits
ret = ""
for c in date:
if c in valid:
ret += c
#else:
#ret += replacementChar
return str("{:10.10f}".format(float(ret)))
def insertIntoFiles(fileUUID, filePath, enteredSystem=None, transferUUID="", sipUUID="", use="original"):
"""
Creates a new entry in the Files table using the supplied arguments.
:param str fileUUID:
:param str filePath: The current path of the file on disk. Can contain variables; see the documentation for ReplacementDict for supported names.
:param datetime enteredSystem: Timestamp for the event of file ingestion. Defaults to the current timestamp when the record is created.
:param str transferUUID: UUID for the transfer containing this file. Can be empty. At least one of transferUUID or sipUUID must be defined. Mutually exclusive with sipUUID.
:param str sipUUID: UUID for the SIP containing this file. Can be empty. At least one of transferUUID or sipUUID must be defined. Mutually exclusive with transferUUID.
:param str use: A category used to group the file with others of the same kind. Will be included in the AIP's METS document in the USE attribute. Defaults to "original".
:returns: None
"""
if enteredSystem is None:
enteredSystem = getUTCDate()
kwargs = {
"uuid": fileUUID,
"originallocation": filePath,
"currentlocation": filePath,
"enteredsystem": enteredSystem,
"filegrpuse": use
}
if transferUUID != "" and sipUUID == "":
kwargs["transfer_id"] = transferUUID
elif transferUUID == "" and sipUUID != "":
kwargs["sip_id"] = sipUUID
else:
print >>sys.stderr, "not supported yet - both SIP and transfer UUID's defined (or neither defined)"
print >>sys.stderr, "SIP UUID:", sipUUID
print >>sys.stderr, "transferUUID:", transferUUID
raise Exception("not supported yet - both SIP and transfer UUID's defined (or neither defined)", sipUUID + "-" + transferUUID)
File.objects.create(**kwargs)
def getAgentForFileUUID(fileUUID):
"""
Fetches the ID for the agent associated with the given file, if one exists.
The agent ID is stored in a UnitVariable with the name "activeAgent", associated with either the SIP or the transfer containing the file.
This function will attempt to fetch the unit variable from a SIP first,
then the transfer.
The agent ID is the pk to a row in the Agent table.
Note that this function does not actually verify that an agent with this pk exists, just that the value is contained in a UnitVariable associated with the file's SIP or transfer.
:returns: The agent ID, as a string, or None if no agent could be found.
"""
agent = None
if fileUUID == 'None':
error_message = "Unable to get agent for file: no file UUID provided."
print >>sys.stderr, error_message
raise Exception(error_message)
else:
try:
f = File.objects.get(uuid=fileUUID)
except File.DoesNotExist:
return
if f.sip:
try:
var = UnitVariable.objects.get(unittype='SIP', unituuid=f.sip_id,
variable='activeAgent')
agent = var.variablevalue
except UnitVariable.DoesNotExist:
pass
if f.transfer and agent is None: # agent hasn't been found yet
try:
var = UnitVariable.objects.get(unittype='Transfer',
unituuid=f.transfer_id,
variable='activeAgent')
agent = var.variablevalue
except UnitVariable.DoesNotExist:
pass
return agent
def insertIntoEvents(fileUUID="", eventIdentifierUUID="", eventType="", eventDateTime=None, eventDetail="", eventOutcome="", eventOutcomeDetailNote=""):
"""
Creates a new entry in the Events table using the supplied arguments.
:param str fileUUID: The UUID of the file with which this event is associated. Can be blank.
:param str eventIdentifierUUID: The UUID for the event being generated. If not provided, a new UUID will be calculated using the version 4 scheme.
:param str eventType: Can be blank.
:param datetime eventDateTime: The time at which the event occurred. If not provided, the current date will be used.
:param str eventDetail: Can be blank. Will be used in the eventDetail element in the AIP METS.
:param str eventOutcome: Can be blank. Will be used in the eventOutcome element in the AIP METS.
:param str eventOutcomeDetailNote: Can be blank. Will be used in the eventOutcomeDetailNote element in the AIP METS.
"""
if eventDateTime is None:
eventDateTime = getUTCDate()
agent = getAgentForFileUUID(fileUUID)
if not eventIdentifierUUID:
eventIdentifierUUID = str(uuid.uuid4())
Event.objects.create(event_id=eventIdentifierUUID, file_uuid_id=fileUUID,
event_type=eventType, event_datetime=eventDateTime,
event_detail=eventDetail, event_outcome=eventOutcome,
event_outcome_detail=eventOutcomeDetailNote,
linking_agent=agent)
def insertIntoDerivations(sourceFileUUID="", derivedFileUUID="", relatedEventUUID=""):
"""
Creates a new entry in the Derivations table using the supplied arguments. The two files in this relationship should already exist in the Files table.
:param str sourceFileUUID: The UUID of the original file.
:param str derivedFileUUID: The UUID of the derived file.
:param str relatedEventUUID: The UUID for an event describing the creation of the derived file. Can be blank.
"""
if not sourceFileUUID:
raise ValueError("sourceFileUUID must be specified")
if not derivedFileUUID:
raise ValueError("derivedFileUUID must be specified")
Derivation.objects.create(source_file_id=sourceFileUUID,
derived_file_id=derivedFileUUID,
event_id=relatedEventUUID)
def insertIntoFPCommandOutput(fileUUID="", fitsXMLString="", ruleUUID=""):
"""
Creates a new entry in the FPCommandOutput table using the supplied argument.
This is typically used to store output of file characterization.
This data is intended to be unique per combination of fileUUID and ruleUUID; an exception will be raised if FPCommandOutput data already exists for a file with this ruleUUID.
:param str fileUUID:
:param str fitsXMLString: An XML document, encoded into a string. The name is historical; this can represent XML output from any software.
:param str ruleUUID: The UUID of the FPR rule used to generate this XML data. Foreign key to FPRule.
"""
FPCommandOutput.objects.create(file_id=fileUUID, content=fitsXMLString,
rule_id=ruleUUID)
def insertIntoFilesIDs(fileUUID="", formatName="", formatVersion="", formatRegistryName="", formatRegistryKey=""):
"""
Creates a new entry in the FilesIDs table using the provided data.
This function, and its associated table, may be removed in the future.
"""
f = FileID(file_id=fileUUID,
format_name=formatName,
format_version=formatVersion,
format_registry_name=formatRegistryName,
format_registry_key=formatRegistryKey)
f.save()
#user approved?
#client connected/disconnected.
def logTaskCreatedSQL(taskManager, commandReplacementDic, taskUUID, arguments):
"""
Creates a new entry in the Tasks table using the supplied data.
:param MCPServer.linkTaskManager taskManager: A linkTaskManager subclass instance.
:param ReplacementDict commandReplacementDic: A ReplacementDict or dict instance. %fileUUID% and %relativeLocation% variables will be looked up from this dict.
:param str taskUUID: The UUID to be used for this Task in the database.
:param str arguments: The arguments to be passed to the command when it is executed, as a string. Can contain replacement variables; see ReplacementDict for supported values.
"""
jobUUID = taskManager.jobChainLink.UUID
fileUUID = ""
if "%fileUUID%" in commandReplacementDic:
fileUUID = commandReplacementDic["%fileUUID%"]
taskexec = taskManager.execute
fileName = os.path.basename(os.path.abspath(commandReplacementDic["%relativeLocation%"]))
Task.objects.create(taskuuid=taskUUID,
job_id=jobUUID,
fileuuid=fileUUID,
filename=fileName,
execution=taskexec,
arguments=arguments,
createdtime=getUTCDate())
def logTaskCompletedSQL(task):
"""
Fetches execution data from the completed task and logs it to the database.
Updates the entry in the Tasks table with data in the provided task.
Saves the following fields: exitCode, stdOut, stdError
:param task:
"""
print "Logging task output to db", task.UUID
taskUUID = task.UUID.__str__()
exitCode = task.results["exitCode"].__str__()
stdOut = task.results["stdOut"]
stdError = task.results["stdError"]
task = Task.objects.get(taskuuid=taskUUID)
task.endtime = getUTCDate()
task.exitcode = exitCode
task.stdout = stdOut
task.stderror = stdError
task.save()
def logJobCreatedSQL(job):
"""
Logs a job's properties into the Jobs table in the database.
:param jobChainLink job: A jobChainLink instance.
:returns None:
"""
unitUUID = job.unit.UUID
decDate = getDeciDate("." + str(job.createdDate.microsecond))
if job.unit.owningUnit != None:
unitUUID = job.unit.owningUnit.UUID
Job.objects.create(jobuuid=job.UUID,
jobtype=job.description,
directory=job.unit.currentPath,
sipuuid=unitUUID,
currentstep="Executing command(s)",
unittype=job.unit.__class__.__name__,
microservicegroup=str(job.microserviceGroup),
createdtime=job.createdDate,
createdtimedec=decDate,
microservicechainlink_id=str(job.pk),
subjobof=str(job.subJobOf))
# TODO - un-hardcode executing exeCommand
def fileWasRemoved(fileUUID, utcDate=None, eventDetail = "", eventOutcomeDetailNote = "", eventOutcome=""):
"""
Logs the removal of a file from the database.
Updates the properties of the row in the Files table for the provided fileUUID, and logs the removal in the Events table with an event of type "file removed".
:param str fileUUID:
:param datetime utcDate: The date of the removal. Defaults to the current date.
:param str eventDetail: The eventDetail for the logged event. Can be blank.
:param str eventOutcomeDetailNote: The eventOutcomeDetailNote for the logged event. Can be blank.
:param str eventOutcome: The eventOutcome for the logged event. Can be blank.
"""
if utcDate is None:
utcDate = getUTCDate()
eventIdentifierUUID = uuid.uuid4().__str__()
eventType = "file removed"
eventDateTime = utcDate
insertIntoEvents(fileUUID=fileUUID, \
eventIdentifierUUID=eventIdentifierUUID, \
eventType=eventType, \
eventDateTime=eventDateTime, \
eventDetail=eventDetail, \
eventOutcome=eventOutcome, \
eventOutcomeDetailNote=eventOutcomeDetailNote)
f = File.objects.get(uuid=fileUUID)
f.removedtime = utcDate
f.currentlocation = None
f.save()
def createSIP(path, UUID=None, sip_type='SIP'):
"""
Create a new SIP object for a SIP at the given path.
:param str path: The current path of the SIP on disk. Can contain variables; see the documentation for ReplacementDict for supported names.
:param str UUID: The UUID to be created for the SIP. If not specified, a new UUID will be generated using the version 4 scheme.
:param str sip_type: A string representing the type of the SIP. Defaults to "SIP". The other value typically used is "AIC".
:returns str: The UUID for the created SIP.
"""
if UUID is None:
UUID = str(uuid.uuid4())
print "Creating SIP:", UUID, "-", path
sip = SIP(uuid=UUID,
currentpath=path,
sip_type=sip_type)
sip.save()
return UUID
def getAccessionNumberFromTransfer(UUID):
"""
Fetches the accession number from a transfer, given its UUID.
:param str UUID: The UUID of the transfer, as a string.
:returns str: The accession number, as a string.
:raises ValueError: if the requested Transfer cannot be found.
"""
try:
return Transfer.objects.get(uuid=UUID).accessionid
except Transfer.DoesNotExist:
raise ValueError("No Transfer found for UUID: {}".format(UUID))
def deUnicode(str):
"""
Convert a unicode string into an str by encoding it using UTF-8.
:param unicode: A string. If not already a unicode string, it will be converted to one before encoding.
:returns str: A UTF-8 encoded string, or None if the provided string was None. May be identical to the original string, if the original string contained only ASCII values.
"""
if str == None:
return None
return unicode(str).encode('utf-8')
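def _example_ingest_flow(transfer_uuid, file_path):
    """
    Illustrative sketch (not part of the original module) of how the helpers
    above are typically combined: register a file belonging to a transfer,
    then log an event against it. The UUIDs, paths and event values are
    placeholders, and a configured Django database connection is assumed.
    """
    file_uuid = str(uuid.uuid4())
    insertIntoFiles(fileUUID=file_uuid, filePath=file_path,
                    transferUUID=transfer_uuid, use="original")
    insertIntoEvents(fileUUID=file_uuid,
                     eventType="ingestion",
                     eventDetail="example event for illustration only",
                     eventOutcome="",
                     eventOutcomeDetailNote="")
    return file_uuid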
| agpl-3.0 | 1,886,681,623,791,319,000 | 43.12426 | 178 | 0.671584 | false |
dongweiming/flask_reveal | social/apps/django_app/default/models.py | 1 | 2611 | """Django ORM models for Social Auth"""
from django.db import models
from django.conf import settings
from django.db.utils import IntegrityError
from social.utils import setting_name
from social.storage.django_orm import DjangoUserMixin, \
                                       DjangoAssociationMixin, \
                                       DjangoNonceMixin, \
                                       BaseDjangoStorage
from social.apps.django_app.default.fields import JSONField

USER_MODEL = getattr(settings, setting_name('USER_MODEL'), None) or \
             getattr(settings, 'AUTH_USER_MODEL', None) or \
             'auth.User'
UID_LENGTH = getattr(settings, setting_name('UID_LENGTH'), 255)


class UserSocialAuth(models.Model, DjangoUserMixin):
    """Social Auth association model"""
    user = models.ForeignKey(USER_MODEL, related_name='social_auth')
    provider = models.CharField(max_length=32)
    uid = models.CharField(max_length=UID_LENGTH)
    extra_data = JSONField()

    class Meta:
        """Meta data"""
        unique_together = ('provider', 'uid')
        db_table = 'social_auth_usersocialauth'

    @classmethod
    def get_social_auth(cls, provider, uid):
        try:
            return cls.objects.select_related('user').get(provider=provider,
                                                          uid=uid)
        except UserSocialAuth.DoesNotExist:
            return None

    @classmethod
    def username_max_length(cls):
        field = UserSocialAuth.user_model()._meta.get_field('username')
        return field.max_length

    @classmethod
    def user_model(cls):
        return UserSocialAuth._meta.get_field('user').rel.to


class Nonce(models.Model, DjangoNonceMixin):
    """One use numbers"""
    server_url = models.CharField(max_length=255)
    timestamp = models.IntegerField()
    salt = models.CharField(max_length=40)

    class Meta:
        db_table = 'social_auth_nonce'


class Association(models.Model, DjangoAssociationMixin):
    """OpenId account association"""
    server_url = models.CharField(max_length=255)
    handle = models.CharField(max_length=255)
    secret = models.CharField(max_length=255)  # Stored base64 encoded
    issued = models.IntegerField()
    lifetime = models.IntegerField()
    assoc_type = models.CharField(max_length=64)

    class Meta:
        db_table = 'social_auth_association'


class DjangoStorage(BaseDjangoStorage):
    user = UserSocialAuth
    nonce = Nonce
    association = Association

    @classmethod
    def is_integrity_error(cls, exception):
        return exception.__class__ is IntegrityError
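# Illustrative sketch (not part of the original module): the lookup helper
# defined above in action. The provider name and uid are placeholders.
def _example_lookup():
    social = UserSocialAuth.get_social_auth('github', '12345')
    if social is None:
        return None
    return social.user, social.extra_data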
| bsd-3-clause | -8,976,431,457,367,216,000 | 31.6375 | 76 | 0.646879 | false |
chromium/chromium | tools/android/native_lib_memory/java_code_pages_pss.py | 7 | 2976 | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the total PSS attributed to another app in Chrome's mappings.
This script assumes a device with Monochrome, and requires root access.
For instance, to get the part of Chrome's memory footprint coming from GMSCore
code and bytecode pages:
$ tools/android/native_lib_memory/java_code_pages_pss.py
--chrome-package com.android.chrome
--app-package com.google.android.gms --verbose
"""
from __future__ import print_function
import argparse
import logging
import os
import re
import sys
import parse_smaps
_SRC_PATH = os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir)
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils


def _GetPssInKb(mappings, app_package, verbose):
  """Returns the total PSS from mappings.

  Args:
    mappings: ([parse_smaps.Mapping]) List of mappings.
    app_package: (str) App package to look for.
    verbose: (bool) Verbose output or not.

  Returns:
    (executable_pss (int), other_pss (int)) Executable mappings and others,
    in kB.
  """
  executable_pss, other_pss = (0, 0)
  for mapping in mappings:
    if app_package in mapping.pathname:
      if mapping.permissions == 'r-xp':
        executable_pss += mapping.fields['Pss']
      else:
        other_pss += mapping.fields['Pss']
      if verbose:
        print(mapping.ToString())
  return (executable_pss, other_pss)


def _CreateArgumentParser():
  parser = argparse.ArgumentParser()
  parser.add_argument('--chrome-package', help='Chrome package to look for.',
                      required=True)
  parser.add_argument('--app-package', help='Application to inspect.',
                      required=True)
  parser.add_argument('--verbose', help='Verbose output.',
                      action='store_true')
  return parser


def main():
  parser = _CreateArgumentParser()
  args = parser.parse_args()
  devices = device_utils.DeviceUtils.HealthyDevices()
  if not devices:
    logging.error('No connected devices')
    return
  device = devices[0]
  device.EnableRoot()
  processes = device.ListProcesses(args.chrome_package)
  logging.basicConfig(level=logging.INFO)
  logging.info('Processes:\n\t' + '\n\t'.join(p.name for p in processes))
  total_executable_pss_kb, total_other_pss_kb = (0, 0)
  for process in processes:
    mappings = parse_smaps.ParseProcSmaps(device, process.pid)
    executable_pss_kb, other_pss_kb = _GetPssInKb(
        mappings, args.app_package, args.verbose)
    total_executable_pss_kb += executable_pss_kb
    total_other_pss_kb += other_pss_kb
  print('Total executable PSS = %dkB' % total_executable_pss_kb)
  print('Total other mappings PSS = %dkB' % total_other_pss_kb)


if __name__ == '__main__':
  main()
| bsd-3-clause | -2,056,070,294,114,388,500 | 31.347826 | 78 | 0.679772 | false |
jalexvig/tensorflow | tensorflow/python/ops/rnn.py | 1 | 62998 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_concat = rnn_cell_impl._concat
# pylint: enable=protected-access
def _transpose_batch_time(x):
"""Transposes the batch and time dimensions of a Tensor.
If the input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A Tensor.
Returns:
x transposed along the first two dimensions.
"""
x_static_shape = x.get_shape()
if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
return x
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape[1].value, x_static_shape[0].value
]).concatenate(x_static_shape[2:]))
return x_t
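# Illustrative sketch (not part of the original module): _transpose_batch_time
# swaps the two leading axes, e.g. [batch_size, max_time, depth] becomes
# [max_time, batch_size, depth]. The concrete shape below is an example only.
def _transpose_batch_time_example():
  x = array_ops.zeros([32, 10, 8])  # [batch_size, max_time, depth]
  x_t = _transpose_batch_time(x)    # [max_time, batch_size, depth]
  return x_t.shape                  # TensorShape([10, 32, 8])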
def _best_effort_input_batch_size(flat_input):
"""Get static input batch size if available, with fallback to the dynamic one.
Args:
flat_input: An iterable of time major input Tensors of shape
`[max_time, batch_size, ...]`.
All inputs should have compatible batch sizes.
Returns:
The batch size in Python integer if available, or a scalar Tensor otherwise.
Raises:
ValueError: if there is any input with an invalid shape.
"""
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2" % input_)
batch_size = shape[1].value
if batch_size is not None:
return batch_size
# Fallback to the dynamic batch size of the first input.
return array_ops.shape(flat_input[0])[1]
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _maybe_tensor_shape_from_tensor(shape):
if isinstance(shape, ops.Tensor):
return tensor_shape.as_shape(tensor_util.constant_value(shape))
else:
return shape
def _should_cache():
"""Returns True if a default caching device should be set, otherwise False."""
if context.executing_eagerly():
return False
# Don't set a caching device when running in a loop, since it is possible that
# train steps could be wrapped in a tf.while_loop. In that scenario caching
# prevents forward computations in loop iterations from re-reading the
# updated weights.
ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
return control_flow_util.GetContainingWhileContext(ctxt) is None
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on `sequence_length`.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on whether we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_length[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_length[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: int32 `Tensor` scalar.
sequence_length: int32 `Tensor` vector of size [batch_size].
min_sequence_length: int32 `Tensor` scalar, min of sequence_length.
max_sequence_length: int32 `Tensor` scalar, max of sequence_length.
zero_output: `Tensor` vector of shape [output_size].
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
# Vector describing which batch entries are finished.
copy_cond = time >= sequence_length
def _copy_one_through(output, new_output):
# TensorArray and scalar get passed through.
if isinstance(output, tensor_array_ops.TensorArray):
return new_output
if output.shape.ndims == 0:
return new_output
# Otherwise propagate the old or the new value.
with ops.colocate_with(new_output):
return array_ops.where(copy_cond, output, new_output)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
if not isinstance(substate, tensor_array_ops.TensorArray):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
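# Illustrative NumPy sketch (not used by this module) of the selective copy
# `_rnn_step` performs for one time step: finished rows keep the old state and
# emit zeros, unfinished rows take the freshly computed values. The shapes and
# names below are assumptions made only for the example.
def _example_selective_copy(time, sequence_length, zero_output, old_state,
                            new_output, new_state):
  import numpy as np
  finished = time >= sequence_length                 # [batch_size] bool vector
  final_output = np.where(finished[:, None], zero_output, new_output)
  final_state = np.where(finished[:, None], old_state, new_state)
  return final_output, final_state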
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
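# Illustrative sketch only (assumes `import tensorflow as tf` and graph mode):
# with lengths [2, 3], the first batch entry is reversed over its first two
# steps only, leaving its third step in place, while the second entry is
# reversed over all three steps.
def _example_reverse_seq():
  import tensorflow as tf
  seq = [tf.fill([2, 1], float(t)) for t in range(3)]    # 3 steps, batch of 2
  lengths = tf.constant([2, 3], dtype=tf.int32)
  return _reverse_seq(seq, lengths)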
@tf_export("nn.bidirectional_dynamic_rnn")
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Takes input and builds independent forward and backward RNNs. The input_size
of forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such elements.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such elements.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences in the batch.
If not provided, all batch entries are assumed to be full sequences; and
time reversal is applied from time `0` to `max_time` for each sequence.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_axis = 1
batch_axis = 0
else:
time_axis = 0
batch_axis = 1
def _reverse(input_, seq_lengths, seq_axis, batch_axis):
if seq_lengths is not None:
return array_ops.reverse_sequence(
input=input_, seq_lengths=seq_lengths,
seq_axis=seq_axis, batch_axis=batch_axis)
else:
return array_ops.reverse(input_, axis=[seq_axis])
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = _reverse(
inputs, seq_lengths=sequence_length,
seq_axis=time_axis, batch_axis=batch_axis)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = _reverse(
tmp, seq_lengths=sequence_length,
seq_axis=time_axis, batch_axis=batch_axis)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
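# Minimal usage sketch (assumptions: TF 1.x graph mode, `import tensorflow as
# tf`, and placeholder sizes chosen only for illustration).
def _example_bidirectional_dynamic_rnn():
  import tensorflow as tf
  inputs = tf.placeholder(tf.float32, [None, 20, 64])     # [batch, time, depth]
  lengths = tf.placeholder(tf.int32, [None])
  cell_fw = tf.nn.rnn_cell.LSTMCell(128)
  cell_bw = tf.nn.rnn_cell.LSTMCell(128)
  (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
      cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=tf.float32)
  # Concatenate the two directions along the feature axis if a single tensor
  # is preferred, as suggested in the docstring above.
  return tf.concat([out_fw, out_bw], 2)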
@tf_export("nn.dynamic_rnn")
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
Performs fully dynamic unrolling of `inputs`.
Example:
```python
# create a BasicRNNCell
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
# 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
# defining initial state
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
# 'state' is a tensor of shape [batch_size, cell_state_size]
outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
initial_state=initial_state,
dtype=tf.float32)
```
```python
# create 2 LSTMCells
rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
# create a RNN cell composed sequentially of a number of RNNCells
multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
# 'outputs' is a tensor of shape [batch_size, max_time, 256]
# 'state' is a N-tuple where N is the number of LSTMCells containing a
# tf.contrib.rnn.LSTMStateTuple for each cell
outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
inputs=data,
dtype=tf.float32)
```
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
Used to copy-through state and zero-out outputs when past a batch
element's sequence length. So it's more for performance than correctness.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes. If cells are `LSTMCells`
`state` will be a tuple containing a `LSTMStateTuple` for each cell.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
with vs.variable_scope(scope or "rnn") as varscope:
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
batch_size = _best_effort_input_batch_size(flat_input)
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If there is no initial_state, you must give a dtype.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if not context.executing_eagerly() and sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
outputs = nest.map_structure(_transpose_batch_time, outputs)
return (outputs, final_state)
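# Minimal sketch complementing the docstring examples above: the same call with
# `sequence_length`, which zeroes outputs and copies state through for finished
# rows. Assumes TF 1.x graph mode and `import tensorflow as tf`; sizes are
# illustrative only.
def _example_dynamic_rnn_with_lengths():
  import tensorflow as tf
  data = tf.placeholder(tf.float32, [None, 50, 32])       # [batch, time, depth]
  lengths = tf.placeholder(tf.int32, [None])
  cell = tf.nn.rnn_cell.GRUCell(64)
  outputs, state = tf.nn.dynamic_rnn(
      cell, data, sequence_length=lengths, dtype=tf.float32)
  return outputs, state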
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = _best_effort_input_batch_size(flat_input)
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _concat(batch_size, size)
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
else:
max_sequence_length = time_steps
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, element_shape, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
element_shape=element_shape,
tensor_array_name=base_name + name)
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
output_ta = tuple(
_create_ta(
"output_%d" % i,
element_shape=(tensor_shape.TensorShape([const_batch_size])
.concatenate(
_maybe_tensor_shape_from_tensor(out_size))),
dtype=_infer_state_dtype(dtype, state))
for i, out_size in enumerate(flat_output_size))
input_ta = tuple(
_create_ta(
"input_%d" % i,
element_shape=flat_input_i.shape[1:],
dtype=flat_input_i.dtype)
for i, flat_input_i in enumerate(flat_input))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
else:
output_ta = tuple([0 for _ in range(time_steps.numpy())]
for i in range(len(flat_output_size)))
input_ta = flat_input
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
if in_graph_mode:
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
else:
input_t = tuple(ta[time.numpy()] for ta in input_ta)
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
if in_graph_mode:
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
else:
for ta, out in zip(output_ta_t, output):
ta[time.numpy()] = out
return (time + 1, output_ta_t, new_state)
if in_graph_mode:
# Make sure that we run at least 1 step, if necessary, to ensure
# the TensorArrays pick up the dynamic shape.
loop_bound = math_ops.minimum(
time_steps, math_ops.maximum(1, max_sequence_length))
else:
# Using max_sequence_length isn't currently supported in the Eager branch.
loop_bound = time_steps
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < loop_bound,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
maximum_iterations=time_steps,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
if in_graph_mode:
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _concat(
[const_time_steps, const_batch_size], output_size, static=True)
output.set_shape(shape)
else:
final_outputs = output_final_ta
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
if not in_graph_mode:
final_outputs = nest.map_structure_up_to(
cell.output_size, lambda x: array_ops.stack(x, axis=0), final_outputs)
return (final_outputs, final_state)
@tf_export("nn.raw_rnn")
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, emit_structure, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit_structure), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors which is aggregated in the `emit_ta` inside the
`while_loop`. For the first call to `loop_fn`, the `emit_output`
corresponds to the `emit_structure` which is then used to determine the
size of the `zero_tensor` for the `emit_ta` (defaults to
`cell.output_size`). For the subsequent calls to the `loop_fn`, the
`emit_output` corresponds to the actual output tensor
that is to be aggregated in the `emit_ta`. The parameter `cell_state`
and output `next_cell_state` may be either a single or (possibly nested)
tuple of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
const_batch_size = batch_size
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else
array_ops.shape(emit) for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i,
dynamic_size=True,
element_shape=(tensor_shape.TensorShape([const_batch_size])
.concatenate(
_maybe_tensor_shape_from_tensor(size_i))),
size=0,
name="rnn_output_%d" % i)
for i, (dtype_i, size_i)
in enumerate(zip(flat_emit_dtypes, flat_emit_size))]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(_concat(batch_size, size_i), dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
def copy_fn(cur_i, cand_i):
# TensorArray and scalar get passed through.
if isinstance(cur_i, tensor_array_ops.TensorArray):
return cand_i
if cur_i.shape.ndims == 0:
return cand_i
# Otherwise propagate the old or the new value.
with ops.colocate_with(cand_i):
return array_ops.where(elements_finished, cur_i, cand_i)
return nest.map_structure(copy_fn, current, candidate)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_ta = nest.map_structure(
lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
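# A second, minimal sketch of `raw_rnn` (assumptions: TF 1.x graph mode,
# `import tensorflow as tf`; all sizes below are illustrative). It decodes for
# a fixed number of steps, feeds each cell output back in as the next input,
# and threads a step counter through `loop_state`.
def _example_raw_rnn_feedback(batch_size=8, num_units=16, max_steps=10):
  import tensorflow as tf
  cell = tf.nn.rnn_cell.GRUCell(num_units)
  start_tokens = tf.zeros([batch_size, num_units])
  lengths = tf.constant([max_steps] * batch_size, dtype=tf.int32)
  def loop_fn(time, cell_output, cell_state, loop_state):
    if cell_output is None:                     # time == 0: initialize
      next_cell_state = cell.zero_state(batch_size, tf.float32)
      next_input = start_tokens
      next_loop_state = tf.zeros([], dtype=tf.int32)
    else:
      next_cell_state = cell_state
      next_input = cell_output                  # feed the output back in
      next_loop_state = loop_state + 1
    elements_finished = (time >= lengths)
    return (elements_finished, next_input, next_cell_state,
            cell_output, next_loop_state)
  emit_ta, final_state, steps_taken = raw_rnn(cell, loop_fn)
  return emit_ta.stack(), final_state, steps_taken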
@tf_export("nn.static_rnn")
def static_rnn(cell,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
The simplest form of RNN network generated is:
```python
state = cell.zero_state(...)
outputs = []
for input_ in inputs:
output, state = cell(input_, state)
outputs.append(output)
return (outputs, state)
```
However, a few other options are available:
An initial state can be provided.
If the sequence_length vector is provided, dynamic calculation is performed.
This method of calculation does not compute the RNN steps past the maximum
sequence length of the minibatch (thus saving computational time),
and properly propagates the state at an example's sequence length
to the final state output.
The dynamic calculation performed is, at time `t` for batch row `b`,
```python
(output, state)(b, t) =
(t >= sequence_length(b))
? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
: cell(input(b, t), state(b, t - 1))
```
Args:
cell: An instance of RNNCell.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs.
An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
- outputs is a length T list of outputs (one for each input), or a nested
tuple of such elements.
- state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the input depth
(column size) cannot be inferred from inputs via shape inference.
"""
rnn_cell_impl.assert_like_rnncell("cell", cell)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
outputs = []
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if _should_cache():
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
# Obtain the first sequence of the input
first_input = inputs
while nest.is_sequence(first_input):
first_input = first_input[0]
# Temporarily avoid EmbeddingWrapper and seq2seq badness
# TODO(lukaszkaiser): remove EmbeddingWrapper
if first_input.get_shape().ndims != 1:
input_shape = first_input.get_shape().with_rank_at_least(2)
fixed_batch_size = input_shape[0]
flat_inputs = nest.flatten(inputs)
for flat_input in flat_inputs:
input_shape = flat_input.get_shape().with_rank_at_least(2)
batch_size, input_size = input_shape[0], input_shape[1:]
fixed_batch_size.merge_with(batch_size)
for i, size in enumerate(input_size):
if size.value is None:
raise ValueError(
"Input size (dimension %d of inputs) must be accessible via "
"shape inference, but saw value None." % i)
else:
fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]
if fixed_batch_size.value:
batch_size = fixed_batch_size.value
else:
batch_size = array_ops.shape(first_input)[0]
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, "
"dtype must be specified")
state = cell.zero_state(batch_size, dtype)
if sequence_length is not None: # Prepare variables
sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size")
def _create_zero_output(output_size):
# convert int to TensorShape if necessary
size = _concat(batch_size, output_size)
output = array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
shape = _concat(fixed_batch_size.value, output_size, static=True)
output.set_shape(tensor_shape.TensorShape(shape))
return output
output_size = cell.output_size
flat_output_size = nest.flatten(output_size)
flat_zero_output = tuple(
_create_zero_output(size) for size in flat_output_size)
zero_output = nest.pack_sequence_as(
structure=output_size, flat_sequence=flat_zero_output)
sequence_length = math_ops.to_int32(sequence_length)
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
for time, input_ in enumerate(inputs):
if time > 0:
varscope.reuse_variables()
# pylint: disable=cell-var-from-loop
call_cell = lambda: cell(input_, state)
# pylint: enable=cell-var-from-loop
if sequence_length is not None:
(output, state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=cell.state_size)
else:
(output, state) = call_cell()
outputs.append(output)
return (outputs, state)
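# Minimal usage sketch (assumes TF 1.x graph mode and `import tensorflow as
# tf`; sizes are illustrative). Unlike `dynamic_rnn`, `static_rnn` takes a
# Python list with one tensor per time step rather than a single 3-D tensor.
def _example_static_rnn(max_time=10):
  import tensorflow as tf
  inputs = [tf.placeholder(tf.float32, [None, 32]) for _ in range(max_time)]
  lengths = tf.placeholder(tf.int32, [None])
  cell = tf.nn.rnn_cell.BasicLSTMCell(64)
  outputs, state = tf.nn.static_rnn(
      cell, inputs, dtype=tf.float32, sequence_length=lengths)
  return outputs, state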
@tf_export("nn.static_state_saving_rnn")
def static_state_saving_rnn(cell,
inputs,
state_saver,
state_name,
sequence_length=None,
scope=None):
"""RNN that accepts a state saver for time-truncated RNN calculation.
Args:
cell: An instance of `RNNCell`.
inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
state_saver. If the cell returns tuples of states (i.e.,
`cell.state_size` is a tuple) then `state_name` should be a tuple of
strings having the same length as `cell.state_size`. Otherwise it should
be a single string.
sequence_length: (optional) An int32/int64 vector size [batch_size].
See the documentation for rnn() for more details about sequence_length.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input)
state is the final state
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If `inputs` is `None` or an empty list, or if the arity and
type of `state_name` does not match that of `cell.state_size`.
"""
state_size = cell.state_size
state_is_tuple = nest.is_sequence(state_size)
state_name_tuple = nest.is_sequence(state_name)
if state_is_tuple != state_name_tuple:
raise ValueError("state_name should be the same type as cell.state_size. "
"state_name: %s, cell.state_size: %s" % (str(state_name),
str(state_size)))
if state_is_tuple:
state_name_flat = nest.flatten(state_name)
state_size_flat = nest.flatten(state_size)
if len(state_name_flat) != len(state_size_flat):
raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" %
(len(state_name_flat), len(state_size_flat)))
initial_state = nest.pack_sequence_as(
structure=state_size,
flat_sequence=[state_saver.state(s) for s in state_name_flat])
else:
initial_state = state_saver.state(state_name)
(outputs, state) = static_rnn(
cell,
inputs,
initial_state=initial_state,
sequence_length=sequence_length,
scope=scope)
if state_is_tuple:
flat_state = nest.flatten(state)
state_name = nest.flatten(state_name)
save_state = [
state_saver.save_state(name, substate)
for name, substate in zip(state_name, flat_state)
]
else:
save_state = [state_saver.save_state(state_name, state)]
with ops.control_dependencies(save_state):
last_output = outputs[-1]
flat_last_output = nest.flatten(last_output)
flat_last_output = [
array_ops.identity(output) for output in flat_last_output
]
outputs[-1] = nest.pack_sequence_as(
structure=last_output, flat_sequence=flat_last_output)
if state_is_tuple:
state = nest.pack_sequence_as(
structure=state,
flat_sequence=[array_ops.identity(s) for s in flat_state])
else:
state = array_ops.identity(state)
return (outputs, state)
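# Illustrative sketch only: a toy in-graph "state saver" exposing the
# `state` / `save_state` interface described above, backed by variables. Real
# pipelines typically obtain a state saver from the batching utilities in
# tf.contrib.training; this class and all sizes here are assumptions made for
# the example.
def _example_static_state_saving_rnn(batch_size=4, num_units=8, max_time=5):
  import tensorflow as tf
  class _ToyStateSaver(object):
    def __init__(self):
      self._vars = {}
    def state(self, name):
      if name not in self._vars:
        self._vars[name] = tf.Variable(
            tf.zeros([batch_size, num_units]), trainable=False, name=name)
      return self._vars[name]
    def save_state(self, name, value):
      return tf.assign(self._vars[name], value)
  saver = _ToyStateSaver()
  inputs = [tf.placeholder(tf.float32, [batch_size, 16])
            for _ in range(max_time)]
  cell = tf.nn.rnn_cell.GRUCell(num_units)
  outputs, state = static_state_saving_rnn(
      cell, inputs, state_saver=saver, state_name="gru_state")
  return outputs, state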
@tf_export("nn.static_bidirectional_rnn")
def static_bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states are
ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, input_size], or a nested tuple of such elements.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
outputs is a length `T` list of outputs (one for each input), which
are depth-concatenated forward and backward outputs.
output_state_fw is the final state of the forward rnn.
output_state_bw is the final state of the backward rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
ValueError: If inputs is None or an empty list.
"""
rnn_cell_impl.assert_like_rnncell("cell_fw", cell_fw)
rnn_cell_impl.assert_like_rnncell("cell_bw", cell_bw)
if not nest.is_sequence(inputs):
raise TypeError("inputs must be a sequence")
if not inputs:
raise ValueError("inputs must not be empty")
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = static_rnn(
cell_fw,
inputs,
initial_state_fw,
dtype,
sequence_length,
scope=fw_scope)
# Backward direction
with vs.variable_scope("bw") as bw_scope:
reversed_inputs = _reverse_seq(inputs, sequence_length)
tmp, output_state_bw = static_rnn(
cell_bw,
reversed_inputs,
initial_state_bw,
dtype,
sequence_length,
scope=bw_scope)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
flat_output_fw = nest.flatten(output_fw)
flat_output_bw = nest.flatten(output_bw)
flat_outputs = tuple(
array_ops.concat([fw, bw], 1)
for fw, bw in zip(flat_output_fw, flat_output_bw))
outputs = nest.pack_sequence_as(
structure=output_fw, flat_sequence=flat_outputs)
return (outputs, output_state_fw, output_state_bw)
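# Minimal usage sketch (assumes TF 1.x graph mode and `import tensorflow as
# tf`; sizes are illustrative). Each returned output is already the
# depth-concatenation of the forward and backward outputs for that time step.
def _example_static_bidirectional_rnn(max_time=10):
  import tensorflow as tf
  inputs = [tf.placeholder(tf.float32, [None, 32]) for _ in range(max_time)]
  lengths = tf.placeholder(tf.int32, [None])
  cell_fw = tf.nn.rnn_cell.LSTMCell(64)
  cell_bw = tf.nn.rnn_cell.LSTMCell(64)
  outputs, state_fw, state_bw = tf.nn.static_bidirectional_rnn(
      cell_fw, cell_bw, inputs, dtype=tf.float32, sequence_length=lengths)
  return outputs, state_fw, state_bw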
| apache-2.0 | 2,907,007,333,491,746,300 | 40.473338 | 96 | 0.662418 | false |
Kuzapura/pypy | globals.py | 1 | 1650 | #!/usr/bin/python
#-*- coding: utf-8 -*-
# Author: Adrian Czapla
# Date: 2016
################################################################################
import pygame
import pygame.image as img
from pygame import transform
################################################################################
display_w, display_h = 800, 600
display = pygame.display.set_mode( (display_w, display_h) )
pygame.display.set_caption( 'Springifilous' )
#world_img = img.load( 'background.png' )
actor_img = img.load( 'actor.png' )
actor_img = transform.scale( actor_img, (actor_img.get_width() // 5, actor_img.get_height() // 5) )
cloud_img = img.load( 'lakitu_cloud.png' )
cloud_img = transform.scale( cloud_img, (cloud_img.get_width() // 4, cloud_img.get_height() // 4) )
bubble_img = img.load( 'bubble.png' )
bubble_img = transform.scale( bubble_img, (bubble_img.get_width() // 8, bubble_img.get_height() // 8) )
coin_sprite = img.load('coin_gold.png')
coin_sprite = transform.scale( coin_sprite, (coin_sprite.get_width() * 2, coin_sprite.get_height() * 2) )
coin_sprite_w, coin_sprite_h = coin_sprite.get_width(), coin_sprite.get_height()
coin_img = pygame.Surface( (coin_sprite_h, coin_sprite_h), pygame.SRCALPHA, 32 )
clock = pygame.time.Clock()
frames_per_second = 24
dt = 1. / frames_per_second
g, buoyancy = 9.81, 15.0
key_F, jump_F, v_max = 30., 210., 10.
air_resistance, water_resistance, surface_friction, elasticity = 0.15, 0.4, 0.5, 0.3
underwater_level = False
continue_game = True
won = False
points_gathered = 0
objects = {}
show_stats = False
################################################################################
| apache-2.0 | -5,745,554,665,870,129,000 | 34.106383 | 105 | 0.58303 | false |
Azure/azure-sdk-for-python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_07_01/models/_models_py3.py | 1 | 122965 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._container_service_client_enums import *
class SubResource(msrest.serialization.Model):
"""Reference to another subresource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource group. This name can be
used to access the resource.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SubResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
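# Illustrative sketch only (not generated code): like other msrest models in
# this package, the classes here can be round-tripped to and from plain dicts
# via `deserialize` / `serialize`. The resource ID below is a made-up
# placeholder, not a real Azure resource.
def _example_subresource_roundtrip():
    data = {
        "id": "/subscriptions/00000000-0000-0000-0000-000000000000"
              "/resourceGroups/rg/providers/Microsoft.ContainerService"
              "/managedClusters/cluster/agentPools/pool1",
        "name": "pool1",
        "type": "Microsoft.ContainerService/managedClusters/agentPools",
    }
    sub = SubResource.deserialize(data)
    return sub.name, sub.serialize(keep_readonly=True)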
class AgentPool(SubResource):
"""Agent Pool.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource group. This name can be
used to access the resource.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:type count: int
:param vm_size: Size of agent VMs. Possible values include: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", "Standard_NV6".
:type vm_size: str or
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to
Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling.
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling.
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler.
:type enable_auto_scaling: bool
:param type_properties_type: AgentPoolType represents types of an agent pool. Possible values
include: "VirtualMachineScaleSets", "AvailabilitySet".
:type type_properties_type: str or
~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolType
:param mode: AgentPoolMode represents mode of an agent pool. Possible values include: "System",
"User".
:type mode: str or ~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolMode
:param orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:type orchestrator_version: str
:ivar node_image_version: Version of node image.
:vartype node_image_version: str
:param upgrade_settings: Settings for upgrading the agentpool.
:type upgrade_settings:
~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolUpgradeSettings
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:type availability_zones: list[str]
:param enable_node_public_ip: Enable public IP for nodes.
:type enable_node_public_ip: bool
:param scale_set_priority: ScaleSetPriority to be used to specify the virtual machine scale set
priority. Defaults to Regular. Possible values include: "Spot", "Regular". Default value:
"Regular".
:type scale_set_priority: str or
~azure.mgmt.containerservice.v2020_07_01.models.ScaleSetPriority
:param scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify the eviction policy
for the Spot virtual machine scale set. Defaults to Delete. Possible values include: "Delete",
"Deallocate". Default value: "Delete".
:type scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_07_01.models.ScaleSetEvictionPolicy
:param spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to
pay in US Dollars. Possible values are any decimal value greater than zero, or -1, which
indicates that the maximum price defaults to the on-demand price.
:type spot_max_price: float
:param tags: A set of tags. Agent pool tags to be persisted on the agent pool virtual machine
scale set.
:type tags: dict[str, str]
:param node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:type node_labels: dict[str, str]
:param node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:type node_taints: list[str]
:param proximity_placement_group_id: The ID for Proximity Placement Group.
:type proximity_placement_group_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'os_disk_size_gb': {'maximum': 1023, 'minimum': 0},
'node_image_version': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'count': {'key': 'properties.count', 'type': 'int'},
'vm_size': {'key': 'properties.vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'properties.osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'properties.vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'properties.maxPods', 'type': 'int'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'max_count': {'key': 'properties.maxCount', 'type': 'int'},
'min_count': {'key': 'properties.minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'properties.enableAutoScaling', 'type': 'bool'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'mode': {'key': 'properties.mode', 'type': 'str'},
'orchestrator_version': {'key': 'properties.orchestratorVersion', 'type': 'str'},
'node_image_version': {'key': 'properties.nodeImageVersion', 'type': 'str'},
'upgrade_settings': {'key': 'properties.upgradeSettings', 'type': 'AgentPoolUpgradeSettings'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'availability_zones': {'key': 'properties.availabilityZones', 'type': '[str]'},
'enable_node_public_ip': {'key': 'properties.enableNodePublicIP', 'type': 'bool'},
'scale_set_priority': {'key': 'properties.scaleSetPriority', 'type': 'str'},
'scale_set_eviction_policy': {'key': 'properties.scaleSetEvictionPolicy', 'type': 'str'},
'spot_max_price': {'key': 'properties.spotMaxPrice', 'type': 'float'},
'tags': {'key': 'properties.tags', 'type': '{str}'},
'node_labels': {'key': 'properties.nodeLabels', 'type': '{str}'},
'node_taints': {'key': 'properties.nodeTaints', 'type': '[str]'},
'proximity_placement_group_id': {'key': 'properties.proximityPlacementGroupID', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
vm_size: Optional[Union[str, "ContainerServiceVMSizeTypes"]] = None,
os_disk_size_gb: Optional[int] = None,
vnet_subnet_id: Optional[str] = None,
max_pods: Optional[int] = None,
os_type: Optional[Union[str, "OSType"]] = "Linux",
max_count: Optional[int] = None,
min_count: Optional[int] = None,
enable_auto_scaling: Optional[bool] = None,
type_properties_type: Optional[Union[str, "AgentPoolType"]] = None,
mode: Optional[Union[str, "AgentPoolMode"]] = None,
orchestrator_version: Optional[str] = None,
upgrade_settings: Optional["AgentPoolUpgradeSettings"] = None,
availability_zones: Optional[List[str]] = None,
enable_node_public_ip: Optional[bool] = None,
scale_set_priority: Optional[Union[str, "ScaleSetPriority"]] = "Regular",
scale_set_eviction_policy: Optional[Union[str, "ScaleSetEvictionPolicy"]] = "Delete",
spot_max_price: Optional[float] = -1,
tags: Optional[Dict[str, str]] = None,
node_labels: Optional[Dict[str, str]] = None,
node_taints: Optional[List[str]] = None,
proximity_placement_group_id: Optional[str] = None,
**kwargs
):
super(AgentPool, self).__init__(**kwargs)
self.count = count
self.vm_size = vm_size
self.os_disk_size_gb = os_disk_size_gb
self.vnet_subnet_id = vnet_subnet_id
self.max_pods = max_pods
self.os_type = os_type
self.max_count = max_count
self.min_count = min_count
self.enable_auto_scaling = enable_auto_scaling
self.type_properties_type = type_properties_type
self.mode = mode
self.orchestrator_version = orchestrator_version
self.node_image_version = None
self.upgrade_settings = upgrade_settings
self.provisioning_state = None
self.availability_zones = availability_zones
self.enable_node_public_ip = enable_node_public_ip
self.scale_set_priority = scale_set_priority
self.scale_set_eviction_policy = scale_set_eviction_policy
self.spot_max_price = spot_max_price
self.tags = tags
self.node_labels = node_labels
self.node_taints = node_taints
self.proximity_placement_group_id = proximity_placement_group_id
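# Illustrative sketch only (not part of the generated API surface): constructing an
# AgentPool for a user node pool with the cluster autoscaler enabled. The VM size,
# counts, labels and taints below are assumptions chosen for the example, not defaults.
def _example_agent_pool():
    return AgentPool(
        count=3,
        vm_size="Standard_DS2_v2",
        os_type="Linux",
        mode="User",
        enable_auto_scaling=True,
        min_count=1,
        max_count=5,
        node_labels={"workload": "batch"},
        node_taints=["workload=batch:NoSchedule"],
    )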
class AgentPoolAvailableVersions(msrest.serialization.Model):
"""The list of available versions for an agent pool.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Id of the agent pool available versions.
:vartype id: str
:ivar name: Name of the agent pool available versions.
:vartype name: str
:ivar type: Type of the agent pool available versions.
:vartype type: str
:param agent_pool_versions: List of versions available for agent pool.
:type agent_pool_versions:
list[~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'agent_pool_versions': {'key': 'properties.agentPoolVersions', 'type': '[AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem]'},
}
def __init__(
self,
*,
agent_pool_versions: Optional[List["AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem"]] = None,
**kwargs
):
super(AgentPoolAvailableVersions, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.agent_pool_versions = agent_pool_versions
class AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem(msrest.serialization.Model):
"""AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem.
:param default: Whether this version is the default agent pool version.
:type default: bool
:param kubernetes_version: Kubernetes version (major, minor, patch).
:type kubernetes_version: str
:param is_preview: Whether Kubernetes version is currently in preview.
:type is_preview: bool
"""
_attribute_map = {
'default': {'key': 'default', 'type': 'bool'},
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'is_preview': {'key': 'isPreview', 'type': 'bool'},
}
def __init__(
self,
*,
default: Optional[bool] = None,
kubernetes_version: Optional[str] = None,
is_preview: Optional[bool] = None,
**kwargs
):
super(AgentPoolAvailableVersionsPropertiesAgentPoolVersionsItem, self).__init__(**kwargs)
self.default = default
self.kubernetes_version = kubernetes_version
self.is_preview = is_preview
class AgentPoolListResult(msrest.serialization.Model):
"""The response from the List Agent Pools operation.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of agent pools.
:type value: list[~azure.mgmt.containerservice.v2020_07_01.models.AgentPool]
:ivar next_link: The URL to get the next set of agent pool results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[AgentPool]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["AgentPool"]] = None,
**kwargs
):
super(AgentPoolListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
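# Illustrative sketch only: reading pool names out of an AgentPoolListResult returned
# by the service. ``next_link`` is read-only and is normally consumed by the client's
# paging machinery rather than accessed directly.
def _example_agent_pool_names(list_result):
    return [pool.name for pool in (list_result.value or [])]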
class AgentPoolUpgradeProfile(msrest.serialization.Model):
"""The list of available upgrades for an agent pool.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of the agent pool upgrade profile.
:vartype id: str
:ivar name: Name of the agent pool upgrade profile.
:vartype name: str
:ivar type: Type of the agent pool upgrade profile.
:vartype type: str
:param kubernetes_version: Required. Kubernetes version (major, minor, patch).
:type kubernetes_version: str
:param os_type: Required. OsType to be used to specify the OS type. Choose from Linux and Windows.
Defaults to Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.OSType
:param upgrades: List of orchestrator types and versions available for upgrade.
:type upgrades:
list[~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolUpgradeProfilePropertiesUpgradesItem]
:param latest_node_image_version: LatestNodeImageVersion is the latest AKS supported node image
version.
:type latest_node_image_version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kubernetes_version': {'required': True},
'os_type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'upgrades': {'key': 'properties.upgrades', 'type': '[AgentPoolUpgradeProfilePropertiesUpgradesItem]'},
'latest_node_image_version': {'key': 'properties.latestNodeImageVersion', 'type': 'str'},
}
def __init__(
self,
*,
kubernetes_version: str,
os_type: Union[str, "OSType"] = "Linux",
upgrades: Optional[List["AgentPoolUpgradeProfilePropertiesUpgradesItem"]] = None,
latest_node_image_version: Optional[str] = None,
**kwargs
):
super(AgentPoolUpgradeProfile, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.kubernetes_version = kubernetes_version
self.os_type = os_type
self.upgrades = upgrades
self.latest_node_image_version = latest_node_image_version
class AgentPoolUpgradeProfilePropertiesUpgradesItem(msrest.serialization.Model):
"""AgentPoolUpgradeProfilePropertiesUpgradesItem.
:param kubernetes_version: Kubernetes version (major, minor, patch).
:type kubernetes_version: str
:param is_preview: Whether Kubernetes version is currently in preview.
:type is_preview: bool
"""
_attribute_map = {
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'is_preview': {'key': 'isPreview', 'type': 'bool'},
}
def __init__(
self,
*,
kubernetes_version: Optional[str] = None,
is_preview: Optional[bool] = None,
**kwargs
):
super(AgentPoolUpgradeProfilePropertiesUpgradesItem, self).__init__(**kwargs)
self.kubernetes_version = kubernetes_version
self.is_preview = is_preview
class AgentPoolUpgradeSettings(msrest.serialization.Model):
"""Settings for upgrading an agentpool.
:param max_surge: Count or percentage of additional nodes to be added during upgrade. If empty,
the AKS default is used.
:type max_surge: str
"""
_attribute_map = {
'max_surge': {'key': 'maxSurge', 'type': 'str'},
}
def __init__(
self,
*,
max_surge: Optional[str] = None,
**kwargs
):
super(AgentPoolUpgradeSettings, self).__init__(**kwargs)
self.max_surge = max_surge
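# Illustrative sketch only: requesting a 33% node surge during node pool upgrades.
# The "33%" value is an assumption for the example; leaving max_surge unset falls
# back to the AKS default described in the docstring above.
def _example_upgrade_settings():
    return AgentPoolUpgradeSettings(max_surge="33%")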
class BaseManagedCluster(msrest.serialization.Model):
"""BaseManagedCluster.
Variables are only populated by the server, and will be ignored when sending a request.
:param identity: The identity of the managed cluster, if configured.
:type identity: ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterIdentity
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar max_agent_pools: The max number of agent pools for the managed cluster.
:vartype max_agent_pools: int
:param kubernetes_version: Version of Kubernetes specified when creating the managed cluster.
:type kubernetes_version: str
:param dns_prefix: DNS prefix specified when creating the managed cluster.
:type dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:ivar private_fqdn: FQDN of private cluster.
:vartype private_fqdn: str
:param agent_pool_profiles: Properties of the agent pool.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAgentPoolProfile]
:param linux_profile: Profile for Linux VMs in the container service cluster.
:type linux_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceLinuxProfile
:param windows_profile: Profile for Windows VMs in the container service cluster.
:type windows_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterWindowsProfile
:param service_principal_profile: Information about a service principal identity for the
cluster to use for manipulating Azure APIs.
:type service_principal_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterServicePrincipalProfile
:param addon_profiles: Profile of managed cluster add-on.
:type addon_profiles: dict[str,
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAddonProfile]
:param node_resource_group: Name of the resource group containing agent pool nodes.
:type node_resource_group: str
:param enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
:type enable_rbac: bool
:param enable_pod_security_policy: (DEPRECATING) Whether to enable Kubernetes pod security
policy (preview). This feature is set for removal on October 15th, 2020. Learn more at
aka.ms/aks/azpodpolicy.
:type enable_pod_security_policy: bool
:param network_profile: Profile of network configuration.
:type network_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceNetworkProfile
:param aad_profile: Profile of Azure Active Directory configuration.
:type aad_profile: ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAADProfile
:param auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
:type auto_scaler_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPropertiesAutoScalerProfile
:param api_server_access_profile: Access profile for managed cluster API server.
:type api_server_access_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAPIServerAccessProfile
:param disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:type disk_encryption_set_id: str
:param identity_profile: Identities associated with the cluster.
:type identity_profile: dict[str,
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPropertiesIdentityProfileValue]
"""
_validation = {
'provisioning_state': {'readonly': True},
'max_agent_pools': {'readonly': True},
'fqdn': {'readonly': True},
'private_fqdn': {'readonly': True},
}
_attribute_map = {
'identity': {'key': 'identity', 'type': 'ManagedClusterIdentity'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'max_agent_pools': {'key': 'properties.maxAgentPools', 'type': 'int'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'dns_prefix': {'key': 'properties.dnsPrefix', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'private_fqdn': {'key': 'properties.privateFQDN', 'type': 'str'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterAgentPoolProfile]'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'windows_profile': {'key': 'properties.windowsProfile', 'type': 'ManagedClusterWindowsProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ManagedClusterServicePrincipalProfile'},
'addon_profiles': {'key': 'properties.addonProfiles', 'type': '{ManagedClusterAddonProfile}'},
'node_resource_group': {'key': 'properties.nodeResourceGroup', 'type': 'str'},
'enable_rbac': {'key': 'properties.enableRBAC', 'type': 'bool'},
'enable_pod_security_policy': {'key': 'properties.enablePodSecurityPolicy', 'type': 'bool'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerServiceNetworkProfile'},
'aad_profile': {'key': 'properties.aadProfile', 'type': 'ManagedClusterAADProfile'},
'auto_scaler_profile': {'key': 'properties.autoScalerProfile', 'type': 'ManagedClusterPropertiesAutoScalerProfile'},
'api_server_access_profile': {'key': 'properties.apiServerAccessProfile', 'type': 'ManagedClusterAPIServerAccessProfile'},
'disk_encryption_set_id': {'key': 'properties.diskEncryptionSetID', 'type': 'str'},
'identity_profile': {'key': 'properties.identityProfile', 'type': '{ManagedClusterPropertiesIdentityProfileValue}'},
}
def __init__(
self,
*,
identity: Optional["ManagedClusterIdentity"] = None,
kubernetes_version: Optional[str] = None,
dns_prefix: Optional[str] = None,
agent_pool_profiles: Optional[List["ManagedClusterAgentPoolProfile"]] = None,
linux_profile: Optional["ContainerServiceLinuxProfile"] = None,
windows_profile: Optional["ManagedClusterWindowsProfile"] = None,
service_principal_profile: Optional["ManagedClusterServicePrincipalProfile"] = None,
addon_profiles: Optional[Dict[str, "ManagedClusterAddonProfile"]] = None,
node_resource_group: Optional[str] = None,
enable_rbac: Optional[bool] = None,
enable_pod_security_policy: Optional[bool] = None,
network_profile: Optional["ContainerServiceNetworkProfile"] = None,
aad_profile: Optional["ManagedClusterAADProfile"] = None,
auto_scaler_profile: Optional["ManagedClusterPropertiesAutoScalerProfile"] = None,
api_server_access_profile: Optional["ManagedClusterAPIServerAccessProfile"] = None,
disk_encryption_set_id: Optional[str] = None,
identity_profile: Optional[Dict[str, "ManagedClusterPropertiesIdentityProfileValue"]] = None,
**kwargs
):
super(BaseManagedCluster, self).__init__(**kwargs)
self.identity = identity
self.provisioning_state = None
self.max_agent_pools = None
self.kubernetes_version = kubernetes_version
self.dns_prefix = dns_prefix
self.fqdn = None
self.private_fqdn = None
self.agent_pool_profiles = agent_pool_profiles
self.linux_profile = linux_profile
self.windows_profile = windows_profile
self.service_principal_profile = service_principal_profile
self.addon_profiles = addon_profiles
self.node_resource_group = node_resource_group
self.enable_rbac = enable_rbac
self.enable_pod_security_policy = enable_pod_security_policy
self.network_profile = network_profile
self.aad_profile = aad_profile
self.auto_scaler_profile = auto_scaler_profile
self.api_server_access_profile = api_server_access_profile
self.disk_encryption_set_id = disk_encryption_set_id
self.identity_profile = identity_profile
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the Container service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
:param target: The target of the particular error. For example, the name of the property in
error.
:type target: str
:param details: A list of additional details about the error.
:type details: list[~azure.mgmt.containerservice.v2020_07_01.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
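# Illustrative sketch only: building a nested CloudErrorBody by hand, e.g. for tests.
# The error codes and messages below are hypothetical, not actual service responses.
def _example_cloud_error():
    return CloudErrorBody(
        code="InvalidParameter",
        message="The agent pool count is out of range.",
        target="agentPoolProfiles[0].count",
        details=[CloudErrorBody(code="OutOfRange", message="Count must be between 1 and 100.")],
    )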
class ContainerServiceDiagnosticsProfile(msrest.serialization.Model):
"""Profile for diagnostics on the container service cluster.
All required parameters must be populated in order to send to Azure.
:param vm_diagnostics: Required. Profile for diagnostics on the container service VMs.
:type vm_diagnostics:
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceVMDiagnostics
"""
_validation = {
'vm_diagnostics': {'required': True},
}
_attribute_map = {
'vm_diagnostics': {'key': 'vmDiagnostics', 'type': 'ContainerServiceVMDiagnostics'},
}
def __init__(
self,
*,
vm_diagnostics: "ContainerServiceVMDiagnostics",
**kwargs
):
super(ContainerServiceDiagnosticsProfile, self).__init__(**kwargs)
self.vm_diagnostics = vm_diagnostics
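# Illustrative sketch only: enabling VM diagnostics. ContainerServiceVMDiagnostics is
# defined later in this module; the reference resolves when the function is called
# after the module has been fully imported.
def _example_diagnostics_profile():
    return ContainerServiceDiagnosticsProfile(
        vm_diagnostics=ContainerServiceVMDiagnostics(enabled=True),
    )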
class ContainerServiceLinuxProfile(msrest.serialization.Model):
"""Profile for Linux VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:param admin_username: Required. The administrator username to use for Linux VMs.
:type admin_username: str
:param ssh: Required. SSH configuration for Linux-based VMs running on Azure.
:type ssh: ~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceSshConfiguration
"""
_validation = {
'admin_username': {'required': True, 'pattern': r'^[A-Za-z][-A-Za-z0-9_]*$'},
'ssh': {'required': True},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'ssh': {'key': 'ssh', 'type': 'ContainerServiceSshConfiguration'},
}
def __init__(
self,
*,
admin_username: str,
ssh: "ContainerServiceSshConfiguration",
**kwargs
):
super(ContainerServiceLinuxProfile, self).__init__(**kwargs)
self.admin_username = admin_username
self.ssh = ssh
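# Illustrative sketch only: a Linux profile with a single SSH public key, which is what
# the service expects. ``admin_username`` must match the pattern declared in
# _validation; "azureuser" is an assumed example value.
def _example_linux_profile(ssh_public_key_data):
    return ContainerServiceLinuxProfile(
        admin_username="azureuser",
        ssh=ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_public_key_data)],
        ),
    )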
class ContainerServiceMasterProfile(msrest.serialization.Model):
"""Profile for the container service master.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param count: Number of masters (VMs) in the container service cluster. Allowed values are 1,
3, and 5. The default value is 1. Possible values include: 1, 3, 5. Default value: "1".
:type count: str or ~azure.mgmt.containerservice.v2020_07_01.models.Count
:param dns_prefix: Required. DNS prefix to be used to create the FQDN for the master pool.
:type dns_prefix: str
:param vm_size: Required. Size of agent VMs. Possible values include: "Standard_A1",
"Standard_A10", "Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2",
"Standard_A2m_v2", "Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2",
"Standard_A5", "Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2",
"Standard_A8m_v2", "Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms",
"Standard_B8ms", "Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo",
"Standard_D12", "Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", "Standard_NV6".
:type vm_size: str or
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:type vnet_subnet_id: str
:param first_consecutive_static_ip: FirstConsecutiveStaticIP used to specify the first static
ip of masters.
:type first_consecutive_static_ip: str
:param storage_profile: Storage profile specifies what kind of storage is used. Choose from
StorageAccount and ManagedDisks. If left empty, it will be chosen for you based on the
orchestrator choice. Possible values include: "StorageAccount", "ManagedDisks".
:type storage_profile: str or
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceStorageProfileTypes
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
"""
_validation = {
'dns_prefix': {'required': True},
'vm_size': {'required': True},
'os_disk_size_gb': {'maximum': 1023, 'minimum': 0},
'fqdn': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'dns_prefix': {'key': 'dnsPrefix', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'first_consecutive_static_ip': {'key': 'firstConsecutiveStaticIP', 'type': 'str'},
'storage_profile': {'key': 'storageProfile', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(
self,
*,
dns_prefix: str,
vm_size: Union[str, "ContainerServiceVMSizeTypes"],
count: Optional[Union[int, "Count"]] = "1",
os_disk_size_gb: Optional[int] = None,
vnet_subnet_id: Optional[str] = None,
first_consecutive_static_ip: Optional[str] = "10.240.255.5",
storage_profile: Optional[Union[str, "ContainerServiceStorageProfileTypes"]] = None,
**kwargs
):
super(ContainerServiceMasterProfile, self).__init__(**kwargs)
self.count = count
self.dns_prefix = dns_prefix
self.vm_size = vm_size
self.os_disk_size_gb = os_disk_size_gb
self.vnet_subnet_id = vnet_subnet_id
self.first_consecutive_static_ip = first_consecutive_static_ip
self.storage_profile = storage_profile
self.fqdn = None
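# Illustrative sketch only: a three-master profile. Only dns_prefix and vm_size are
# required; the DNS prefix and VM size here are assumed example values.
def _example_master_profile():
    return ContainerServiceMasterProfile(
        dns_prefix="myaks",
        vm_size="Standard_DS2_v2",
        count=3,
    )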
class ContainerServiceNetworkProfile(msrest.serialization.Model):
"""Profile of network configuration.
:param network_plugin: Network plugin used for building Kubernetes network. Possible values
include: "azure", "kubenet". Default value: "kubenet".
:type network_plugin: str or ~azure.mgmt.containerservice.v2020_07_01.models.NetworkPlugin
:param network_policy: Network policy used for building Kubernetes network. Possible values
include: "calico", "azure".
:type network_policy: str or ~azure.mgmt.containerservice.v2020_07_01.models.NetworkPolicy
:param network_mode: Network mode used for building Kubernetes network. Possible values
include: "transparent", "bridge".
:type network_mode: str or ~azure.mgmt.containerservice.v2020_07_01.models.NetworkMode
:param pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
:type pod_cidr: str
:param service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must
not overlap with any Subnet IP ranges.
:type service_cidr: str
:param dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within
the Kubernetes service address range specified in serviceCidr.
:type dns_service_ip: str
:param docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It
must not overlap with any Subnet IP ranges or the Kubernetes service address range.
:type docker_bridge_cidr: str
:param outbound_type: The outbound (egress) routing method. Possible values include:
"loadBalancer", "userDefinedRouting". Default value: "loadBalancer".
:type outbound_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.OutboundType
:param load_balancer_sku: The load balancer sku for the managed cluster. Possible values
include: "standard", "basic".
:type load_balancer_sku: str or ~azure.mgmt.containerservice.v2020_07_01.models.LoadBalancerSku
:param load_balancer_profile: Profile of the cluster load balancer.
:type load_balancer_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterLoadBalancerProfile
"""
_validation = {
'pod_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'service_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'dns_service_ip': {'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'},
'docker_bridge_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
}
_attribute_map = {
'network_plugin': {'key': 'networkPlugin', 'type': 'str'},
'network_policy': {'key': 'networkPolicy', 'type': 'str'},
'network_mode': {'key': 'networkMode', 'type': 'str'},
'pod_cidr': {'key': 'podCidr', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'dns_service_ip': {'key': 'dnsServiceIP', 'type': 'str'},
'docker_bridge_cidr': {'key': 'dockerBridgeCidr', 'type': 'str'},
'outbound_type': {'key': 'outboundType', 'type': 'str'},
'load_balancer_sku': {'key': 'loadBalancerSku', 'type': 'str'},
'load_balancer_profile': {'key': 'loadBalancerProfile', 'type': 'ManagedClusterLoadBalancerProfile'},
}
def __init__(
self,
*,
network_plugin: Optional[Union[str, "NetworkPlugin"]] = "kubenet",
network_policy: Optional[Union[str, "NetworkPolicy"]] = None,
network_mode: Optional[Union[str, "NetworkMode"]] = None,
pod_cidr: Optional[str] = "10.244.0.0/16",
service_cidr: Optional[str] = "10.0.0.0/16",
dns_service_ip: Optional[str] = "10.0.0.10",
docker_bridge_cidr: Optional[str] = "172.17.0.1/16",
outbound_type: Optional[Union[str, "OutboundType"]] = "loadBalancer",
load_balancer_sku: Optional[Union[str, "LoadBalancerSku"]] = None,
load_balancer_profile: Optional["ManagedClusterLoadBalancerProfile"] = None,
**kwargs
):
super(ContainerServiceNetworkProfile, self).__init__(**kwargs)
self.network_plugin = network_plugin
self.network_policy = network_policy
self.network_mode = network_mode
self.pod_cidr = pod_cidr
self.service_cidr = service_cidr
self.dns_service_ip = dns_service_ip
self.docker_bridge_cidr = docker_bridge_cidr
self.outbound_type = outbound_type
self.load_balancer_sku = load_balancer_sku
self.load_balancer_profile = load_balancer_profile
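# Illustrative sketch only: an Azure CNI network profile with Calico network policy.
# The CIDRs are assumptions; note that dns_service_ip must fall inside service_cidr,
# as the docstring above requires.
def _example_network_profile():
    return ContainerServiceNetworkProfile(
        network_plugin="azure",
        network_policy="calico",
        service_cidr="10.2.0.0/16",
        dns_service_ip="10.2.0.10",
        docker_bridge_cidr="172.17.0.1/16",
        load_balancer_sku="standard",
    )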
class ContainerServiceSshConfiguration(msrest.serialization.Model):
"""SSH configuration for Linux-based VMs running on Azure.
All required parameters must be populated in order to send to Azure.
:param public_keys: Required. The list of SSH public keys used to authenticate with Linux-based
VMs. Only one key is expected.
:type public_keys:
list[~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceSshPublicKey]
"""
_validation = {
'public_keys': {'required': True},
}
_attribute_map = {
'public_keys': {'key': 'publicKeys', 'type': '[ContainerServiceSshPublicKey]'},
}
def __init__(
self,
*,
public_keys: List["ContainerServiceSshPublicKey"],
**kwargs
):
super(ContainerServiceSshConfiguration, self).__init__(**kwargs)
self.public_keys = public_keys
class ContainerServiceSshPublicKey(msrest.serialization.Model):
"""Contains information about SSH certificate public key data.
All required parameters must be populated in order to send to Azure.
:param key_data: Required. Certificate public key used to authenticate with VMs through SSH.
The certificate must be in PEM format with or without headers.
:type key_data: str
"""
_validation = {
'key_data': {'required': True},
}
_attribute_map = {
'key_data': {'key': 'keyData', 'type': 'str'},
}
def __init__(
self,
*,
key_data: str,
**kwargs
):
super(ContainerServiceSshPublicKey, self).__init__(**kwargs)
self.key_data = key_data
class ContainerServiceVMDiagnostics(msrest.serialization.Model):
"""Profile for diagnostics on the container service VMs.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the VM diagnostic agent is provisioned on the VM.
:type enabled: bool
:ivar storage_uri: The URI of the storage account where diagnostics are stored.
:vartype storage_uri: str
"""
_validation = {
'enabled': {'required': True},
'storage_uri': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'storage_uri': {'key': 'storageUri', 'type': 'str'},
}
def __init__(
self,
*,
enabled: bool,
**kwargs
):
super(ContainerServiceVMDiagnostics, self).__init__(**kwargs)
self.enabled = enabled
self.storage_uri = None
class CredentialResult(msrest.serialization.Model):
"""The credential result response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the credential.
:vartype name: str
:ivar value: Base64-encoded Kubernetes configuration file.
:vartype value: bytearray
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'bytearray'},
}
def __init__(
self,
**kwargs
):
super(CredentialResult, self).__init__(**kwargs)
self.name = None
self.value = None
class CredentialResults(msrest.serialization.Model):
"""The list of credential result response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar kubeconfigs: Base64-encoded Kubernetes configuration file.
:vartype kubeconfigs: list[~azure.mgmt.containerservice.v2020_07_01.models.CredentialResult]
"""
_validation = {
'kubeconfigs': {'readonly': True},
}
_attribute_map = {
'kubeconfigs': {'key': 'kubeconfigs', 'type': '[CredentialResult]'},
}
def __init__(
self,
**kwargs
):
super(CredentialResults, self).__init__(**kwargs)
self.kubeconfigs = None
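# Illustrative sketch only: decoding the first kubeconfig from a CredentialResults
# instance returned by the service (all fields are read-only and populated server-side).
def _example_first_kubeconfig(credential_results):
    if not credential_results.kubeconfigs:
        return None
    return credential_results.kubeconfigs[0].value.decode("utf-8")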
class Resource(msrest.serialization.Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
class ManagedCluster(Resource, BaseManagedCluster):
"""Managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param identity: The identity of the managed cluster, if configured.
:type identity: ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterIdentity
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:ivar max_agent_pools: The max number of agent pools for the managed cluster.
:vartype max_agent_pools: int
:param kubernetes_version: Version of Kubernetes specified when creating the managed cluster.
:type kubernetes_version: str
:param dns_prefix: DNS prefix specified when creating the managed cluster.
:type dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:ivar private_fqdn: FQDN of private cluster.
:vartype private_fqdn: str
:param agent_pool_profiles: Properties of the agent pool.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAgentPoolProfile]
:param linux_profile: Profile for Linux VMs in the container service cluster.
:type linux_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceLinuxProfile
:param windows_profile: Profile for Windows VMs in the container service cluster.
:type windows_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterWindowsProfile
:param service_principal_profile: Information about a service principal identity for the
cluster to use for manipulating Azure APIs.
:type service_principal_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterServicePrincipalProfile
:param addon_profiles: Profile of managed cluster add-on.
:type addon_profiles: dict[str,
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAddonProfile]
:param node_resource_group: Name of the resource group containing agent pool nodes.
:type node_resource_group: str
:param enable_rbac: Whether to enable Kubernetes Role-Based Access Control.
:type enable_rbac: bool
:param enable_pod_security_policy: (DEPRECATING) Whether to enable Kubernetes pod security
policy (preview). This feature is set for removal on October 15th, 2020. Learn more at
aka.ms/aks/azpodpolicy.
:type enable_pod_security_policy: bool
:param network_profile: Profile of network configuration.
:type network_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceNetworkProfile
:param aad_profile: Profile of Azure Active Directory configuration.
:type aad_profile: ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAADProfile
:param auto_scaler_profile: Parameters to be applied to the cluster-autoscaler when enabled.
:type auto_scaler_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPropertiesAutoScalerProfile
:param api_server_access_profile: Access profile for managed cluster API server.
:type api_server_access_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAPIServerAccessProfile
:param disk_encryption_set_id: ResourceId of the disk encryption set to use for enabling
encryption at rest.
:type disk_encryption_set_id: str
:param identity_profile: Identities associated with the cluster.
:type identity_profile: dict[str,
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPropertiesIdentityProfileValue]
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param sku: The managed cluster SKU.
:type sku: ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterSKU
"""
_validation = {
'provisioning_state': {'readonly': True},
'max_agent_pools': {'readonly': True},
'fqdn': {'readonly': True},
'private_fqdn': {'readonly': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'identity': {'key': 'identity', 'type': 'ManagedClusterIdentity'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'max_agent_pools': {'key': 'properties.maxAgentPools', 'type': 'int'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'dns_prefix': {'key': 'properties.dnsPrefix', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'private_fqdn': {'key': 'properties.privateFQDN', 'type': 'str'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterAgentPoolProfile]'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'windows_profile': {'key': 'properties.windowsProfile', 'type': 'ManagedClusterWindowsProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ManagedClusterServicePrincipalProfile'},
'addon_profiles': {'key': 'properties.addonProfiles', 'type': '{ManagedClusterAddonProfile}'},
'node_resource_group': {'key': 'properties.nodeResourceGroup', 'type': 'str'},
'enable_rbac': {'key': 'properties.enableRBAC', 'type': 'bool'},
'enable_pod_security_policy': {'key': 'properties.enablePodSecurityPolicy', 'type': 'bool'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerServiceNetworkProfile'},
'aad_profile': {'key': 'properties.aadProfile', 'type': 'ManagedClusterAADProfile'},
'auto_scaler_profile': {'key': 'properties.autoScalerProfile', 'type': 'ManagedClusterPropertiesAutoScalerProfile'},
'api_server_access_profile': {'key': 'properties.apiServerAccessProfile', 'type': 'ManagedClusterAPIServerAccessProfile'},
'disk_encryption_set_id': {'key': 'properties.diskEncryptionSetID', 'type': 'str'},
'identity_profile': {'key': 'properties.identityProfile', 'type': '{ManagedClusterPropertiesIdentityProfileValue}'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'ManagedClusterSKU'},
}
def __init__(
self,
*,
location: str,
identity: Optional["ManagedClusterIdentity"] = None,
kubernetes_version: Optional[str] = None,
dns_prefix: Optional[str] = None,
agent_pool_profiles: Optional[List["ManagedClusterAgentPoolProfile"]] = None,
linux_profile: Optional["ContainerServiceLinuxProfile"] = None,
windows_profile: Optional["ManagedClusterWindowsProfile"] = None,
service_principal_profile: Optional["ManagedClusterServicePrincipalProfile"] = None,
addon_profiles: Optional[Dict[str, "ManagedClusterAddonProfile"]] = None,
node_resource_group: Optional[str] = None,
enable_rbac: Optional[bool] = None,
enable_pod_security_policy: Optional[bool] = None,
network_profile: Optional["ContainerServiceNetworkProfile"] = None,
aad_profile: Optional["ManagedClusterAADProfile"] = None,
auto_scaler_profile: Optional["ManagedClusterPropertiesAutoScalerProfile"] = None,
api_server_access_profile: Optional["ManagedClusterAPIServerAccessProfile"] = None,
disk_encryption_set_id: Optional[str] = None,
identity_profile: Optional[Dict[str, "ManagedClusterPropertiesIdentityProfileValue"]] = None,
tags: Optional[Dict[str, str]] = None,
sku: Optional["ManagedClusterSKU"] = None,
**kwargs
):
super(ManagedCluster, self).__init__(location=location, tags=tags, identity=identity, kubernetes_version=kubernetes_version, dns_prefix=dns_prefix, agent_pool_profiles=agent_pool_profiles, linux_profile=linux_profile, windows_profile=windows_profile, service_principal_profile=service_principal_profile, addon_profiles=addon_profiles, node_resource_group=node_resource_group, enable_rbac=enable_rbac, enable_pod_security_policy=enable_pod_security_policy, network_profile=network_profile, aad_profile=aad_profile, auto_scaler_profile=auto_scaler_profile, api_server_access_profile=api_server_access_profile, disk_encryption_set_id=disk_encryption_set_id, identity_profile=identity_profile, **kwargs)
self.identity = identity
self.provisioning_state = None
self.max_agent_pools = None
self.kubernetes_version = kubernetes_version
self.dns_prefix = dns_prefix
self.fqdn = None
self.private_fqdn = None
self.agent_pool_profiles = agent_pool_profiles
self.linux_profile = linux_profile
self.windows_profile = windows_profile
self.service_principal_profile = service_principal_profile
self.addon_profiles = addon_profiles
self.node_resource_group = node_resource_group
self.enable_rbac = enable_rbac
self.enable_pod_security_policy = enable_pod_security_policy
self.network_profile = network_profile
self.aad_profile = aad_profile
self.auto_scaler_profile = auto_scaler_profile
self.api_server_access_profile = api_server_access_profile
self.disk_encryption_set_id = disk_encryption_set_id
self.identity_profile = identity_profile
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
self.sku = sku
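# Illustrative sketch only: a minimal managed cluster payload. The location, DNS prefix,
# Kubernetes version and pool settings are assumptions, and ManagedClusterAgentPoolProfile
# (defined later in this module) is assumed to accept the agent pool properties documented
# above plus a required pool name. It reuses the example helpers defined earlier in this file.
def _example_managed_cluster(ssh_public_key_data):
    return ManagedCluster(
        location="westeurope",
        dns_prefix="myaks",
        kubernetes_version="1.17.9",
        enable_rbac=True,
        agent_pool_profiles=[
            ManagedClusterAgentPoolProfile(
                name="nodepool1",
                count=3,
                vm_size="Standard_DS2_v2",
                mode="System",
            ),
        ],
        linux_profile=_example_linux_profile(ssh_public_key_data),
        network_profile=_example_network_profile(),
        tags={"environment": "dev"},
    )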
class ManagedClusterAADProfile(msrest.serialization.Model):
"""AADProfile specifies attributes for Azure Active Directory integration.
:param managed: Whether to enable managed AAD.
:type managed: bool
:param enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
:type enable_azure_rbac: bool
:param admin_group_object_i_ds: AAD group object IDs that will have the admin role on the cluster.
:type admin_group_object_i_ds: list[str]
:param client_app_id: The client AAD application ID.
:type client_app_id: str
:param server_app_id: The server AAD application ID.
:type server_app_id: str
:param server_app_secret: The server AAD application secret.
:type server_app_secret: str
:param tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the
tenant of the deployment subscription.
:type tenant_id: str
"""
_attribute_map = {
'managed': {'key': 'managed', 'type': 'bool'},
'enable_azure_rbac': {'key': 'enableAzureRBAC', 'type': 'bool'},
'admin_group_object_i_ds': {'key': 'adminGroupObjectIDs', 'type': '[str]'},
'client_app_id': {'key': 'clientAppID', 'type': 'str'},
'server_app_id': {'key': 'serverAppID', 'type': 'str'},
'server_app_secret': {'key': 'serverAppSecret', 'type': 'str'},
'tenant_id': {'key': 'tenantID', 'type': 'str'},
}
def __init__(
self,
*,
managed: Optional[bool] = None,
enable_azure_rbac: Optional[bool] = None,
admin_group_object_i_ds: Optional[List[str]] = None,
client_app_id: Optional[str] = None,
server_app_id: Optional[str] = None,
server_app_secret: Optional[str] = None,
tenant_id: Optional[str] = None,
**kwargs
):
super(ManagedClusterAADProfile, self).__init__(**kwargs)
self.managed = managed
self.enable_azure_rbac = enable_azure_rbac
self.admin_group_object_i_ds = admin_group_object_i_ds
self.client_app_id = client_app_id
self.server_app_id = server_app_id
self.server_app_secret = server_app_secret
self.tenant_id = tenant_id
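# Illustrative sketch only: managed AAD integration with Azure RBAC enabled. The admin
# group object ID is a placeholder GUID, not a real group.
def _example_aad_profile():
    return ManagedClusterAADProfile(
        managed=True,
        enable_azure_rbac=True,
        admin_group_object_i_ds=["00000000-0000-0000-0000-000000000000"],
    )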
class ManagedClusterAccessProfile(Resource):
"""Managed cluster Access Profile.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kube_config: Base64-encoded Kubernetes configuration file.
:type kube_config: bytearray
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kube_config': {'key': 'properties.kubeConfig', 'type': 'bytearray'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
kube_config: Optional[bytearray] = None,
**kwargs
):
super(ManagedClusterAccessProfile, self).__init__(location=location, tags=tags, **kwargs)
self.kube_config = kube_config
class ManagedClusterAddonProfile(msrest.serialization.Model):
"""A Kubernetes add-on profile for a managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the add-on is enabled or not.
:type enabled: bool
:param config: Key-value pairs for configuring an add-on.
:type config: dict[str, str]
:ivar identity: Information of user assigned identity used by this add-on.
:vartype identity:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterAddonProfileIdentity
"""
_validation = {
'enabled': {'required': True},
'identity': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'config': {'key': 'config', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ManagedClusterAddonProfileIdentity'},
}
def __init__(
self,
*,
enabled: bool,
config: Optional[Dict[str, str]] = None,
**kwargs
):
super(ManagedClusterAddonProfile, self).__init__(**kwargs)
self.enabled = enabled
self.config = config
self.identity = None
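# Illustrative sketch only: the addon_profiles mapping expected by BaseManagedCluster,
# keyed by add-on name. The add-on names and config key are assumptions for the example;
# this model does not validate them.
def _example_addon_profiles():
    return {
        "httpApplicationRouting": ManagedClusterAddonProfile(enabled=True),
        "omsagent": ManagedClusterAddonProfile(
            enabled=True,
            config={"logAnalyticsWorkspaceResourceID": "<workspace-resource-id>"},
        ),
    }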
class UserAssignedIdentity(msrest.serialization.Model):
"""UserAssignedIdentity.
:param resource_id: The resource id of the user assigned identity.
:type resource_id: str
:param client_id: The client id of the user assigned identity.
:type client_id: str
:param object_id: The object id of the user assigned identity.
:type object_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'object_id': {'key': 'objectId', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
**kwargs
):
super(UserAssignedIdentity, self).__init__(**kwargs)
self.resource_id = resource_id
self.client_id = client_id
self.object_id = object_id
class ManagedClusterAddonProfileIdentity(UserAssignedIdentity):
"""Information of user assigned identity used by this add-on.
:param resource_id: The resource id of the user assigned identity.
:type resource_id: str
:param client_id: The client id of the user assigned identity.
:type client_id: str
:param object_id: The object id of the user assigned identity.
:type object_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'object_id': {'key': 'objectId', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
**kwargs
):
super(ManagedClusterAddonProfileIdentity, self).__init__(resource_id=resource_id, client_id=client_id, object_id=object_id, **kwargs)
class ManagedClusterAgentPoolProfileProperties(msrest.serialization.Model):
"""Properties for the container service agent pool profile.
Variables are only populated by the server, and will be ignored when sending a request.
:param count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:type count: int
:param vm_size: Size of agent VMs. Possible values include: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", "Standard_NV6".
:type vm_size: str or
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to
Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling.
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling.
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler.
:type enable_auto_scaling: bool
:param type: AgentPoolType represents types of an agent pool. Possible values include:
"VirtualMachineScaleSets", "AvailabilitySet".
:type type: str or ~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolType
:param mode: AgentPoolMode represents mode of an agent pool. Possible values include: "System",
"User".
:type mode: str or ~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolMode
:param orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:type orchestrator_version: str
:ivar node_image_version: Version of node image.
:vartype node_image_version: str
:param upgrade_settings: Settings for upgrading the agentpool.
:type upgrade_settings:
~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolUpgradeSettings
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:type availability_zones: list[str]
:param enable_node_public_ip: Enable public IP for nodes.
:type enable_node_public_ip: bool
:param scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
priority. Default to regular. Possible values include: "Spot", "Regular". Default value:
"Regular".
:type scale_set_priority: str or
~azure.mgmt.containerservice.v2020_07_01.models.ScaleSetPriority
:param scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy
for Spot virtual machine scale set. Default to Delete. Possible values include: "Delete",
"Deallocate". Default value: "Delete".
:type scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_07_01.models.ScaleSetEvictionPolicy
:param spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to
pay in US Dollars. Possible values are any decimal value greater than zero or -1 which
indicates default price to be up-to on-demand.
:type spot_max_price: float
:param tags: A set of tags. Agent pool tags to be persisted on the agent pool virtual machine
scale set.
:type tags: dict[str, str]
:param node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:type node_labels: dict[str, str]
:param node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:type node_taints: list[str]
:param proximity_placement_group_id: The ID for Proximity Placement Group.
:type proximity_placement_group_id: str
"""
_validation = {
'os_disk_size_gb': {'maximum': 1023, 'minimum': 0},
'node_image_version': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'node_image_version': {'key': 'nodeImageVersion', 'type': 'str'},
'upgrade_settings': {'key': 'upgradeSettings', 'type': 'AgentPoolUpgradeSettings'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
'enable_node_public_ip': {'key': 'enableNodePublicIP', 'type': 'bool'},
'scale_set_priority': {'key': 'scaleSetPriority', 'type': 'str'},
'scale_set_eviction_policy': {'key': 'scaleSetEvictionPolicy', 'type': 'str'},
'spot_max_price': {'key': 'spotMaxPrice', 'type': 'float'},
'tags': {'key': 'tags', 'type': '{str}'},
'node_labels': {'key': 'nodeLabels', 'type': '{str}'},
'node_taints': {'key': 'nodeTaints', 'type': '[str]'},
'proximity_placement_group_id': {'key': 'proximityPlacementGroupID', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
vm_size: Optional[Union[str, "ContainerServiceVMSizeTypes"]] = None,
os_disk_size_gb: Optional[int] = None,
vnet_subnet_id: Optional[str] = None,
max_pods: Optional[int] = None,
os_type: Optional[Union[str, "OSType"]] = "Linux",
max_count: Optional[int] = None,
min_count: Optional[int] = None,
enable_auto_scaling: Optional[bool] = None,
type: Optional[Union[str, "AgentPoolType"]] = None,
mode: Optional[Union[str, "AgentPoolMode"]] = None,
orchestrator_version: Optional[str] = None,
upgrade_settings: Optional["AgentPoolUpgradeSettings"] = None,
availability_zones: Optional[List[str]] = None,
enable_node_public_ip: Optional[bool] = None,
scale_set_priority: Optional[Union[str, "ScaleSetPriority"]] = "Regular",
scale_set_eviction_policy: Optional[Union[str, "ScaleSetEvictionPolicy"]] = "Delete",
spot_max_price: Optional[float] = -1,
tags: Optional[Dict[str, str]] = None,
node_labels: Optional[Dict[str, str]] = None,
node_taints: Optional[List[str]] = None,
proximity_placement_group_id: Optional[str] = None,
**kwargs
):
super(ManagedClusterAgentPoolProfileProperties, self).__init__(**kwargs)
self.count = count
self.vm_size = vm_size
self.os_disk_size_gb = os_disk_size_gb
self.vnet_subnet_id = vnet_subnet_id
self.max_pods = max_pods
self.os_type = os_type
self.max_count = max_count
self.min_count = min_count
self.enable_auto_scaling = enable_auto_scaling
self.type = type
self.mode = mode
self.orchestrator_version = orchestrator_version
self.node_image_version = None
self.upgrade_settings = upgrade_settings
self.provisioning_state = None
self.availability_zones = availability_zones
self.enable_node_public_ip = enable_node_public_ip
self.scale_set_priority = scale_set_priority
self.scale_set_eviction_policy = scale_set_eviction_policy
self.spot_max_price = spot_max_price
self.tags = tags
self.node_labels = node_labels
self.node_taints = node_taints
self.proximity_placement_group_id = proximity_placement_group_id
class ManagedClusterAgentPoolProfile(ManagedClusterAgentPoolProfileProperties):
"""Profile for the container service agent pool.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param count: Number of agents (VMs) to host docker containers. Allowed values must be in the
range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for
system pools. The default value is 1.
:type count: int
:param vm_size: Size of agent VMs. Possible values include: "Standard_A1", "Standard_A10",
"Standard_A11", "Standard_A1_v2", "Standard_A2", "Standard_A2_v2", "Standard_A2m_v2",
"Standard_A3", "Standard_A4", "Standard_A4_v2", "Standard_A4m_v2", "Standard_A5",
"Standard_A6", "Standard_A7", "Standard_A8", "Standard_A8_v2", "Standard_A8m_v2",
"Standard_A9", "Standard_B2ms", "Standard_B2s", "Standard_B4ms", "Standard_B8ms",
"Standard_D1", "Standard_D11", "Standard_D11_v2", "Standard_D11_v2_Promo", "Standard_D12",
"Standard_D12_v2", "Standard_D12_v2_Promo", "Standard_D13", "Standard_D13_v2",
"Standard_D13_v2_Promo", "Standard_D14", "Standard_D14_v2", "Standard_D14_v2_Promo",
"Standard_D15_v2", "Standard_D16_v3", "Standard_D16s_v3", "Standard_D1_v2", "Standard_D2",
"Standard_D2_v2", "Standard_D2_v2_Promo", "Standard_D2_v3", "Standard_D2s_v3", "Standard_D3",
"Standard_D32_v3", "Standard_D32s_v3", "Standard_D3_v2", "Standard_D3_v2_Promo", "Standard_D4",
"Standard_D4_v2", "Standard_D4_v2_Promo", "Standard_D4_v3", "Standard_D4s_v3",
"Standard_D5_v2", "Standard_D5_v2_Promo", "Standard_D64_v3", "Standard_D64s_v3",
"Standard_D8_v3", "Standard_D8s_v3", "Standard_DS1", "Standard_DS11", "Standard_DS11_v2",
"Standard_DS11_v2_Promo", "Standard_DS12", "Standard_DS12_v2", "Standard_DS12_v2_Promo",
"Standard_DS13", "Standard_DS13-2_v2", "Standard_DS13-4_v2", "Standard_DS13_v2",
"Standard_DS13_v2_Promo", "Standard_DS14", "Standard_DS14-4_v2", "Standard_DS14-8_v2",
"Standard_DS14_v2", "Standard_DS14_v2_Promo", "Standard_DS15_v2", "Standard_DS1_v2",
"Standard_DS2", "Standard_DS2_v2", "Standard_DS2_v2_Promo", "Standard_DS3", "Standard_DS3_v2",
"Standard_DS3_v2_Promo", "Standard_DS4", "Standard_DS4_v2", "Standard_DS4_v2_Promo",
"Standard_DS5_v2", "Standard_DS5_v2_Promo", "Standard_E16_v3", "Standard_E16s_v3",
"Standard_E2_v3", "Standard_E2s_v3", "Standard_E32-16s_v3", "Standard_E32-8s_v3",
"Standard_E32_v3", "Standard_E32s_v3", "Standard_E4_v3", "Standard_E4s_v3",
"Standard_E64-16s_v3", "Standard_E64-32s_v3", "Standard_E64_v3", "Standard_E64s_v3",
"Standard_E8_v3", "Standard_E8s_v3", "Standard_F1", "Standard_F16", "Standard_F16s",
"Standard_F16s_v2", "Standard_F1s", "Standard_F2", "Standard_F2s", "Standard_F2s_v2",
"Standard_F32s_v2", "Standard_F4", "Standard_F4s", "Standard_F4s_v2", "Standard_F64s_v2",
"Standard_F72s_v2", "Standard_F8", "Standard_F8s", "Standard_F8s_v2", "Standard_G1",
"Standard_G2", "Standard_G3", "Standard_G4", "Standard_G5", "Standard_GS1", "Standard_GS2",
"Standard_GS3", "Standard_GS4", "Standard_GS4-4", "Standard_GS4-8", "Standard_GS5",
"Standard_GS5-16", "Standard_GS5-8", "Standard_H16", "Standard_H16m", "Standard_H16mr",
"Standard_H16r", "Standard_H8", "Standard_H8m", "Standard_L16s", "Standard_L32s",
"Standard_L4s", "Standard_L8s", "Standard_M128-32ms", "Standard_M128-64ms", "Standard_M128ms",
"Standard_M128s", "Standard_M64-16ms", "Standard_M64-32ms", "Standard_M64ms", "Standard_M64s",
"Standard_NC12", "Standard_NC12s_v2", "Standard_NC12s_v3", "Standard_NC24", "Standard_NC24r",
"Standard_NC24rs_v2", "Standard_NC24rs_v3", "Standard_NC24s_v2", "Standard_NC24s_v3",
"Standard_NC6", "Standard_NC6s_v2", "Standard_NC6s_v3", "Standard_ND12s", "Standard_ND24rs",
"Standard_ND24s", "Standard_ND6s", "Standard_NV12", "Standard_NV24", "Standard_NV6".
:type vm_size: str or
~azure.mgmt.containerservice.v2020_07_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every
machine in this master/agent pool. If you specify 0, it will apply the default osDisk size
according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to
Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling.
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling.
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler.
:type enable_auto_scaling: bool
:param type: AgentPoolType represents types of an agent pool. Possible values include:
"VirtualMachineScaleSets", "AvailabilitySet".
:type type: str or ~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolType
:param mode: AgentPoolMode represents mode of an agent pool. Possible values include: "System",
"User".
:type mode: str or ~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolMode
:param orchestrator_version: Version of orchestrator specified when creating the managed
cluster.
:type orchestrator_version: str
:ivar node_image_version: Version of node image.
:vartype node_image_version: str
:param upgrade_settings: Settings for upgrading the agentpool.
:type upgrade_settings:
~azure.mgmt.containerservice.v2020_07_01.models.AgentPoolUpgradeSettings
:ivar provisioning_state: The current deployment or provisioning state, which only appears in
the response.
:vartype provisioning_state: str
:param availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets
AgentPoolType.
:type availability_zones: list[str]
:param enable_node_public_ip: Enable public IP for nodes.
:type enable_node_public_ip: bool
:param scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set
priority. Default to regular. Possible values include: "Spot", "Regular". Default value:
"Regular".
:type scale_set_priority: str or
~azure.mgmt.containerservice.v2020_07_01.models.ScaleSetPriority
:param scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy
for Spot virtual machine scale set. Default to Delete. Possible values include: "Delete",
"Deallocate". Default value: "Delete".
:type scale_set_eviction_policy: str or
~azure.mgmt.containerservice.v2020_07_01.models.ScaleSetEvictionPolicy
:param spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to
pay in US Dollars. Possible values are any decimal value greater than zero or -1 which
indicates default price to be up-to on-demand.
:type spot_max_price: float
:param tags: A set of tags. Agent pool tags to be persisted on the agent pool virtual machine
scale set.
:type tags: dict[str, str]
:param node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:type node_labels: dict[str, str]
:param node_taints: Taints added to new nodes during node pool create and scale. For example,
key=value:NoSchedule.
:type node_taints: list[str]
:param proximity_placement_group_id: The ID for Proximity Placement Group.
:type proximity_placement_group_id: str
:param name: Required. Unique name of the agent pool profile in the context of the subscription
and resource group.
:type name: str
"""
_validation = {
'os_disk_size_gb': {'maximum': 1023, 'minimum': 0},
'node_image_version': {'readonly': True},
'provisioning_state': {'readonly': True},
'name': {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'node_image_version': {'key': 'nodeImageVersion', 'type': 'str'},
'upgrade_settings': {'key': 'upgradeSettings', 'type': 'AgentPoolUpgradeSettings'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
'enable_node_public_ip': {'key': 'enableNodePublicIP', 'type': 'bool'},
'scale_set_priority': {'key': 'scaleSetPriority', 'type': 'str'},
'scale_set_eviction_policy': {'key': 'scaleSetEvictionPolicy', 'type': 'str'},
'spot_max_price': {'key': 'spotMaxPrice', 'type': 'float'},
'tags': {'key': 'tags', 'type': '{str}'},
'node_labels': {'key': 'nodeLabels', 'type': '{str}'},
'node_taints': {'key': 'nodeTaints', 'type': '[str]'},
'proximity_placement_group_id': {'key': 'proximityPlacementGroupID', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
count: Optional[int] = None,
vm_size: Optional[Union[str, "ContainerServiceVMSizeTypes"]] = None,
os_disk_size_gb: Optional[int] = None,
vnet_subnet_id: Optional[str] = None,
max_pods: Optional[int] = None,
os_type: Optional[Union[str, "OSType"]] = "Linux",
max_count: Optional[int] = None,
min_count: Optional[int] = None,
enable_auto_scaling: Optional[bool] = None,
type: Optional[Union[str, "AgentPoolType"]] = None,
mode: Optional[Union[str, "AgentPoolMode"]] = None,
orchestrator_version: Optional[str] = None,
upgrade_settings: Optional["AgentPoolUpgradeSettings"] = None,
availability_zones: Optional[List[str]] = None,
enable_node_public_ip: Optional[bool] = None,
scale_set_priority: Optional[Union[str, "ScaleSetPriority"]] = "Regular",
scale_set_eviction_policy: Optional[Union[str, "ScaleSetEvictionPolicy"]] = "Delete",
spot_max_price: Optional[float] = -1,
tags: Optional[Dict[str, str]] = None,
node_labels: Optional[Dict[str, str]] = None,
node_taints: Optional[List[str]] = None,
proximity_placement_group_id: Optional[str] = None,
**kwargs
):
super(ManagedClusterAgentPoolProfile, self).__init__(count=count, vm_size=vm_size, os_disk_size_gb=os_disk_size_gb, vnet_subnet_id=vnet_subnet_id, max_pods=max_pods, os_type=os_type, max_count=max_count, min_count=min_count, enable_auto_scaling=enable_auto_scaling, type=type, mode=mode, orchestrator_version=orchestrator_version, upgrade_settings=upgrade_settings, availability_zones=availability_zones, enable_node_public_ip=enable_node_public_ip, scale_set_priority=scale_set_priority, scale_set_eviction_policy=scale_set_eviction_policy, spot_max_price=spot_max_price, tags=tags, node_labels=node_labels, node_taints=node_taints, proximity_placement_group_id=proximity_placement_group_id, **kwargs)
self.name = name
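# Not part of the generated model code: a minimal usage sketch, kept as a
# comment so nothing runs at import time. The values are hypothetical and only
# illustrate how the keyword-only constructor above is meant to be called;
# msrest then serializes attributes through _attribute_map (e.g. vm_size ->
# "vmSize", os_disk_size_gb -> "osDiskSizeGB").
#
#   pool = ManagedClusterAgentPoolProfile(
#       name="nodepool1",            # must match ^[a-z][a-z0-9]{0,11}$
#       count=3,
#       vm_size="Standard_DS2_v2",
#       os_type="Linux",
#       mode="System",
#   )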
class ManagedClusterAPIServerAccessProfile(msrest.serialization.Model):
"""Access profile for managed cluster API server.
:param authorized_ip_ranges: Authorized IP Ranges to kubernetes API server.
:type authorized_ip_ranges: list[str]
:param enable_private_cluster: Whether to create the cluster as a private cluster or not.
:type enable_private_cluster: bool
"""
_attribute_map = {
'authorized_ip_ranges': {'key': 'authorizedIPRanges', 'type': '[str]'},
'enable_private_cluster': {'key': 'enablePrivateCluster', 'type': 'bool'},
}
def __init__(
self,
*,
authorized_ip_ranges: Optional[List[str]] = None,
enable_private_cluster: Optional[bool] = None,
**kwargs
):
super(ManagedClusterAPIServerAccessProfile, self).__init__(**kwargs)
self.authorized_ip_ranges = authorized_ip_ranges
self.enable_private_cluster = enable_private_cluster
class ManagedClusterIdentity(msrest.serialization.Model):
"""Identity for the managed cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the system assigned identity which is used by master
components.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the system assigned identity which is used by master
components.
:vartype tenant_id: str
:param type: The type of identity used for the managed cluster. Type 'SystemAssigned' will use
an implicitly created identity in master components and an auto-created user assigned identity
in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster,
service principal will be used instead. Possible values include: "SystemAssigned",
"UserAssigned", "None".
:type type: str or ~azure.mgmt.containerservice.v2020_07_01.models.ResourceIdentityType
:param user_assigned_identities: The user identity associated with the managed cluster. This
identity will be used in control plane and only one user assigned identity is allowed. The user
identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterIdentityUserAssignedIdentitiesValue]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ManagedClusterIdentityUserAssignedIdentitiesValue}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "ManagedClusterIdentityUserAssignedIdentitiesValue"]] = None,
**kwargs
):
super(ManagedClusterIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
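# Illustrative sketch (hypothetical subscription and identity names): per the
# docstring above, user-assigned identities are keyed by their full ARM
# resource id. Kept as a comment so nothing executes at import time.
#
#   identity = ManagedClusterIdentity(
#       type="UserAssigned",
#       user_assigned_identities={
#           "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg"
#           "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/my-identity":
#               ManagedClusterIdentityUserAssignedIdentitiesValue(),
#       },
#   )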
class ManagedClusterIdentityUserAssignedIdentitiesValue(msrest.serialization.Model):
"""ManagedClusterIdentityUserAssignedIdentitiesValue.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ManagedClusterIdentityUserAssignedIdentitiesValue, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class ManagedClusterListResult(msrest.serialization.Model):
"""The response from the List Managed Clusters operation.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of managed clusters.
:type value: list[~azure.mgmt.containerservice.v2020_07_01.models.ManagedCluster]
:ivar next_link: The URL to get the next set of managed cluster results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ManagedCluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ManagedCluster"]] = None,
**kwargs
):
super(ManagedClusterListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ManagedClusterLoadBalancerProfile(msrest.serialization.Model):
"""Profile of the managed cluster load balancer.
:param managed_outbound_i_ps: Desired managed outbound IPs for the cluster load balancer.
:type managed_outbound_i_ps:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterLoadBalancerProfileManagedOutboundIPs
:param outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load
balancer.
:type outbound_ip_prefixes:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPPrefixes
:param outbound_i_ps: Desired outbound IP resources for the cluster load balancer.
:type outbound_i_ps:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterLoadBalancerProfileOutboundIPs
:param effective_outbound_i_ps: The effective outbound IP resources of the cluster load
balancer.
:type effective_outbound_i_ps:
list[~azure.mgmt.containerservice.v2020_07_01.models.ResourceReference]
:param allocated_outbound_ports: Desired number of allocated SNAT ports per VM. Allowed values
must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure
dynamically allocating ports.
:type allocated_outbound_ports: int
:param idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values
must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
:type idle_timeout_in_minutes: int
"""
_validation = {
'allocated_outbound_ports': {'maximum': 64000, 'minimum': 0},
'idle_timeout_in_minutes': {'maximum': 120, 'minimum': 4},
}
_attribute_map = {
'managed_outbound_i_ps': {'key': 'managedOutboundIPs', 'type': 'ManagedClusterLoadBalancerProfileManagedOutboundIPs'},
'outbound_ip_prefixes': {'key': 'outboundIPPrefixes', 'type': 'ManagedClusterLoadBalancerProfileOutboundIPPrefixes'},
'outbound_i_ps': {'key': 'outboundIPs', 'type': 'ManagedClusterLoadBalancerProfileOutboundIPs'},
'effective_outbound_i_ps': {'key': 'effectiveOutboundIPs', 'type': '[ResourceReference]'},
'allocated_outbound_ports': {'key': 'allocatedOutboundPorts', 'type': 'int'},
'idle_timeout_in_minutes': {'key': 'idleTimeoutInMinutes', 'type': 'int'},
}
def __init__(
self,
*,
managed_outbound_i_ps: Optional["ManagedClusterLoadBalancerProfileManagedOutboundIPs"] = None,
outbound_ip_prefixes: Optional["ManagedClusterLoadBalancerProfileOutboundIPPrefixes"] = None,
outbound_i_ps: Optional["ManagedClusterLoadBalancerProfileOutboundIPs"] = None,
effective_outbound_i_ps: Optional[List["ResourceReference"]] = None,
allocated_outbound_ports: Optional[int] = 0,
idle_timeout_in_minutes: Optional[int] = 30,
**kwargs
):
super(ManagedClusterLoadBalancerProfile, self).__init__(**kwargs)
self.managed_outbound_i_ps = managed_outbound_i_ps
self.outbound_ip_prefixes = outbound_ip_prefixes
self.outbound_i_ps = outbound_i_ps
self.effective_outbound_i_ps = effective_outbound_i_ps
self.allocated_outbound_ports = allocated_outbound_ports
self.idle_timeout_in_minutes = idle_timeout_in_minutes
class ManagedClusterLoadBalancerProfileManagedOutboundIPs(msrest.serialization.Model):
"""Desired managed outbound IPs for the cluster load balancer.
:param count: Desired number of outbound IP created/managed by Azure for the cluster load
balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
:type count: int
"""
_validation = {
'count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
}
def __init__(
self,
*,
count: Optional[int] = 1,
**kwargs
):
super(ManagedClusterLoadBalancerProfileManagedOutboundIPs, self).__init__(**kwargs)
self.count = count
class ManagedClusterLoadBalancerProfileOutboundIPPrefixes(msrest.serialization.Model):
"""Desired outbound IP Prefix resources for the cluster load balancer.
:param public_ip_prefixes: A list of public IP prefix resources.
:type public_ip_prefixes:
list[~azure.mgmt.containerservice.v2020_07_01.models.ResourceReference]
"""
_attribute_map = {
'public_ip_prefixes': {'key': 'publicIPPrefixes', 'type': '[ResourceReference]'},
}
def __init__(
self,
*,
public_ip_prefixes: Optional[List["ResourceReference"]] = None,
**kwargs
):
super(ManagedClusterLoadBalancerProfileOutboundIPPrefixes, self).__init__(**kwargs)
self.public_ip_prefixes = public_ip_prefixes
class ManagedClusterLoadBalancerProfileOutboundIPs(msrest.serialization.Model):
"""Desired outbound IP resources for the cluster load balancer.
:param public_i_ps: A list of public IP resources.
:type public_i_ps: list[~azure.mgmt.containerservice.v2020_07_01.models.ResourceReference]
"""
_attribute_map = {
'public_i_ps': {'key': 'publicIPs', 'type': '[ResourceReference]'},
}
def __init__(
self,
*,
public_i_ps: Optional[List["ResourceReference"]] = None,
**kwargs
):
super(ManagedClusterLoadBalancerProfileOutboundIPs, self).__init__(**kwargs)
self.public_i_ps = public_i_ps
class ManagedClusterPoolUpgradeProfile(msrest.serialization.Model):
"""The list of available upgrade versions.
All required parameters must be populated in order to send to Azure.
:param kubernetes_version: Required. Kubernetes version (major, minor, patch).
:type kubernetes_version: str
:param name: Pool name.
:type name: str
:param os_type: Required. OsType to be used to specify os type. Choose from Linux and Windows.
Default to Linux. Possible values include: "Linux", "Windows". Default value: "Linux".
:type os_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.OSType
:param upgrades: List of orchestrator types and versions available for upgrade.
:type upgrades:
list[~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPoolUpgradeProfileUpgradesItem]
"""
_validation = {
'kubernetes_version': {'required': True},
'os_type': {'required': True},
}
_attribute_map = {
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'upgrades': {'key': 'upgrades', 'type': '[ManagedClusterPoolUpgradeProfileUpgradesItem]'},
}
def __init__(
self,
*,
kubernetes_version: str,
os_type: Union[str, "OSType"] = "Linux",
name: Optional[str] = None,
upgrades: Optional[List["ManagedClusterPoolUpgradeProfileUpgradesItem"]] = None,
**kwargs
):
super(ManagedClusterPoolUpgradeProfile, self).__init__(**kwargs)
self.kubernetes_version = kubernetes_version
self.name = name
self.os_type = os_type
self.upgrades = upgrades
class ManagedClusterPoolUpgradeProfileUpgradesItem(msrest.serialization.Model):
"""ManagedClusterPoolUpgradeProfileUpgradesItem.
:param kubernetes_version: Kubernetes version (major, minor, patch).
:type kubernetes_version: str
:param is_preview: Whether Kubernetes version is currently in preview.
:type is_preview: bool
"""
_attribute_map = {
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'is_preview': {'key': 'isPreview', 'type': 'bool'},
}
def __init__(
self,
*,
kubernetes_version: Optional[str] = None,
is_preview: Optional[bool] = None,
**kwargs
):
super(ManagedClusterPoolUpgradeProfileUpgradesItem, self).__init__(**kwargs)
self.kubernetes_version = kubernetes_version
self.is_preview = is_preview
class ManagedClusterPropertiesAutoScalerProfile(msrest.serialization.Model):
"""Parameters to be applied to the cluster-autoscaler when enabled.
:param balance_similar_node_groups:
:type balance_similar_node_groups: str
:param scan_interval:
:type scan_interval: str
:param scale_down_delay_after_add:
:type scale_down_delay_after_add: str
:param scale_down_delay_after_delete:
:type scale_down_delay_after_delete: str
:param scale_down_delay_after_failure:
:type scale_down_delay_after_failure: str
:param scale_down_unneeded_time:
:type scale_down_unneeded_time: str
:param scale_down_unready_time:
:type scale_down_unready_time: str
:param scale_down_utilization_threshold:
:type scale_down_utilization_threshold: str
:param max_graceful_termination_sec:
:type max_graceful_termination_sec: str
"""
_attribute_map = {
'balance_similar_node_groups': {'key': 'balance-similar-node-groups', 'type': 'str'},
'scan_interval': {'key': 'scan-interval', 'type': 'str'},
'scale_down_delay_after_add': {'key': 'scale-down-delay-after-add', 'type': 'str'},
'scale_down_delay_after_delete': {'key': 'scale-down-delay-after-delete', 'type': 'str'},
'scale_down_delay_after_failure': {'key': 'scale-down-delay-after-failure', 'type': 'str'},
'scale_down_unneeded_time': {'key': 'scale-down-unneeded-time', 'type': 'str'},
'scale_down_unready_time': {'key': 'scale-down-unready-time', 'type': 'str'},
'scale_down_utilization_threshold': {'key': 'scale-down-utilization-threshold', 'type': 'str'},
'max_graceful_termination_sec': {'key': 'max-graceful-termination-sec', 'type': 'str'},
}
def __init__(
self,
*,
balance_similar_node_groups: Optional[str] = None,
scan_interval: Optional[str] = None,
scale_down_delay_after_add: Optional[str] = None,
scale_down_delay_after_delete: Optional[str] = None,
scale_down_delay_after_failure: Optional[str] = None,
scale_down_unneeded_time: Optional[str] = None,
scale_down_unready_time: Optional[str] = None,
scale_down_utilization_threshold: Optional[str] = None,
max_graceful_termination_sec: Optional[str] = None,
**kwargs
):
super(ManagedClusterPropertiesAutoScalerProfile, self).__init__(**kwargs)
self.balance_similar_node_groups = balance_similar_node_groups
self.scan_interval = scan_interval
self.scale_down_delay_after_add = scale_down_delay_after_add
self.scale_down_delay_after_delete = scale_down_delay_after_delete
self.scale_down_delay_after_failure = scale_down_delay_after_failure
self.scale_down_unneeded_time = scale_down_unneeded_time
self.scale_down_unready_time = scale_down_unready_time
self.scale_down_utilization_threshold = scale_down_utilization_threshold
self.max_graceful_termination_sec = max_graceful_termination_sec
class ManagedClusterPropertiesIdentityProfileValue(UserAssignedIdentity):
"""ManagedClusterPropertiesIdentityProfileValue.
:param resource_id: The resource id of the user assigned identity.
:type resource_id: str
:param client_id: The client id of the user assigned identity.
:type client_id: str
:param object_id: The object id of the user assigned identity.
:type object_id: str
"""
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'object_id': {'key': 'objectId', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
**kwargs
):
super(ManagedClusterPropertiesIdentityProfileValue, self).__init__(resource_id=resource_id, client_id=client_id, object_id=object_id, **kwargs)
class ManagedClusterServicePrincipalProfile(msrest.serialization.Model):
"""Information about a service principal identity for the cluster to use for manipulating Azure APIs.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. The ID for the service principal.
:type client_id: str
:param secret: The secret password associated with the service principal in plain text.
:type secret: str
"""
_validation = {
'client_id': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
}
def __init__(
self,
*,
client_id: str,
secret: Optional[str] = None,
**kwargs
):
super(ManagedClusterServicePrincipalProfile, self).__init__(**kwargs)
self.client_id = client_id
self.secret = secret
class ManagedClusterSKU(msrest.serialization.Model):
"""ManagedClusterSKU.
:param name: Name of a managed cluster SKU. Possible values include: "Basic".
:type name: str or ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterSKUName
:param tier: Tier of a managed cluster SKU. Possible values include: "Paid", "Free".
:type tier: str or ~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterSKUTier
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[Union[str, "ManagedClusterSKUName"]] = None,
tier: Optional[Union[str, "ManagedClusterSKUTier"]] = None,
**kwargs
):
super(ManagedClusterSKU, self).__init__(**kwargs)
self.name = name
self.tier = tier
class ManagedClusterUpgradeProfile(msrest.serialization.Model):
"""The list of available upgrades for compute pools.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of upgrade profile.
:vartype id: str
:ivar name: Name of upgrade profile.
:vartype name: str
:ivar type: Type of upgrade profile.
:vartype type: str
:param control_plane_profile: Required. The list of available upgrade versions for the control
plane.
:type control_plane_profile:
~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPoolUpgradeProfile
:param agent_pool_profiles: Required. The list of available upgrade versions for agent pools.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2020_07_01.models.ManagedClusterPoolUpgradeProfile]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'control_plane_profile': {'required': True},
'agent_pool_profiles': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'control_plane_profile': {'key': 'properties.controlPlaneProfile', 'type': 'ManagedClusterPoolUpgradeProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterPoolUpgradeProfile]'},
}
def __init__(
self,
*,
control_plane_profile: "ManagedClusterPoolUpgradeProfile",
agent_pool_profiles: List["ManagedClusterPoolUpgradeProfile"],
**kwargs
):
super(ManagedClusterUpgradeProfile, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.control_plane_profile = control_plane_profile
self.agent_pool_profiles = agent_pool_profiles
class ManagedClusterWindowsProfile(msrest.serialization.Model):
"""Profile for Windows VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:param admin_username: Required. Specifies the name of the administrator account.
:code:`<br>`:code:`<br>` **restriction:** Cannot end in "." :code:`<br>`:code:`<br>`
**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1",
"user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console",
"david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0",
"sys", "test2", "test3", "user4", "user5". :code:`<br>`:code:`<br>` **Minimum-length:** 1
character :code:`<br>`:code:`<br>` **Max-length:** 20 characters.
:type admin_username: str
:param admin_password: Specifies the password of the administrator account.
:code:`<br>`:code:`<br>` **Minimum-length:** 8 characters :code:`<br>`:code:`<br>`
**Max-length:** 123 characters :code:`<br>`:code:`<br>` **Complexity requirements:** 3 out of 4
conditions below need to be fulfilled :code:`<br>` Has lower characters :code:`<br>`Has upper
characters :code:`<br>` Has a digit :code:`<br>` Has a special character (Regex match [\W_])
:code:`<br>`:code:`<br>` **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd",
"P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!".
:type admin_password: str
:param license_type: The licenseType to use for Windows VMs. Windows_Server is used to enable
Azure Hybrid User Benefits for Windows VMs. Possible values include: "None", "Windows_Server".
:type license_type: str or ~azure.mgmt.containerservice.v2020_07_01.models.LicenseType
"""
_validation = {
'admin_username': {'required': True},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
'license_type': {'key': 'licenseType', 'type': 'str'},
}
def __init__(
self,
*,
admin_username: str,
admin_password: Optional[str] = None,
license_type: Optional[Union[str, "LicenseType"]] = None,
**kwargs
):
super(ManagedClusterWindowsProfile, self).__init__(**kwargs)
self.admin_username = admin_username
self.admin_password = admin_password
self.license_type = license_type
class OperationListResult(msrest.serialization.Model):
"""The List Compute Operation operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of compute operations.
:vartype value: list[~azure.mgmt.containerservice.v2020_07_01.models.OperationValue]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationValue]'},
}
def __init__(
self,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = None
class OperationValue(msrest.serialization.Model):
"""Describes the properties of a Compute Operation value.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar origin: The origin of the compute operation.
:vartype origin: str
:ivar name: The name of the compute operation.
:vartype name: str
:ivar operation: The display name of the compute operation.
:vartype operation: str
:ivar resource: The display name of the resource the operation applies to.
:vartype resource: str
:ivar description: The description of the operation.
:vartype description: str
:ivar provider: The resource provider for the operation.
:vartype provider: str
"""
_validation = {
'origin': {'readonly': True},
'name': {'readonly': True},
'operation': {'readonly': True},
'resource': {'readonly': True},
'description': {'readonly': True},
'provider': {'readonly': True},
}
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'operation': {'key': 'display.operation', 'type': 'str'},
'resource': {'key': 'display.resource', 'type': 'str'},
'description': {'key': 'display.description', 'type': 'str'},
'provider': {'key': 'display.provider', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationValue, self).__init__(**kwargs)
self.origin = None
self.name = None
self.operation = None
self.resource = None
self.description = None
self.provider = None
class PrivateEndpoint(msrest.serialization.Model):
"""Private endpoint which a connection belongs to.
:param id: The resource Id for private endpoint.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = id
class PrivateEndpointConnection(msrest.serialization.Model):
"""A private endpoint connection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ID of the private endpoint connection.
:vartype id: str
:ivar name: The name of the private endpoint connection.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
:ivar provisioning_state: The current provisioning state. Possible values include: "Succeeded",
"Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.containerservice.v2020_07_01.models.PrivateEndpointConnectionProvisioningState
:param private_endpoint: The resource of private endpoint.
:type private_endpoint: ~azure.mgmt.containerservice.v2020_07_01.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~azure.mgmt.containerservice.v2020_07_01.models.PrivateLinkServiceConnectionState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
}
def __init__(
self,
*,
private_endpoint: Optional["PrivateEndpoint"] = None,
private_link_service_connection_state: Optional["PrivateLinkServiceConnectionState"] = None,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.provisioning_state = None
self.private_endpoint = private_endpoint
self.private_link_service_connection_state = private_link_service_connection_state
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""A list of private endpoint connections.
:param value: The collection value.
:type value: list[~azure.mgmt.containerservice.v2020_07_01.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
*,
value: Optional[List["PrivateEndpointConnection"]] = None,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = value
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""The state of a private link service connection.
:param status: The private link service connection status. Possible values include: "Pending",
"Approved", "Rejected", "Disconnected".
:type status: str or ~azure.mgmt.containerservice.v2020_07_01.models.ConnectionStatus
:param description: The private link service connection description.
:type description: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[Union[str, "ConnectionStatus"]] = None,
description: Optional[str] = None,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = status
self.description = description
class ResourceReference(msrest.serialization.Model):
"""A reference to an Azure resource.
:param id: The fully qualified Azure resource id.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
**kwargs
):
super(ResourceReference, self).__init__(**kwargs)
self.id = id
class TagsObject(msrest.serialization.Model):
"""Tags object for patch operations.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TagsObject, self).__init__(**kwargs)
self.tags = tags
| mit | 8,577,119,578,000,281,000 | 44.593252 | 710 | 0.653023 | false |
DLR-SC/tigl | bindings/java/make_tigl_java.py | 1 | 2467 | # -*-# coding: utf-8 -*-
"""
Created on Wed May 01 11:44:49 2013
@author: Martin Siggel <[email protected]>
"""
from __future__ import print_function
import sys, os
from datetime import date
tiglpath = os.path.dirname(os.path.realpath(__file__)) + '/../..'
sys.path.append(tiglpath + '/bindings')
import bindings_generator.java_generator as PG
import bindings_generator.cheader_parser as CP
apache = \
'''/*
* Copyright (C) 2007-2013 German Aerospace Center (DLR/SC)
*
* Created: 2014-10-21 Martin Siggel <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
'''
apache += \
'/* \n\
* This file is automatically created from tigl.h on %s.\n'\
% date.today() + \
'* If you experience any bugs please contact the authors\n\
*/\n\n'
postconstr = '''
self.version = self.getVersion()
'''
blacklist = ['tiglLogToFileStreamEnabled']
if __name__ == '__main__':
# parse the file
parser = CP.CHeaderFileParser()
parser.decoration = 'TIGL_COMMON_EXPORT'
    parser.add_alias('TixiDocumentHandle', 'int')
parser.add_alias('FILE','void')
# set the handle string that the parser can identify the handles
parser.handle_str = 'TiglCPACSConfigurationHandle'
    parser.returncode_str = 'TiglReturnCode'
parser.parse_header_file(tiglpath + '/src/api/tigl.h')
# create the wrapper
pg = PG.JavaGenerator(name_prefix = 'tigl', libname = 'tigl3', package = "de.dlr.sc.tigl3")
pg.license = apache
pg.blacklist = blacklist
pg.postconstr = postconstr
print('Creating java interface... ', end=' ')
wrapper = pg.create_native_interface(parser)
print('done')
# write file
directory = 'src/de/dlr/sc/tigl3'
filename = directory + '/TiglNativeInterface.java'
print('Write tigl java interface to file "%s" ... ' % filename, end=' ')
fop = open(filename, 'w')
fop.write(wrapper)
print('done')
pg.write_enums(parser, directory)
| apache-2.0 | 3,887,434,982,440,820,000 | 28.023529 | 95 | 0.675719 | false |
gnovak/overheard | overheard/fetch.py | 1 | 16979 | #########
# Notes #
#########
#
# Archive.org won't accept wget user agent string---it can be anything else
#
# When fetching source files from arxiv.org:
# 0408420 gives most recent
# 0408420vN gives version N
# 0408420vN if N > number of versions gives most recent version
# (the behavior is same for old and new archive identifiers)
#
# The conventions for storing latex source archives files are taken
# from those used in the bulk data made available by arxiv.org via
# Amazon.com's S3 service. This makes it possible to download a bunch
# of source files, untar them, and start working on the data without
# pre-processing the files in any way.
#
# Information here: http://arxiv.org/help/bulk_data_s3
#
# Command to bulk download arxiv sources to the current directory
# s3cmd sync --add-header="x-amz-request-payer: requester" s3://arxiv/src/ .
#
# The entire archive is divided into sub-directories by YYMM. Files
# in each directory can be pdf files or gzip files. The gunzipped
# file can be any number of things: latex source, a tar file,
# postscript, html, plain text, (and more?). It might have been nice
# if the pdf files were gzipped so that _everything_ could be handled
# the same way (gunzip, figure out what it is, act appropriately), but
# the arxiv.org people have decided not to do that. It's true that
# PDF files are already compressed, so gzipping them is kind of a
# waste.
#
# However, when you download a paper via
# http://arxiv.org/e-print/identifier, you don't know if the file will
# be a PDF or a gzip file, and the resulting file has no extension.
# Therefore when fetching papers (from the arxiv.org RSS feed, for
# instance), the code below looks at the file type and puts the
# appropriate extension so that it goes into the data directory just
# like the bulk data provided by arxiv.org via S3.
#
# Thus when you ask for the name of the data file associated with an
# arxiv id, you may want it without an extension (if it was downloaded
# via wget and you want to put the appropriate extension on), or you
# may want it _with_ the extension (in which case the code must look
# to see if it's a pdf or gz file, and give you the correct filename).
#
# Note that it's necessary to call 'file' on the gunzipped file.
# Calling 'file' on the gzipped file allows you to figure out what's
# inside most of the time, but sometimes the results are hilariously
# wrong. All of these contain valid latex:
# 1211.0074.gz: Minix filesystem, V2, 51878 zones
# cond-mat0701210.gz: gzip compressed data, was "/data/new/0022/0022418/src/with",
# math0701257.gz: x86 boot sector, code offset 0x8b
#
# Even calling file on the gunzipped file is bizarrly incorrect
# sometimes -- files are listed as C++, plain text (when they're
# latex), or even 'data'. As long as the output of 'file' has the
# word 'text' in it, I treat it as latex.
#
# The problem with files not being recognized as latex after
# gunzipping apparently has to do with text encodings. Here are a few
# examples of arxiv id's listed as 'data' after gunzipping.
# 1303.5083 1401.5069 1401.5758 1401.5838 1401.6077 1401.6577
# 1401.7056 1402.0695 1402.0700 1402.1495 1402.1968 1402.5880
#
# All of the above is for file v5.04 on OS X 10.9.2 Mavericks.  This
# version of file dates from about 2010 and I'm writing this in 2014.
# Going to the latest version of file v5.17 results in improvements in
# the file type determination for about 300 of the ~3000 files in the
# entire archive of 1 million papers, so, it's not really worth it.
#
# In any case, using file v5.17 installed from MacPorts results in
# messages like:
# ERROR: line 163: regex error 17, (illegal byte sequence)
# This is evidently another text encoding problem and it is discussed here:
# https://trac.macports.org/ticket/38771
# A workaround is to set the shell variables:
# export LC_CTYPE=C
# export LANG=C
#
# There's other weird stuff in the files provided by arxiv.org, like
# PDF files that 'file' reports to be strange things. All of these
# are viewable via Emacs DocView and Apple Preview, although when you
# look at the bytes they do indeed look weird (not like normal PDF
# files).
# ./0003/cond-mat0003162.pdf: data
# ./0304/cond-mat0304406.pdf: MacBinary III data with surprising version number
# ./0404/physics0404071.pdf: data
# ./9803/cond-mat9803359.pdf: data
# ./9805/cond-mat9805146.pdf: data
# ./0402/cs0402023.pdf: POSIX tar archive (GNU)
# ./1204/1204.0257.pdf: data
# ./1204/1204.0258.pdf: data
#
# It takes ~5.5 min to just gunzip everything and untar everything via
# the shell for one month's submissions.  It takes ~10 min to do it via
# python code. I find this acceptable.
#
from __future__ import with_statement
import sys, os, subprocess, tempfile, shutil, re, time
import path, util, arxiv_id
# Interactive use/testing more or less requires that fetch.user_agent
# be set to something here in the source file. However, I don't want
# other people hammering arxiv.org with a user agent string that looks
# like it's me. Therefore the arg parsing code in overheard.py sets
# fetch.user_agent to None if the value isn't provided on the command
# line. That triggers an error message if wget is called.
user_agent = 'overheard'
verbose = True
# arxiv_id's job is parsing arxiv identifiers. This module's job is
# relating that to the filesystem. The entire archive is partitioned
# into directories that are just the yymm part of the arxiv id, so
# just steal that function from the arxiv_id module.
dir_prefix = arxiv_id.yymm
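# For example (hypothetical ids; this just assumes arxiv_id.yymm extracts the
# year/month prefix as described above):
#   dir_prefix('1402.1968')          -> '1402'
#   dir_prefix('astro-ph/9505048')   -> '9505'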
def extension(fn):
"Get the extension of a filename"
return os.path.splitext(fn)[1][1:]
def ensure_dirs_exist(path_name):
"""Ensure that dirs exist to create a file
path_name is expected to be the path to a file, including the
filename. This creates all of the directories leading up to the
filename.
If path_name ends with a path separator, then the last element is
known to be a directory and the entire path is created if it
doesn't exist.
"""
dir, fn = os.path.split(path_name)
if not os.path.isdir(dir):
os.makedirs(dir)
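# Example (hypothetical paths): ensure_dirs_exist('data/latex/1402/foo.tex')
# creates data/latex/1402 if needed, and ensure_dirs_exist('data/latex/1402/')
# does the same because the trailing separator marks the last element as a
# directory.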
def arxiv_to_url(aid):
"Get the URL to download the source file for a paper"
return "http://arxiv.org/e-print/" + aid
def fetch_command(aid, fn):
"Give the command to fetch latex source file"
# Explicitly give the output file name because old-style arxiv
# id's result in files like 9901012 that lack the archive name.
if user_agent is None:
print >> sys.stderr, "User agent string not set. Arxiv.org blocks requests with the user"
print >> sys.stderr, "agent string wget. You must set a different one like this:"
print >> sys.stderr
print >> sys.stderr, "%s -u 'my user agent string'" % sys.argv[0]
print >> sys.stderr
sys.exit(1)
return (["wget", "-U '%s'" % user_agent,
"--output-document", fn] +
([] if verbose else ["--output-file", "/dev/null"]) +
[arxiv_to_url(aid)])
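# For illustration, with user_agent = 'overheard' and verbose = False,
# fetch_command('1402.1968', '/tmp/src') builds roughly:
#   ["wget", "-U 'overheard'", '--output-document', '/tmp/src',
#    '--output-file', '/dev/null', 'http://arxiv.org/e-print/1402.1968']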
def untar_command(fn):
"Give the command extract a tar archive."
return ["tar", "xf", fn]
def gunzip_command(fn):
"Give the command to decompress a gzip file."
return ["gunzip", fn]
def file_name_base(aid):
"Name of latex/source file for an arxiv id without the extension"
if arxiv_id.is_new(aid):
fn_base = aid
else:
# Ugh, four function calls when I could just grab the regexp
# match object and pull out what I want. And all of this is
# just to get rid of the slash in the identifier
fn_base = (arxiv_id.archive(aid) + arxiv_id.yymm(aid) +
arxiv_id.number(aid) + arxiv_id.version(aid))
return fn_base
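# Sketch of the resulting names (hypothetical ids; assumes the arxiv_id helpers
# split old-style ids into archive/yymm/number/version pieces as used above):
#   file_name_base('1402.1968')           -> '1402.1968'
#   file_name_base('astro-ph/9901012v2')  -> 'astro-ph9901012v2'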
def latex_file_name(aid):
"Name of latex file"
return file_name_base(aid) + '.tex'
def latex_file_path(aid):
"Full path to the latex file"
return os.path.join(path.latex, dir_prefix(aid), latex_file_name(aid))
def source_file_name(aid):
"Name of source file"
ext = source_file_extension(aid)
if not ext:
raise RuntimeError, "No files exist for %s" % aid
return file_name_base(aid) + ext
def source_file_path(aid):
"Full path to source file"
return os.path.join(path.source, dir_prefix(aid), source_file_name(aid))
def source_file_path_without_extension(aid):
"""Full path to source file without the extension
This is used when figuring out the correct extension of the source
file for this particular paper
"""
return os.path.join(path.source, dir_prefix(aid), file_name_base(aid))
def source_file_exists(aid):
"""Determine if the source file associated with an arxiv id exists."""
return source_file_extension(aid)
def source_file_extension(aid):
"""Return the extension of the source file associated with an arxiv id.
Return False if the file doesn't exist.
"""
valid_extensions = ['.gz', '.pdf']
paths = [source_file_path_without_extension(aid) + ext
for ext in valid_extensions]
exist = [os.path.isfile(pp) for pp in paths]
n_exist = exist.count(True)
if n_exist > 1:
raise RuntimeError, "More than one file exists for %s" % aid
elif n_exist == 0:
return False
return valid_extensions[exist.index(True)]
def file_type_string(fn):
"Return the output of the 'file' command"
pipe = subprocess.Popen(["file", fn], stdout=subprocess.PIPE)
stdout, stderr = pipe.communicate()
# Hmm... I finally have to learn something about string encodings.
# Output of pipe is ascii text, type() bytes, need to make it into
# a string to do regexps on it... what to assume? utf-8 seems
# ok...
return stdout.decode('utf-8')
def is_tar(fn):
"Is this a tar file?"
return re.search('tar archive', file_type_string(fn), re.I)
def is_gzip(fn):
"Is this a gzip file?"
return re.search('gzip compressed data', file_type_string(fn), re.I)
def is_pdf(fn):
"Is this a pdf file?"
return re.search('pdf document', file_type_string(fn), re.I)
def is_tex(fn):
"Is this a latex file?"
# Accept anything with the word 'text' or LaTeX in it, but does
# _not_ have Postscript in it. Postscript has %'s in it and the
# regexp I use for short comments apparently breaks for big files,
# ie, astro-ph/9505048. That postscript file is pathological,
# though, it's ~50 MB of only f's.
return (re.search('text|latex', file_type_string(fn), re.I) and not
re.search('postscript', file_type_string(fn), re.I))
def is_other(fn):
"Is this some file that we recognize but don't do anything with?"
# File types that are known, but that we can't do anything with
# This is so if a file type is totally unknown, we can print a
# message and catch it.
return re.search('tex dvi', file_type_string(fn), re.I)
def all_source(aids, delay=60, force=False):
"""Fetch the source files for all of the given arxiv ids.
delay is delay in seconds between fetching papers from arxiv.org
force=True disables caching of papers
"""
any_fetched = False
for aid in aids:
wait = source(aid, force=force)
if wait:
any_fetched = True
time.sleep(delay)
return any_fetched
def source(aid, force=False):
"Get source file from archive.org unless we already have it"
if not force and source_file_exists(aid):
if verbose: print "Using cached source file for", aid
return False
else:
# Interrupted downloads leave partial files laying around.
# Download to temp directory, then rename to final filename to
# avoid polluting the archive.
tf = tempfile.NamedTemporaryFile()
subprocess.call(fetch_command(aid, tf.name))
source_base = source_file_path_without_extension(aid)
ensure_dirs_exist(source_base)
# copy file to have correct extension. User copy rather than
# move so the system can happily delete the temp file when
# it's closed.
if is_pdf(tf.name):
shutil.copy(tf.name, source_base + '.pdf')
elif is_gzip(tf.name):
shutil.copy(tf.name, source_base + '.gz')
else:
# This should/would be an exception, but it occurs
# when downloading the new astro-ph files for the day.
# I don't want an unrecognized file to prevent
# downloading other papers, so just print a message
# and move on.
#
# raise RuntimeError, "Unrecognized file %s" % aid
print "WARNING: Unrecognized file type for", aid
return True
def all_latex(aids):
"""Extract latex from all arxiv ids given."""
for aid in aids:
latex(aid)
def latex(aid):
"Get latex out of source file"
if not source_file_exists(aid):
# could just try to grab the file from arxiv.org here.
raise ValueError, "File not found for %s!" % aid
path_name = source_file_path(aid)
tmpdir = tempfile.mkdtemp()
try:
shutil.copy(path_name, tmpdir)
with util.remember_cwd():
# All of this is taking place in a temp dir, so I only want
# filenames, not paths.
os.chdir(tmpdir)
base_fn = file_name_base(aid)
ext_fn = source_file_name(aid)
# gunzip if necessary
if is_gzip(ext_fn):
if verbose: print "Decompressing", aid
subprocess.call(gunzip_command(ext_fn))
if is_tex(base_fn):
# if it's a tex file, rename to correct extension
shutil.move(base_fn, base_fn + '.tex')
elif is_tar(base_fn):
# if it's a tar file, extract
if verbose: print "Extracting", aid
subprocess.call(untar_command(base_fn))
elif is_pdf(ext_fn):
# pdf files still have extension, so look at the filename
# with extension.
pass
elif is_other(base_fn):
# Everything except pdf files has been decompressed, so
# look at the filename without the extension.
pass
else:
# The line break comes at the end of file_type_string()
print "WARNING: Unknown file type: ", file_type_string(base_fn),
# All Latex files should now have .tex extensions, collect them.
files = os.listdir('.')
latex_files = [fn for fn in files if extension(fn) == 'tex']
# If there are no latex files, an empty file should be
# generated to avoid later file not found errors.
latex_fn = latex_file_path(aid)
ensure_dirs_exist(latex_fn)
with open(latex_fn, 'w') as outf:
if latex_files:
# Can have multiple tex files, just concat them
subprocess.call(['cat'] + latex_files, stdout=outf)
finally:
shutil.rmtree(tmpdir)
##################################################
def dir_to_arxiv_ids(dir):
"""Take a dir, list all the files, and convert them into arxiv ids.
This is a bit of a hack, to facilitate bulk extraction of latex files,
don't be too careful about it..."""
# Allowing tex, pdf, or gz extension means this can be used on
# latex dir or source dir
new_reverse_regexp = '^([0-9]{4}.[0-9]{4}(v[0-9]+)?)\.(pdf|gz|tex)$'
old_reverse_regexp = '^([-a-z]+)([0-9]{7}(v[0-9]+)?)\.(pdf|gz|tex)$'
files = os.listdir(dir)
result = []
for fn in files:
match = re.search(new_reverse_regexp, fn)
if match:
result.append(match.group(1))
match = re.search(old_reverse_regexp, fn)
if match:
result.append(match.group(1) + '/' + match.group(2))
return result
def year_to_arxiv_id(year, prefix=path.latex):
"""Get all the arxiv ids for a given year.
year is a two char string referring to the year, 99, 07, 13
prefix is either path.source or path.latex depending on which
directory you want to use to generate the arxiv ids.
"""
dirs = [fn for fn in os.listdir(prefix)
if fn.startswith(year)]
ids = [dir_to_arxiv_ids(os.path.join(prefix,dir))
for dir in dirs]
return util.flatten(ids)
def arxiv_ids_by_year(prefix=path.latex):
"""Gather all arxiv ids into a dict with keys for each year"""
# NOTE -- depend on four-number dir prefixes here
fns = [fn for fn in os.listdir(prefix)
if re.match('[0-9]{4}', fn)]
years = sorted(set(fn[0:2] for fn in fns))
kv_pairs = [(year, year_to_arxiv_id(year))
for year in years]
return dict(kv_pairs)
| mit | 6,797,017,471,657,627,000 | 38.12212 | 98 | 0.649214 | false |
nextgis/nextgisweb_compulink | nextgisweb_compulink/compulink_video_producer/default_video_page.py | 1 | 3503 | from __future__ import print_function
from time import sleep
from datetime import datetime
UNITS = ('Minutes', 'Hours', 'Days', 'Months')
PHOTO = ('false', 'true')
class DefaultVideoPage:
_base_url = '/compulink/player/recording_video?resource_id={res_id}&units={units}&count_units={count_units}&photo={photo}'
_base_url_extra = '&zoom={zoom}&lat_center={lat_center}&lon_center={lon_center}&basemap={basemap}'
_login_url = '/login?next=/resource/0'
@property
def login_url(self):
return self._site_address + self._login_url
@property
def base_url(self):
return self._site_address + self._base_url
@property
def base_url_extra(self):
return self._base_url_extra
def __init__(self, res_id, site_address,
units=UNITS[0], count_units=1, photo=PHOTO[1],
screen_width=1368, screen_height=768,
zoom=None, lat_center=None, lon_center=None,
basemap=None):
self._site_address = site_address
self._res_id = res_id
self._units = units
self._count_units = count_units
self._photo = photo
self._zoom = zoom
self._lat_center = lat_center
self._lon_center = lon_center
self._basemap = basemap
self._screen_width = screen_width
self._screen_height = screen_height
def set_context(self, context):
self.context = context
def start_play(self):
self.context.browser.driver.execute_script('window.startPlayer()')
def login(self, user, passw):
self.context.browser.visit(self.login_url)
field = self.context.browser.find_by_name('login')
field.first.fill(user)
field = self.context.browser.find_by_name('password')
field.first.fill(passw)
self.context.browser.find_by_css(".auth-form__btn").first.click()
sleep(3)
while self.context.browser.driver.execute_script('return document.readyState') != 'complete':
pass
if self.login_url in self.context.browser.url:
return False
return True
def sync_load(self):
print(self.url)
self.context.browser.visit(self.url)
sleep(3)
# white DOM
max_timeout = 100
timeout_start = datetime.now()
while self.context.browser.driver.execute_script('return document.readyState') != 'complete':
sleep(3)
if (datetime.now() - timeout_start).seconds >max_timeout:
break
# white data loading
timeout_start = datetime.now()
while self.context.browser.driver.execute_script('return window.getPlayerState()') != 'ready':
sleep(3)
if (datetime.now() - timeout_start).seconds >max_timeout:
break
sleep(3)
@property
def is_finished(self):
return self.context.browser.driver.execute_script('return window.getPlayerState()') == 'finished'
@property
def url(self):
url = self.base_url.format(
res_id=self._res_id,
count_units=self._count_units,
units=self._units,
photo=self._photo,
)
if self._zoom and self._lat_center and self._lon_center:
url += self.base_url_extra.format(zoom=self._zoom, lat_center=self._lat_center, lon_center=self._lon_center, basemap=self._basemap)
return url
@property
def res_id(self):
return self._res_id
| gpl-2.0 | -3,272,793,355,578,168,000 | 31.738318 | 143 | 0.601484 | false |
nushio3/UFCORIN | script/suntomorrow-AE/train_predictor.py | 1 | 4659 | #!/usr/bin/env python
"""Chainer example: autoencoder of a solar image.
"""
# c.f.
# http://nonbiri-tereka.hatenablog.com/entry/2015/06/21/220506
# http://qiita.com/kenmatsu4/items/99d4a54d5a57405ecaf8
import argparse
import numpy as np
import operator
import re
import six
import subprocess
import random
import chainer
from chainer import computational_graph as c
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
from chainer import optimizers
parser = argparse.ArgumentParser(description='Chainer example: MNIST')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()
log_train_fn = 'log-training.txt'
log_test_fn = 'log-test.txt'
subprocess.call('rm '+ log_train_fn,shell=True)
subprocess.call('rm '+ log_test_fn,shell=True)
def zoom_x2(batch):
shape = batch.data.shape
channel_shape = shape[0:-2]
height, width = shape[-2:]
volume = reduce(operator.mul,shape,1)
b1 = F.reshape(batch,(volume,1))
b2 = F.concat([b1,b1],1)
b3 = F.reshape(b2,(volume/width,2*width))
b4 = F.concat([b3,b3],1)
return F.reshape(b4, channel_shape + (2*height ,) + (2*width ,))
gpu_flag=(args.gpu >= 0)
# load the numpy 2D arrays located under the folder.
p=subprocess.Popen('find scaled-256/',shell=True, stdout=subprocess.PIPE)
stdout, _ = p.communicate()
sun_data = []
for fn in stdout.split('\n'):
if not re.search('\.npy$',fn) : continue
sun_data.append(np.load(fn))
if len(sun_data)==0:
# where no data is available, add a dummy data for debugging
for i in range(10):
x=32*[0.333*i*i]
xy=32*[x]
sun_data.append(xy)
model=chainer.FunctionSet(
convA1 = F.Convolution2D( 4, 8,3,stride=1,pad=1),
convA2 = F.Convolution2D( 8,16,3,stride=1,pad=1),
convA3 = F.Convolution2D(16,32,3,stride=1,pad=1),
convV3 = F.Convolution2D(32,16,3,stride=1,pad=1),
convV2 = F.Convolution2D(16, 8,3,stride=1,pad=1),
convV1 = F.Convolution2D( 8, 4,3,stride=1,pad=1),
convY = F.Convolution2D( 4, 1,3,stride=1,pad=1),
)
if gpu_flag:
cuda.init(0)
model.to_gpu()
def forward(x_data,y_data,train=True):
deploy = True
x = Variable(x_data, volatile = not train)
y = Variable(y_data, volatile = not train)
hc1 = F.dropout(F.leaky_relu(model.convA1(x)), train=train and deploy)
hm1 = F.max_pooling_2d(hc1,2)
hc2 = F.dropout(F.leaky_relu(model.convA2(hm1)), train=train and deploy)
hm2 = F.max_pooling_2d(hc2,2)
hc3 = F.dropout(F.leaky_relu(model.convA3(hm2)), train=train and deploy)
hm3 = F.max_pooling_2d(hc3,2)
hv4 = hm3
hz3 = zoom_x2(hv4)
hv3 = F.dropout(F.leaky_relu(model.convV3(hz3)), train=train and deploy)
hz2 = zoom_x2(hv3)
hv2 = F.dropout(F.leaky_relu(model.convV2(hz2)), train=train and deploy)
hz1 = zoom_x2(hv2)
hv1 = F.dropout(F.leaky_relu(model.convV1(hz1)), train=train and deploy)
y_pred = model.convY(hv1)
return F.mean_squared_error(y,y_pred)
def reference(x_data,y_data):
x = Variable(x_data)
y = Variable(y_data)
print F.mean_squared_error(y,y).data
print F.mean_squared_error(x,y).data
reference(np.array(sun_data[0]), np.array(sun_data[1]))
optimizer = optimizers.Adam()
optimizer.setup(model.collect_parameters())
epoch=0
while True:
epoch+=1
batch_input = []; batch_output = []
for i in range(10):
n = 4
start = random.randrange(len(sun_data)-n-1)
batch_input.append(sun_data[start:start+n])
batch_output.append(sun_data[start+n])
batch_input=np.array(batch_input)
batch_output=np.array(batch_output)
if gpu_flag :
batch_input = cuda.to_gpu(batch_input)
batch_output = cuda.to_gpu(batch_output)
optimizer.zero_grads()
loss = forward(batch_input, batch_output, train=True)
loss.backward()
optimizer.update()
print epoch,loss.data
with(open(log_train_fn,'a')) as fp:
fp.write('{} {}\n'.format(epoch,loss.data))
if epoch == 1:
with open("graph.dot", "w") as o:
o.write(c.build_computational_graph((loss, )).dump())
with open("graph.wo_split.dot", "w") as o:
g = c.build_computational_graph((loss, ),
remove_split=True)
o.write(g.dump())
print('graph generated')
if epoch % 10 == 1:
loss = forward(batch_input, batch_output, train=False)
print "TEST: ",epoch,loss.data
with(open(log_test_fn,'a')) as fp:
fp.write('{} {}\n'.format(epoch,loss.data))
| mit | -2,319,461,076,983,837,700 | 27.937888 | 76 | 0.636403 | false |
frascoweb/easywebassets | easywebassets/assets.py | 1 | 1303 | from webassets import Environment, six
from webassets.env import RegisterError
from .package import Package
class Assets(object):
def __init__(self, env=None, **kwargs):
if env:
self.env = env
else:
self.env = Environment(**kwargs)
self.packages = {}
def register(self, name, *args, **kwargs):
if isinstance(name, dict) and not args and not kwargs:
for name, pkg in name.items():
self.register(name, pkg)
return
if len(args) == 1 and not kwargs and isinstance(args[0], Package):
item = args[0]
else:
if len(args) == 1 and isinstance(args[0], list):
args = args[0]
elif len(args) == 1 and isinstance(args[0], dict):
kwargs = args[0]
args = kwargs.pop('contents')
item = Package(*args, **kwargs)
if name in self.packages:
raise RegisterError('Another bundle or package is already registered '+
'as "%s"' % name)
self.packages[name] = item
item.env = self
return item
def __getitem__(self, name):
return self.packages[name]
def __contains__(self, name):
return name in self.packages | mit | -8,230,702,585,525,816,000 | 30.047619 | 83 | 0.539524 | false |
pvtodorov/indra | indra/tools/reading/readers.py | 1 | 29900 | """Objects for interacting with bulk nlp reading tools."""
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import sys
import shutil
import re
import tempfile
import glob
import json
import logging
import subprocess
import zlib
from os import path, mkdir, environ, listdir, remove
from io import BytesIO
from datetime import datetime
from multiprocessing import Pool
from platform import system
from indra import get_config
from indra.util import zip_string
from indra.sources import sparser, reach
logger = logging.getLogger(__name__)
# Set a character limit for reach reading
CONTENT_CHARACTER_LIMIT = 5e5
CONTENT_MAX_SPACE_RATIO = 0.5
def _get_dir(*args):
dirname = path.join(*args)
if path.isabs(dirname):
dirpath = dirname
elif path.exists(dirname):
dirpath = path.abspath(dirname)
else:
dirpath = path.join(path.dirname(path.abspath(__file__)), dirname)
if not path.exists(dirpath):
mkdir(dirpath)
return dirpath
def _time_stamp():
return datetime.now().strftime("%Y%m%d%H%M%S")
def _get_mem_total():
if system() == 'Linux':
with open('/proc/meminfo', 'r') as f:
lines = f.readlines()
tot_entry = [line for line in lines if line.startswith('MemTotal')][0]
ret = int(tot_entry.split(':')[1].replace('kB', '').strip())/10**6
else:
ret = None
return ret
class ReadingError(Exception):
pass
class ReachError(ReadingError):
pass
class SparserError(ReadingError):
pass
class formats:
JSON = 'json'
TEXT = 'text'
XML = 'xml'
class Content(object):
"""An object to regularize the content passed to the readers.
To use this class, use one of the two constructor methods:
- `from_file` : use content from a file on the filesystem.
- `from_string` : Pass a string (or bytes) directly as content.
This class also regularizes the handling of id's and formats, as well as
allowing for decompression and decoding, in the manner standard in the INDRA
project.
"""
def __init__(self, id, format, compressed=False, encoded=False):
self.file_exists = False
self.compressed = compressed
self.encoded = encoded
self._id = id
self._format = format
self._text = None
self._fname = None
self._location = None
self._raw_content = None
return
@classmethod
def from_file(cls, file_path, compressed=False, encoded=False):
"""Create a content object from a file path."""
file_id = '.'.join(path.basename(file_path).split('.')[:-1])
file_format = file_path.split('.')[-1]
content = cls(file_id, file_format, compressed, encoded)
content.file_exists = True
content._location = path.dirname(file_path)
return content
@classmethod
def from_string(cls, id, format, raw_content, compressed=False,
encoded=False):
"""Create a Content object from string/bytes content."""
content = cls(id, format, compressed, encoded)
content._raw_content = raw_content
return content
def _load_raw_content(self):
if self.file_exists and self._raw_content is None:
with open(self.get_filepath(), 'r') as f:
self._raw_content = f.read()
return
def change_id(self, new_id):
"""Change the id of this content."""
self._load_raw_content()
self._id = new_id
self.get_filename(renew=True)
self.get_filepath(renew=True)
return
def change_format(self, new_format):
"""Change the format label of this content.
Note that this does NOT actually alter the format of the content, only
the label.
"""
self._load_raw_content()
self._format = new_format
self.get_filename(renew=True)
self.get_filepath(renew=True)
return
def set_location(self, new_location):
"""Set/change the location of this content.
Note that this does NOT change the actual location of the file. To do
so, use the `copy_to` method.
"""
self._load_raw_content()
self._location = new_location
self.get_filepath(renew=True)
return
def is_format(self, *formats):
"""Check the format of this content."""
return any([self._format == fmt for fmt in formats])
def get_id(self):
return self._id
def get_format(self):
return self._format
def get_text(self):
"""Get the loaded, decompressed, and decoded text of this content."""
self._load_raw_content()
if self._text is None:
assert self._raw_content is not None
ret_cont = self._raw_content
if self.compressed:
ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS+16)
if self.encoded:
ret_cont = ret_cont.decode('utf-8')
self._text = ret_cont
assert self._text is not None
return self._text
def get_filename(self, renew=False):
"""Get the filename of this content.
If the file name doesn't already exist, we created it as {id}.{format}.
"""
if self._fname is None or renew:
self._fname = '%s.%s' % (self._id, self._format)
return self._fname
def get_filepath(self, renew=False):
"""Get the file path, joining the name and location for this file.
If no location is given, it is assumed to be "here", e.g. ".".
"""
if self._location is None or renew:
self._location = '.'
return path.join(self._location, self.get_filename())
def copy_to(self, location, fname=None):
if fname is None:
fname = self.get_filename()
fpath = path.join(location, fname)
if self.file_exists and not self._raw_content:
shutil.copy(self.get_filepath(), fpath)
else:
with open(fpath, 'w') as f:
f.write(self.get_text())
self._fname = fname
self._location = location
self.file_exists = True
return fpath
class Reader(object):
"""This abstract object defines and some general methods for readers."""
name = NotImplemented
def __init__(self, base_dir=None, n_proc=1, check_content=True,
input_character_limit=CONTENT_CHARACTER_LIMIT,
max_space_ratio=CONTENT_MAX_SPACE_RATIO):
if base_dir is None:
base_dir = 'run_' + self.name.lower()
self.n_proc = n_proc
self.base_dir = _get_dir(base_dir)
tmp_dir = tempfile.mkdtemp(
prefix='%s_job_%s' % (self.name.lower(), _time_stamp()),
dir=self.base_dir
)
self.tmp_dir = tmp_dir
self.input_dir = _get_dir(tmp_dir, 'input')
self.id_maps = {}
self.do_content_check = check_content
self.input_character_limit = input_character_limit
self.max_space_ratio = max_space_ratio
return
def _check_content(self, content_str):
"""Check if the content is likely to be successfully read."""
if self.do_content_check:
space_ratio = float(content_str.count(' '))/len(content_str)
if space_ratio > self.max_space_ratio:
return "space-ratio: %f > %f" % (space_ratio,
self.max_space_ratio)
if len(content_str) > self.input_character_limit:
return "too long: %d > %d" % (len(content_str),
self.input_character_limit)
return None
@classmethod
def get_version(cls):
raise NotImplementedError()
def read(self, read_list, verbose=False, log=False):
"Read a list of items and return a dict of output files."
raise NotImplementedError()
class ReachReader(Reader):
"""This object encodes an interface to the reach reading script."""
REACH_MEM = 5 # GB
MEM_BUFFER = 2 # GB
name = 'REACH'
def __init__(self, *args, **kwargs):
self.exec_path, self.version = self._check_reach_env()
super(ReachReader, self).__init__(*args, **kwargs)
conf_fmt_fname = path.join(path.dirname(__file__),
'util/reach_conf_fmt.txt')
self.conf_file_path = path.join(self.tmp_dir, 'indra.conf')
with open(conf_fmt_fname, 'r') as fmt_file:
fmt = fmt_file.read()
log_level = 'INFO'
# log_level = 'DEBUG' if logger.level == logging.DEBUG else 'INFO'
with open(self.conf_file_path, 'w') as f:
f.write(
fmt.format(tmp_dir=self.tmp_dir, num_cores=self.n_proc,
loglevel=log_level)
)
self.output_dir = _get_dir(self.tmp_dir, 'output')
return
@classmethod
def _join_json_files(cls, prefix, clear=False):
"""Join different REACH output JSON files into a single JSON object.
The output of REACH is broken into three files that need to be joined
before processing. Specifically, there will be three files of the form:
`<prefix>.uaz.<subcategory>.json`.
Parameters
----------
prefix : str
The absolute path up to the extensions that reach will add.
clear : bool
Default False - if True, delete the files as soon as they are
loaded.
Returns
-------
json_obj : dict
The result of joining the files, keyed by the three subcategories.
"""
filetype_list = ['entities', 'events', 'sentences']
json_dict = {}
try:
for filetype in filetype_list:
fname = prefix + '.uaz.' + filetype + '.json'
with open(fname, 'rt') as f:
json_dict[filetype] = json.load(f)
if clear:
remove(fname)
logger.debug("Removed %s." % fname)
except IOError as e:
logger.error(
'Failed to open JSON files for %s; REACH error?' % prefix
)
logger.exception(e)
return None
return json_dict
@staticmethod
def _check_reach_env():
"""Check that the environment supports runnig reach."""
# Get the path to the REACH JAR
path_to_reach = get_config('REACHPATH')
if path_to_reach is None:
path_to_reach = environ.get('REACHPATH', None)
if path_to_reach is None or not path.exists(path_to_reach):
raise ReachError(
'Reach path unset or invalid. Check REACHPATH environment var '
'and/or config file.'
)
logger.debug('Using REACH jar at: %s' % path_to_reach)
# Get the reach version.
reach_version = get_config('REACH_VERSION')
if reach_version is None:
reach_version = environ.get('REACH_VERSION', None)
if reach_version is None:
logger.debug('REACH version not set in REACH_VERSION')
m = re.match('reach-(.*?)\.jar', path.basename(path_to_reach))
reach_version = re.sub('-SNAP.*?$', '', m.groups()[0])
logger.debug('Using REACH version: %s' % reach_version)
return path_to_reach, reach_version
@classmethod
def get_version(cls):
_, version = cls._check_reach_env()
return version
def prep_input(self, read_list):
"""Apply the readers to the content."""
logger.info("Prepping input.")
i = 0
for content in read_list:
# Check the quality of the text, and skip if there are any issues.
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning("Skipping %d due to: %s"
% (content.get_id(), quality_issue))
continue
# Look for things that are more like file names, rather than ids.
cid = content.get_id()
if isinstance(cid, str) and re.match('^\w*?\d+$', cid) is None:
new_id = 'FILE%06d' % i
i += 1
self.id_maps[new_id] = cid
content.change_id(new_id)
new_fpath = content.copy_to(self.input_dir)
else:
# Put the content in the appropriate directory.
new_fpath = content.copy_to(self.input_dir)
logger.debug('%s saved for reading by reach.'
% new_fpath)
return
def get_output(self):
"""Get the output of a reading job as a list of filenames."""
logger.info("Getting outputs.")
# Get the set of prefixes (each will correspond to three json files.)
json_files = glob.glob(path.join(self.output_dir, '*.json'))
json_prefixes = set()
for json_file in json_files:
# Remove .uaz.<subfile type>.json
prefix = '.'.join(path.basename(json_file).split('.')[:-3])
json_prefixes.add(path.join(self.output_dir, prefix))
# Join each set of json files and store the json dict.
reading_data_list = []
for prefix in json_prefixes:
base_prefix = path.basename(prefix)
if base_prefix.isdecimal():
base_prefix = int(base_prefix)
elif base_prefix in self.id_maps.keys():
base_prefix = self.id_maps[base_prefix]
try:
content = self._join_json_files(prefix, clear=True)
except Exception as e:
logger.exception(e)
logger.error("Could not load result for prefix %s." % prefix)
continue
reading_data_list.append(ReadingData(
base_prefix,
self.name,
self.version,
formats.JSON,
content
))
logger.debug('Joined files for prefix %s.' % base_prefix)
return reading_data_list
def clear_input(self):
"""Remove all the input files (at the end of a reading)."""
for item in listdir(self.input_dir):
item_path = path.join(self.input_dir, item)
if path.isfile(item_path):
remove(item_path)
logger.debug('Removed input %s.' % item_path)
return
def read(self, read_list, verbose=False, log=False):
"""Read the content, returning a list of ReadingData objects."""
ret = []
mem_tot = _get_mem_total()
if mem_tot is not None and mem_tot <= self.REACH_MEM + self.MEM_BUFFER:
logger.error(
"Too little memory to run reach. At least %s required." %
(self.REACH_MEM + self.MEM_BUFFER)
)
logger.info("REACH not run.")
elif len(read_list) > 0:
# Prep the content
self.prep_input(read_list)
# Run REACH!
logger.info("Beginning reach.")
args = [
'java',
'-Dconfig.file=%s' % self.conf_file_path,
'-jar', self.exec_path
]
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
log_file_str = ''
for line in iter(p.stdout.readline, b''):
log_line = 'REACH: ' + line.strip().decode('utf8')
if verbose:
logger.info(log_line)
if log:
log_file_str += log_line + '\n'
if log:
with open('reach_run.log', 'ab') as f:
f.write(log_file_str.encode('utf8'))
p_out, p_err = p.communicate()
if p.returncode:
logger.error('Problem running REACH:')
logger.error('Stdout: %s' % p_out.decode('utf-8'))
logger.error('Stderr: %s' % p_err.decode('utf-8'))
raise ReachError("Problem running REACH")
logger.info("Reach finished.")
ret = self.get_output()
self.clear_input()
return ret
class SparserReader(Reader):
"""This object provides methods to interface with the commandline tool."""
name = 'SPARSER'
def __init__(self, *args, **kwargs):
self.version = self.get_version()
super(SparserReader, self).__init__(*args, **kwargs)
self.file_list = None
return
@classmethod
def get_version(cls):
return sparser.get_version()
def prep_input(self, read_list):
"Prepare the list of files or text content objects to be read."
logger.info('Prepping input for sparser.')
self.file_list = []
for content in read_list:
quality_issue = self._check_content(content.get_text())
if quality_issue is not None:
logger.warning("Skipping %d due to: %s"
% (content.get_id(), quality_issue))
continue
if content.is_format('nxml'):
# If it is already an nxml, we just need to adjust the
# name a bit, if anything.
if not content.get_filename().startswith('PMC'):
content.change_id('PMC' + str(content.get_id()))
fpath = content.copy_to(self.tmp_dir)
self.file_list.append(fpath)
elif content.is_format('txt', 'text'):
# Otherwise we need to frame the content in xml and put it
# in a new file with the appropriate name.
nxml_str = sparser.make_nxml_from_text(content.get_text())
new_content = Content.from_string('PMC' + str(content.get_id()),
'nxml', nxml_str)
fpath = new_content.copy_to(self.tmp_dir)
self.file_list.append(fpath)
else:
raise SparserError("Unrecognized format %s."
% content.format)
return
def get_output(self, output_files, clear=True):
"Get the output files as an id indexed dict."
reading_data_list = []
patt = re.compile(r'(.*?)-semantics.*?')
for outpath in output_files:
if outpath is None:
logger.warning("Found outpath with value None. Skipping.")
continue
re_out = patt.match(path.basename(outpath))
if re_out is None:
raise SparserError("Could not get prefix from output path %s."
% outpath)
prefix = re_out.groups()[0]
if prefix.startswith('PMC'):
prefix = prefix[3:]
if prefix.isdecimal():
# In this case we assume the prefix is a tcid.
prefix = int(prefix)
try:
with open(outpath, 'rt') as f:
content = json.load(f)
except Exception as e:
logger.exception(e)
logger.error("Could not load reading content from %s."
% outpath)
continue
reading_data_list.append(ReadingData(
prefix,
self.name,
self.version,
formats.JSON,
content
))
if clear:
input_path = outpath.replace('-semantics.json', '.nxml')
try:
remove(outpath)
remove(input_path)
except Exception as e:
logger.exception(e)
logger.error("Could not remove sparser files %s and %s."
% (outpath, input_path))
return reading_data_list
def read_one(self, fpath, outbuf=None, verbose=False):
fpath = path.abspath(fpath)
if outbuf is None:
outbuf = BytesIO()
outbuf.write(b'\nReading %s.\n' % fpath.encode('utf8'))
outbuf.flush()
if verbose:
logger.info('Reading %s.' % fpath)
outpath = None
try:
outpath = sparser.run_sparser(fpath, 'json', outbuf, timeout=60)
except Exception as e:
if verbose:
logger.error('Failed to run sparser on %s.' %
fpath)
logger.exception(e)
outbuf.write(b'Reading failed.----------\n')
outbuf.write(str(e).encode('utf-8') + b'\n')
outbuf.write(b'-------------------------\n')
return outpath, outbuf
def read_some(self, fpath_list, outbuf=None, verbose=False):
"Perform a few readings."
outpath_list = []
for fpath in fpath_list:
output, outbuf = self.read_one(fpath, outbuf, verbose)
if output is not None:
outpath_list.append(output)
return outpath_list, outbuf
def read(self, read_list, verbose=False, log=False, n_per_proc=None):
"Perform the actual reading."
ret = []
self.prep_input(read_list)
L = len(self.file_list)
if L > 0:
logger.info("Beginning to run sparser.")
output_file_list = []
if log:
log_name = 'sparser_run_%s.log' % _time_stamp()
outbuf = open(log_name, 'wb')
else:
outbuf = None
try:
if self.n_proc == 1:
for fpath in self.file_list:
outpath, _ = self.read_one(fpath, outbuf, verbose)
if outpath is not None:
output_file_list.append(outpath)
else:
if n_per_proc is None:
n_per_proc = max(1, min(1000, L//self.n_proc//2))
pool = None
try:
pool = Pool(self.n_proc)
if n_per_proc is not 1:
batches = [self.file_list[n*n_per_proc:(n+1)*n_per_proc]
for n in range(L//n_per_proc + 1)]
out_lists_and_buffs = pool.map(self.read_some,
batches)
else:
out_files_and_buffs = pool.map(self.read_one,
self.file_list)
out_lists_and_buffs = [([out_files], buffs)
for out_files, buffs
in out_files_and_buffs]
finally:
if pool is not None:
pool.close()
pool.join()
for i, (out_list, buff) in enumerate(out_lists_and_buffs):
if out_list is not None:
output_file_list += out_list
if log:
outbuf.write(b'Log for producing output %d/%d.\n'
% (i, len(out_lists_and_buffs)))
if buff is not None:
buff.seek(0)
outbuf.write(buff.read() + b'\n')
else:
outbuf.write(b'ERROR: no buffer was None. '
b'No logs available.\n')
outbuf.flush()
finally:
if log:
outbuf.close()
if verbose:
logger.info("Sparser logs may be found at %s." %
log_name)
ret = self.get_output(output_file_list)
return ret
def get_readers():
"""Get all children of the Reader objcet."""
try:
children = Reader.__subclasses__()
except AttributeError:
module = sys.modules[__name__]
children = [cls for cls_name, cls in module.__dict__.items()
if isinstance(cls, type) and issubclass(cls, Reader)
and cls_name != 'Reader']
return children
def get_reader_class(reader_name):
"""Get a particular reader class by name."""
for reader_class in get_readers():
if reader_class.name.lower() == reader_name.lower():
return reader_class
else:
logger.error("No such reader: %s" % reader_name)
return None
def get_reader(reader_name, *args, **kwargs):
"""Get an instantiated reader by name."""
return get_reader_class(reader_name)(*args, **kwargs)
class ReadingData(object):
"""Object to contain the data produced by a reading.
This is primarily designed for use with the database.
Parameters
----------
tcid : int or str
An identifier of the text content that produced the reading. Must
be an int for use with the database.
reader : str
The name of the reader, consistent with it's `name` attribute, for
example: 'REACH'
reader_version : str
A string identifying the version of the underlying nlp reader.
content_format : str
The format of the content. Options are in indra.db.formats.
content : str
The content of the reading result. A string in the format given by
`content_format`.
reading_id : int or None
Optional. The id corresponding to the Readings entry in the db.
"""
def __init__(self, tcid, reader, reader_version, content_format, content,
reading_id=None):
self.reading_id = reading_id
self.tcid = tcid
self.reader = reader
self.reader_version = reader_version
self.format = content_format
self.content = content
self._statements = None
return
@classmethod
def from_db_reading(cls, db_reading):
return cls(db_reading.text_content_id, db_reading.reader,
db_reading.reader_version, db_reading.format,
json.loads(zlib.decompress(db_reading.bytes,
16+zlib.MAX_WBITS)
.decode('utf8')),
db_reading.id)
@staticmethod
def get_cols():
"""Get the columns for the tuple returned by `make_tuple`."""
return ('text_content_id', 'reader', 'reader_version', 'format',
'bytes')
def get_statements(self, reprocess=False):
"""General method to create statements."""
if self._statements is None or reprocess:
logger.debug("Making statements from %s." % self.reading_id)
if self.reader == ReachReader.name:
if self.format == formats.JSON:
# Process the reach json into statements.
json_str = json.dumps(self.content)
processor = reach.process_json_str(json_str)
else:
raise ReadingError("Incorrect format for Reach output: %s."
% self.format)
elif self.reader == SparserReader.name:
if self.format == formats.JSON:
# Process the sparser content into statements
processor = sparser.process_json_dict(self.content)
if processor is not None:
processor.set_statements_pmid(None)
else:
raise ReadingError("Sparser should only ever be JSON, not "
"%s." % self.format)
else:
raise ReadingError("Unknown reader: %s." % self.reader)
if processor is None:
logger.error("Production of statements from %s failed for %s."
% (self.reader, self.tcid))
stmts = []
else:
stmts = processor.statements
self._statements = stmts
else:
logger.debug("Returning %d statements that were already produced "
"from %s." % (len(self._statements), self.reading_id))
stmts = self._statements
return stmts
def zip_content(self):
"""Compress the content, returning bytes."""
if self.format == formats.JSON:
ret = zip_string(json.dumps(self.content))
elif self.format == formats.TEXT:
ret = zip_string(self.content)
else:
raise Exception('Do not know how to zip format %s.' % self.format)
return ret
def make_tuple(self):
"""Make the tuple expected by the database."""
return (self.tcid, self.reader, self.reader_version, self.format,
self.zip_content())
def matches(self, r_entry):
"""Determine if reading data matches the a reading entry from the db.
Returns True if tcid, reader, reader_version match the corresponding
elements of a db.Reading instance, else False.
"""
# Note the temporary fix in clipping the reader version length. This is
# because the version is for some reason clipped in the database.
return (r_entry.text_content_id == self.tcid
and r_entry.reader == self.reader
and r_entry.reader_version == self.reader_version[:20])
| bsd-2-clause | -9,117,507,397,536,953,000 | 36.704918 | 84 | 0.53097 | false |
luotao1/Paddle | python/paddle/fluid/tests/unittests/mkldnn/test_reduce_mkldnn_op.py | 1 | 7454 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
import paddle
@skip_check_grad_ci(reason="not implemented")
class TestReduceSumDefaultONEDNNOp(OpTest):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
self.attrs = {'use_mkldnn': self.use_mkldnn}
def test_check_output(self):
self.check_output()
@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DONEDNNOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")}
self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [2]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllWithoutReduceAllAttributeONEDNNOp(
TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")}
self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [0, 1, 2, 3]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllWithoutReduceAllAttributeNegativeDimsONEDNNOp(
TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 10, 5, 5)).astype("float32")}
self.attrs = {'use_mkldnn': self.use_mkldnn, 'dim': [-1, -2, -3, -4]}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceSum5DKeepDimsONEDNNOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")}
self.attrs = {'dim': (2, 3, 4), 'keep_dim': True, 'use_mkldnn': True}
self.outputs = {
'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
keepdims=self.attrs['keep_dim'])
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceSum5DReduceAllKeepDimsONEDNNOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((2, 5, 3, 2, 2)).astype("float32")}
self.attrs = {'reduce_all': True, 'keep_dim': True, 'use_mkldnn': True}
self.outputs = {
'Out': self.inputs['X'].sum(keepdims=self.attrs['keep_dim'])
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceSum4DReduceAllONEDNNOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_sum"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")}
self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.inputs['X'].sum()}
@skip_check_grad_ci(
reason="reduce_max is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestReduceMax3DONEDNNOp(TestReduceSumDefaultONEDNNOp):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_max"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
self.attrs = {'dim': [-1], 'use_mkldnn': self.use_mkldnn}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
@skip_check_grad_ci(
reason="reduce_max is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestReduceMax4DNegativeAndPositiveDimsONEDNNOp(
TestReduceSumDefaultONEDNNOp):
"""Remove Max with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_max"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 10, 9)).astype("float32")}
self.attrs = {'dim': [-1, 0, 1], 'use_mkldnn': self.use_mkldnn}
self.outputs = {
'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
}
@skip_check_grad_ci(
reason="reduce_min is discontinuous non-derivable function,"
" its gradient check is not supported by unittest framework.")
class TestReduceMin3DONEDNNOp(TestReduceSumDefaultONEDNNOp):
"""Remove Min with subgradient from gradient check to confirm the success of CI."""
def setUp(self):
self.op_type = "reduce_min"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
self.attrs = {'dim': [2], 'use_mkldnn': self.use_mkldnn}
self.outputs = {
'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim']))
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceMean3DONEDNNOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_mean"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
self.attrs = {'dim': [0], 'use_mkldnn': self.use_mkldnn}
self.outputs = {
'Out': self.inputs['X'].sum(axis=0) / self.inputs['X'].shape[0]
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceMean4DReduceAllONEDNNOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_mean"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((5, 6, 8, 10)).astype("float32")}
self.attrs = {'reduce_all': True, 'use_mkldnn': self.use_mkldnn}
self.outputs = {
'Out':
self.inputs['X'].sum() / np.asarray(self.inputs['X'].shape).prod()
}
@skip_check_grad_ci(reason="not implemented")
class TestReduceMeanNoReduce1DOp(TestReduceSumDefaultONEDNNOp):
def setUp(self):
self.op_type = "reduce_mean"
self.use_mkldnn = True
self.inputs = {'X': np.random.random((1)).astype("float32")}
self.attrs = {'use_mkldnn': self.use_mkldnn}
self.outputs = {'Out': self.inputs['X']}
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
| apache-2.0 | 7,532,327,643,227,592,000 | 37.42268 | 87 | 0.629729 | false |
obulpathi/cdn1 | tests/functional/transport/pecan/base.py | 1 | 1278 | # Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo.config import cfg
import webtest
from cdn import bootstrap
from tests.functional import base
class BaseFunctionalTest(base.TestCase):
def setUp(self):
super(BaseFunctionalTest, self).setUp()
tests_path = os.path.abspath(os.path.dirname(
os.path.dirname(
os.path.dirname(os.path.dirname(__file__)
))))
conf_path = os.path.join(tests_path, 'etc', 'default_functional.conf')
cfg.CONF(args=[], default_config_files=[conf_path])
cdn_wsgi = bootstrap.Bootstrap(cfg.CONF).transport.app
self.app = webtest.TestApp(cdn_wsgi)
FunctionalTest = BaseFunctionalTest
| apache-2.0 | -4,533,810,800,330,797,600 | 30.170732 | 78 | 0.693271 | false |
google/compare-codecs | lib/libavc.py | 1 | 2828 | # Copyright 2015 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An encoder using the libavc encoder from Android M."""
import encoder
import file_codec
import subprocess
class LibavcCodec(file_codec.FileCodec):
def __init__(self,
name='libavc',
formatter=None):
self.name = name
self.codecname = 'libavc'
super(LibavcCodec, self).__init__(
name,
formatter=(formatter or
encoder.OptionFormatter(prefix='--', infix=' ')))
self.extension = 'avi'
self.option_set = encoder.OptionSet(
# Rate control. 0 = constant QP, 1 = storage, 2 = CBR high delay,
# 3 = CBR low delay
# 2 and 3 seems to drop frames - sometimes, but not always.
# 3 is able to run out of memory.
encoder.Option('rc', ['0', '1']),
)
def StartEncoder(self, context):
return encoder.Encoder(context, encoder.OptionValueSet(self.option_set, ''))
def EncodeCommandLine(self, parameters, bitrate, videofile, encodedfile):
commandline = (
'%(tool)s '
'--width %(width)d --height %(height)d '
'--src_framerate %(framerate)d --tgt_framerate %(framerate)d '
'--input_chroma_format YUV_420P --bitrate %(bitrate)d '
'%(parameters)s '
'--input %(inputfile)s --output %(outputfile)s' % {
'tool': encoder.Tool('avcenc'),
'width': videofile.width,
'height': videofile.height,
'framerate': videofile.framerate,
'bitrate': bitrate * 1000, # Bitrate is in bits/sec, not kilobits.
'parameters': parameters.ToString(),
'inputfile': videofile.filename,
'outputfile': encodedfile}
)
return commandline
def DecodeCommandLine(self, videofile, encodedfile, yuvfile):
commandline = "%s -loglevel warning -codec:v h264 -i %s %s" % (
encoder.Tool('ffmpeg'),
encodedfile, yuvfile)
return commandline
def ResultData(self, encodedfile):
return {'frame': file_codec.FfmpegFrameInfo(encodedfile)}
def EncoderVersion(self):
# libavc doesn't appear to have a built-in version string. Use the
# git checksum instead.
git_hash = subprocess.check_output(
'cd third_party/libavc; git log --format="%h %ad" -1', shell=True)
return 'libavc %s' % git_hash
| apache-2.0 | 6,120,185,066,405,847,000 | 36.706667 | 80 | 0.644625 | false |
hydroshare/hydroshare | hs_core/tests/api/views/test_change_quota_holder.py | 1 | 2171 | import json
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse
from hs_core import hydroshare
from hs_core.views import change_quota_holder
from hs_core.testing import MockIRODSTestCaseMixin, ViewTestCase
from hs_access_control.models import PrivilegeCodes
class TestChangeQuotaHolder(MockIRODSTestCaseMixin, ViewTestCase):
def setUp(self):
super(TestChangeQuotaHolder, self).setUp()
self.hs_group, _ = Group.objects.get_or_create(name='Hydroshare Author')
# create two users
self.user1 = hydroshare.create_account(
'[email protected]',
username='owner1',
first_name='owner1_first_name',
last_name='owner1_last_name',
superuser=False,
groups=[self.hs_group]
)
self.user2 = hydroshare.create_account(
'[email protected]',
username='owner2',
first_name='owner2_first_name',
last_name='owner2_last_name',
superuser=False,
groups=[self.hs_group]
)
self.res = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.user1,
title='My Test Resource'
)
# test to make sure one owner can transfer quota holder to another owner
self.user1.uaccess.share_resource_with_user(self.res, self.user2, PrivilegeCodes.OWNER)
def test_change_quota_holder(self):
# here we are testing the change_quota_holder view function
url_params = {'shortkey': self.res.short_id}
url = reverse('change_quota_holder', kwargs=url_params)
request = self.factory.post(url, data={'new_holder_username': 'owner2'})
request.user = self.user1
self.add_session_to_request(request)
response = change_quota_holder(request, shortkey=self.res.short_id)
response_data = json.loads(response.content.decode())
self.assertTrue(self.res.get_quota_holder() == self.user2)
self.assertEqual(response_data['status'], 'success')
# clean up
hydroshare.delete_resource(self.res.short_id)
| bsd-3-clause | 1,839,330,057,208,989,400 | 37.767857 | 95 | 0.647167 | false |
codelv/enaml-native | src/enamlnative/android/android_card_view.py | 1 | 2026 | """
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on June 7, 2017
@author: jrm
"""
from atom.api import Typed, set_default
from enamlnative.widgets.card_view import ProxyCardView
from .android_frame_layout import AndroidFrameLayout, FrameLayout
from .bridge import JavaMethod
class CardView(FrameLayout):
package = 'androidx.cardview.widget'
__nativeclass__ = set_default('%s.CardView' % package)
setCardBackgroundColor = JavaMethod('android.graphics.Color')
setCardElevation = JavaMethod('float')
setContentPadding = JavaMethod('int', 'int', 'int', 'int')
setMaxCardElevation = JavaMethod('float')
setPreventCornerOverlap = JavaMethod('boolean')
setRadius = JavaMethod('float')
setUseCompatPadding = JavaMethod('boolean')
class AndroidCardView(AndroidFrameLayout, ProxyCardView):
""" An Android implementation of an Enaml ProxyCardView.
"""
#: A reference to the widget created by the proxy.
widget = Typed(CardView)
# -------------------------------------------------------------------------
# Initialization API
# -------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying widget.
"""
d = self.declaration
self.widget = CardView(self.get_context(), None, d.style)
# -------------------------------------------------------------------------
# ProxyCardView API
# -------------------------------------------------------------------------
def set_elevation(self, elevation):
self.widget.setCardElevation(elevation)
def set_radius(self, radius):
self.widget.setRadius(radius)
def set_content_padding(self, padding):
dp = self.dp
l, t, r, b = padding
self.widget.setContentPadding(int(l*dp), int(t*dp),
int(r*dp), int(b*dp))
| mit | 1,165,668,592,682,198,000 | 31.15873 | 79 | 0.572557 | false |
bretthandrews/marvin | python/marvin/tests/utils/test_dap.py | 1 | 1889 | #!/usr/bin/env python3
# encoding: utf-8
#
# test_dap.py
#
# Created by José Sánchez-Gallego on 19 Sep 2016.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import unittest
from marvin import config
from marvin.tests import MarvinTest
from marvin.utils.dap.datamodel import MapsProperty, MapsPropertyList, get_dap_datamodel
class TestMapsProperties(MarvinTest):
@classmethod
def setUpClass(cls):
super(TestMapsProperties, cls).setUpClass()
config.setMPL('MPL-5')
def test_dap_datamodel_mpl4(self):
datamodel = get_dap_datamodel('1.1.1')
self.assertEqual(len(datamodel), 10)
self.assertEqual(datamodel.version, '1.1.1')
self.assertIsInstance(datamodel, MapsPropertyList)
self.assertIsInstance(datamodel[0], MapsProperty)
def test_MapsPropertyList(self):
datamodel = get_dap_datamodel()
self.assertEqual(datamodel.version, '2.0.2')
self.assertTrue('EMLINE_GFLUX' in datamodel)
self.assertFalse('emline_bad' in datamodel)
self.assertIsInstance(datamodel['emline_gflux'], MapsProperty)
self.assertIsInstance(datamodel == 'emline_gflux', MapsProperty)
self.assertIsNone(datamodel == 'emline_bad', MapsProperty)
def test_MapsPropertyList_get(self):
datamodel = get_dap_datamodel()
self.assertIsNone(datamodel.get('badname_badchannel'))
self.assertIsNone(datamodel.get('emline_gflux'))
self.assertIsNone(datamodel.get('emline_gflux_badchannel'))
maps_prop, channel = datamodel.get('emline_gflux_oii_3727')
self.assertIsInstance(maps_prop, MapsProperty)
self.assertEqual(maps_prop.name, 'emline_gflux')
self.assertEqual(channel, 'oii_3727')
if __name__ == '__main__':
verbosity = 2
unittest.main(verbosity=verbosity)
| bsd-3-clause | 6,837,162,416,948,016,000 | 32.105263 | 88 | 0.695284 | false |
epicvrvs/craw_module | script/bncs.py | 1 | 2263 | import nil.string, nil.thread, utility, craw, packets, time, configuration, threading
class bncs_packet_handler_class:
def __init__(self):
self.lock = threading.Lock()
self.whois_name_queue = []
self.whois_handler_queue = []
self.account_map = {}
self.entering_game = False
self.has_thread = False
def process_bytes(self, bytes):
if not self.has_thread:
#print 'Creating whois thread'
nil.thread.create_thread(self.whois_thread)
self.has_thread = True
if packets.entering_game(bytes):
self.entering_game = True
assignment = packets.parse_assignment(bytes)
if assignment != None and self.entering_game:
player_id, character_class, player_name, x, y = assignment
#print 'Assignment: %s' % player_name
self.entering_game = False
self.whois(player_name, None)
def process_packet(self, packet):
if len(packet) >= 2 and packet[0 : 2] == '\xff\x0f':
account = nil.string.extract_string(packet, '(*', ')')
if account == None:
return
handler = None
self.lock.acquire()
if len(self.whois_handler_queue) > 0:
name, handler = self.whois_handler_queue[0]
self.account_map[name] = account
self.whois_handler_queue = self.whois_handler_queue[1 : ]
self.lock.release()
if handler != None:
handler(account)
def whois(self, name, handler):
if name == None:
print 'Received /whois None request'
return
#print 'Running whois on %s: %s' % (name, repr(self.account_map))
if name in self.account_map:
#print '%s was cached' % name
if handler != None:
handler(self.account_map[name])
return
self.lock.acquire()
self.whois_name_queue.append(name)
self.whois_handler_queue.append((name, handler))
self.lock.release()
def whois_thread(self):
while True:
self.lock.acquire()
if len(self.whois_name_queue) > 0:
name = self.whois_name_queue[0]
#print 'Whois thread is processing %s' % name
command = '/whois %s' % name
packet = '\xff\x0e' + utility.pack_number(len(command) + 5, 1) + '\x00' + command + '\x00'
#Missing BNCS connection check?
craw.send_bncs_packet(packet)
self.whois_name_queue = self.whois_name_queue[1 : ]
self.lock.release()
time.sleep(configuration.whois_delay) | gpl-3.0 | 1,690,256,831,581,942,000 | 29.186667 | 94 | 0.660186 | false |
quattor/aquilon | lib/aquilon/worker/commands/update_room.py | 1 | 1367 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2016,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq update room`."""
from aquilon.aqdb.model import Room
from aquilon.worker.broker import BrokerCommand
from aquilon.worker.dbwrappers.location import get_location, update_location
class CommandUpdateRoom(BrokerCommand):
required_parameters = ["room"]
def render(self, session, room, fullname, uri, comments, floor, user,
justification, reason, logger, **arguments):
dbroom = Room.get_unique(session, room, compel=True)
if floor is not None:
dbroom.floor = floor
update_location(dbroom, fullname=fullname, comments=comments,
uri=uri) | apache-2.0 | 2,823,669,900,772,170,000 | 38.085714 | 76 | 0.716898 | false |
Open-Power-System-Data/conventional_power_plants | download_and_process_DE_functions.py | 1 | 14757 | # -*- coding: utf-8 -*-
import urllib.parse
import urllib.request
import posixpath
import datetime
import os
import logging
import filecmp
import difflib
import json
import sqlite3
import hashlib
import yaml
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#from bokeh.io import output_notebook
# output_notebook()
# Logging Setup
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d %b %Y %H:%M:%S')
logger = logging.getLogger()
# create download and output folders if they do not exist
os.makedirs(os.path.join('download'), exist_ok=True)
os.makedirs(os.path.join('output'), exist_ok=True)
os.makedirs(os.path.join('output', 'original_data'), exist_ok=True)
def get_sha_hash(path, blocksize=65536):
sha_hasher = hashlib.sha256()
with open(path, 'rb') as f:
buffer = f.read(blocksize)
while len(buffer) > 0:
sha_hasher.update(buffer)
buffer = f.read(blocksize)
return sha_hasher.hexdigest()
def downloadandcache(url):
"""
    Download a file into a folder called "download".
    The file is prefixed with the download date YYYY-M-D-.
    Returns the folder name, date string, and file name of the local copy.
Parameters
----------
url : str
Url of a file to be downloaded
"""
path = urllib.parse.urlsplit(url).path
filename = posixpath.basename(path)
now = datetime.datetime.now()
datestring = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
filepath = os.path.join('download', datestring + "-" + filename)
filepath_original_data = os.path.join('output',
'original_data',
filename)
# check if file exists, otherwise download it
if not os.path.exists(filepath):
logger.info('Downloading file %s', filename)
urllib.request.urlretrieve(url, filepath)
urllib.request.urlretrieve(url, filepath_original_data)
else:
logger.info('Using local file from %s', filepath)
foldername = 'download'
return foldername, datestring, filename
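# Illustrative usage sketch (editorial addition; the URL is a placeholder and
# not part of the original project). downloadandcache() returns the pieces
# needed to rebuild the local path rather than the path itself:
#     foldername, datestring, filename = downloadandcache(
#         'https://example.org/Kraftwerksliste_CSV.csv')
#     local_path = os.path.join(foldername, datestring + "-" + filename)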
def decrementday(year, month, day):
"""
Given values for year, month, and day, the values of the previous day are
returned. At the moment, the function assumes that every month has 31 days,
so that it will return February 31st when given values for March 1.
Parameters
----------
year : integer
Integer year
month : integer
Integer month
day : integer
Integer day
"""
if day > 1:
day = day - 1
else:
day = 31
if month > 1:
month = month - 1
else:
month = 12
year = year - 1
return year, month, day
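# Example of the 31-day simplification described in the docstring above
# (editorial note):
#     decrementday(2015, 3, 1)   # -> (2015, 2, 31)
#     decrementday(2015, 1, 1)   # -> (2014, 12, 31)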
def getolderfilenameandcleanup(foldername, datestring, filename):
"""
    Given the foldername, datestring, and filename as returned by
    the downloadandcache function, search the folder for an older,
    non-identical file with the same naming pattern. Files identical to the
    given one are deleted.
Parameters
----------
foldername : str
folder where files are located
datestring : str
string of original file date YYYY-M-D
filename : str
filename of original file
"""
originalfilepath = os.path.join(foldername, datestring + "-" + filename)
now = datetime.datetime.now()
year = now.year
month = now.month
day = now.day
# loop through older possible files
i = 0
while i < 2000:
i = i + 1
year, month, day = decrementday(year, month, day)
datestring = str(year) + "-" + str(month) + "-" + str(day)
filepath = os.path.join(foldername, datestring + "-" + filename)
# Does the file exist?
if os.path.isfile(filepath):
# Check if file is identical to original file. If yes delete this
# file and continue
if filecmp.cmp(originalfilepath, filepath):
# print('files are identical, deleting', filepath)
os.remove(filepath)
else:
# print('files are not identical:', filepath)
return filepath
raise ValueError('no older file found')
def getmatchinglist():
"""
This function returns the matchinglist located under
/input/matching_bnetza_uba.csv
Parameters
----------
none
"""
# read matching list
result = pd.read_csv(
os.path.join('input/data/DE', 'matching_bnetza_uba.csv'),
skiprows=0,
sep=',', # CSV field separator, default is ','
thousands=',', # Thousands separator, default is ','
decimal='.', # Decimal separator, default is '.')
encoding='cp1252')
result['uba_id_string'] = (result['uba_match_name'] + '_'
+ result['uba_match_fuel'])
return result
def getbnetzalist(url_bnetza, previous=False):
"""
This function returns the dataframe of the plantlist by the
    Bundesnetzagentur. If previous == True, the next-oldest different plantlist
in the folder is returned as determined by the function
getolderfilenameandcleanup.
Parameters
----------
url_bnetza : str
URL of plant list
previous : boolean
Should previous plant list be returned?
"""
foldername, datestring, filename = downloadandcache(url_bnetza)
if not previous:
plantlist = pd.read_csv(os.path.join(foldername, datestring + "-" + filename),
skiprows=9,
sep=';', # CSV field separator, default is ','
thousands='.', # Thousands separator, default is ','
decimal=',', # Decimal separator, default is '.'
encoding='cp1252')
return plantlist
elif previous:
oldfilename = getolderfilenameandcleanup(foldername, datestring, filename)
oldplantlist = pd.read_csv(oldfilename,
skiprows=9,
sep=';', # CSV field separator, default is ','
thousands='.', # Thousands separator, default is ','
decimal=',', # Decimal separator, default is '.'
encoding='cp1252')
return oldplantlist
def getubalist(url_uba, previous=False):
"""
This function returns the dataframe of the plantlist by the
    Umweltbundesamt. If previous == True, the next-oldest different plantlist
in the folder is returned as determined by the function
getolderfilenameandcleanup.
Parameters
----------
url_uba : str
URL of plant list
previous : boolean
Should previous plant list be returned?
"""
foldername, datestring, filename = downloadandcache(url_uba)
if not previous:
plantlist = pd.read_excel(os.path.join(foldername, datestring + "-" + filename), skiprows=9)
return plantlist
elif previous:
oldfilename = getolderfilenameandcleanup(foldername, datestring, filename)
oldplantlist = pd.read_excel(oldfilename, skiprows=9)
return oldplantlist
def getlistdifferences(oldplantlist, newplantlist):
"""
This function returns the difference between two plantlists, and only takes
into account the columns specified within the function.
Parameters
----------
oldplantlist : DataFrame
Old Plantlist
newplantlist : DataFrame
New Plantlist
"""
oldplantlist['source'] = 'old'
newplantlist['source'] = 'new'
comparisonplantlist = pd.concat([oldplantlist, newplantlist])
# Only include some columns in comparison
includecolumns = ['Kraftwerksnummer Bundesnetzagentur',
'Kraftwerksname',
'Blockname',
'Kraftwerksname / Standort',
'Kraftwerksstandort',
'Primärenergieträger',
]
cols = [col for col in comparisonplantlist.columns if col in includecolumns]
comparisonplantlist = comparisonplantlist.drop_duplicates(keep=False, subset=cols)
# Sort by first column
comparisonplantlist = comparisonplantlist.sort_values(comparisonplantlist.columns[0], ascending=True)
return comparisonplantlist
def matchinglistcheck(url_bnetza, url_uba):
"""
This function checks the BNetzA and UBA plantlists against the
    matchinglist and prints out errors. For entries from the UBA Plantlist a
suggestion for correction with the closest possible match is printed.
Parameters
----------
    url_bnetza : str
        URL of the BNetzA plant list
    url_uba : str
        URL of the UBA plant list
"""
logger.info('Starting Matchinglistcheck')
plantlist_uba = getubalist(url_uba)
plantlist_bnetza = getbnetzalist(url_bnetza)
matchinglist = getmatchinglist()
plantlist_uba['uba_id_string'] = (plantlist_uba['Kraftwerksname / Standort']
+ '_' + plantlist_uba['Primärenergieträger'])
# print(plantlist_uba.uba_id_string)
matchinglist.rename(columns={'ID BNetzA': 'bnetza_id'}, inplace=True)
uba_entrylist = [x for x in plantlist_uba.uba_id_string.tolist() if str(x) != 'nan']
errorfound = False
for entry in matchinglist.index:
# print(entry, matchinglist.loc[entry].bnetza_id, matchinglist.loc[entry].uba_id_string)
bnetza_entries = plantlist_bnetza.loc[(plantlist_bnetza['Kraftwerksnummer Bundesnetzagentur'] == matchinglist.loc[entry].bnetza_id)]
# print(entry, len(bnetza_entries))
if len(bnetza_entries) == 0:
            logger.error('Entry not in Bnetzalist: ' + str(matchinglist.loc[entry].bnetza_id) + ' ' + str(matchinglist.loc[entry].uba_id_string))
errorfound = True
uba_entries = plantlist_uba.loc[(plantlist_uba['uba_id_string'] == matchinglist.loc[entry].uba_id_string)]
# print(entry, len(uba_entries))
if len(uba_entries) == 0:
alternatives = difflib.get_close_matches(matchinglist.loc[entry].uba_id_string, uba_entrylist, n=3, cutoff=0.6)
logger.error('Not in ubalist: ' + matchinglist.loc[entry].uba_id_string + ' ' + matchinglist.loc[entry].bnetza_id + ' Possible alternatives: ' + ', '.join(alternatives))
# raise ValueError('Value in Ubalist missing')
errorfound = True
if errorfound == False:
logger.info('No obvious errors in Matchinglist check found')
else:
logger.error('Errors in Matchinglist exist')
def potentialmatching(url_bnetza, url_uba):
"""
    This function looks for power plants from the UBA list that are not
    contained in the matching list. It looks up possible matches based on
    name similarity and returns a list of tuples with the plant names from
    the UBA list, augmented with possible matches.
Parameters
----------
url_bnetza : string
Link to BNetzA List
url_uba: string
Link to UBA List
"""
plantlist_uba = getubalist(url_uba)
plantlist_bnetza = getbnetzalist(url_bnetza)
matchinglist = getmatchinglist()
plantlist_bnetza.rename(columns={'Kraftwerksnummer Bundesnetzagentur':'id'}, inplace=True)
plantlist_bnetza_reduced = plantlist_bnetza[plantlist_bnetza['id'].isin(matchinglist['ID BNetzA']) == False]
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Solare Strahlungsenergie']
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Windenergie (Onshore-Anlage)']
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Windenergie (Offshore-Anlage)']
plantlist_bnetza_reduced['name_and_block'] = plantlist_bnetza_reduced['Kraftwerksname'] + ' ' + plantlist_bnetza_reduced['Blockname'] + '_' + plantlist_bnetza_reduced['Energieträger']
plantlist_uba.rename(columns={'Kraftwerksname / Standort' : 'name',
'Primärenergieträger': 'fuel',
'Anlagenart': 'type'}, inplace=True)
# print(plantlist_uba.columns)
plantlist_uba['uba_id_string'] = (plantlist_uba['name']
+ '_' + plantlist_uba['fuel'])
# Reduce uba list
plantlist_uba_reduced = plantlist_uba[plantlist_uba['uba_id_string'].isin(matchinglist['uba_id_string']) == False]
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'] != 'WEA']
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'] != 'PV']
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'].isnull() == False]
possiblematcheslist = []
for entry in plantlist_uba_reduced.index:
# print(entry)
moin = str(plantlist_uba_reduced.loc[entry].uba_id_string)
moin2 = [x for x in plantlist_bnetza_reduced.name_and_block.tolist() if str(x) != 'nan']# plantlist_bnetza_reduced['name_and_block'].tolist()
# print(moin)
# print(moin2)
possiblealternative = difflib.get_close_matches(moin, moin2, n=2, cutoff=0.3)
# print(moin, possiblealternative)
logger.info('Plant ' + moin + ' not in Matchinglist. Possible Matches from BNetzA List: ' + str(possiblealternative))
possiblematcheslist.append((moin, possiblealternative))
# return possiblematcheslist
return plantlist_bnetza_reduced
# Testing this file
if __name__ == "__main__":
# BNetzA Power plant list
url_bnetza = ('http://www.bundesnetzagentur.de/SharedDocs/Downloads/DE/'
'Sachgebiete/Energie/Unternehmen_Institutionen/Versorgungssicherheit/'
'Erzeugungskapazitaeten/Kraftwerksliste/Kraftwerksliste_CSV.csv'
'?__blob=publicationFile&v=10')
# UBA Power plant list
url_uba = ('https://www.umweltbundesamt.de/sites/default/files/medien/372/dokumente/kraftwerke-de-ab-100-mw.xls')
matchinglist = getmatchinglist()
plantlist_bnetza = getbnetzalist(url_bnetza, previous=False)
# plantlist_bnetza_previous = getbnetzalist(url_bnetza, previous=True)
# plantlist_bnetza_differences = getlistdifferences(plantlist_bnetza_previous, plantlist_bnetza)
# plantlist_uba = getubalist(url_uba, previous=False)
# plantlist_uba_previous = getubalist(url_uba, previous=True)
# plantlist_uba_differences = getlistdifferences(plantlist_uba_previous, plantlist_uba)
matchinglistcheck(url_bnetza, url_uba)
res = potentialmatching(url_bnetza, url_uba)
| mit | -2,219,552,584,659,425,300 | 36.812821 | 187 | 0.636604 | false |
mbreese/tabql | tabql/__init__.py | 1 | 4777 | # Copyright (c) 2014, Marcus Breese
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# * Neither the names of the authors nor contributors may not be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import sqlite3
import tempfile
import tab
class TabQL(object):
def __init__(self, fnames, dbfname=None, noheader=False, headercomment=False, tmpdir=None, verbose=False):
self.fnames = fnames
self.noheader = noheader
self.headercomment = headercomment
self.verbose = verbose
if tmpdir == None:
if 'TMPDIR' in os.environ:
self.tmpdir = os.environ['TMPDIR']
elif 'TMP' in os.environ:
self.tmpdir = os.environ['TMP']
else:
self.tmpdir = '/tmp'
else:
self.tmpdir = tmpdir
if dbfname:
self.dbfname = dbfname
self._istmpdb = False
else:
tmp = tempfile.NamedTemporaryFile(prefix='.tmp', suffix='.db', dir=tmpdir)
self.dbfname = tmp.name
tmp.close()
self._istmpdb = True
self.__log('Using SQLite database: %s' % self.dbfname)
self.conn = sqlite3.connect(self.dbfname)
self.__setup()
def __log(self, msg):
if self.verbose:
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
def __setup(self):
for i, (file_type, tablename, fname) in enumerate(self.fnames):
self.__log('Importing table %s from %s' % (tablename, fname))
if file_type == '-tab':
reader = tab.TabReader(fname, noheader=self.noheader, headercomment=self.headercomment)
coldefs = ["'%s' %s" % (x,y) for x,y in zip(reader.headers, reader.coltypes)]
schema = 'CREATE TABLE %s (%s);' % (tablename, ','.join(coldefs))
if self.verbose:
sys.stderr.write('%s\n' % schema)
self.conn.execute(schema)
self.conn.commit()
buffer = []
sql = 'INSERT INTO %s (%s) VALUES (%s)' % (tablename, ','.join(["'%s'" % x for x in reader.headers]), ','.join(['?',] * len(reader.headers)))
i=0
for cols in reader.get_values():
i += 1
buffer.append(cols)
if len(buffer) > 1000:
self.conn.executemany(sql, buffer)
self.conn.commit()
buffer = []
if buffer:
self.conn.executemany(sql, buffer)
self.conn.commit()
self.__log('%s rows imported' % i)
def close(self):
if self._istmpdb:
self.__log('Removing SQLite database: %s' % self.dbfname)
os.unlink(self.dbfname)
def execute(self, query, args=()):
if not self.conn:
self.conn = sqlite3.connect(self.dbfname)
c = self.conn.cursor()
self.__log('Query: %s' % query)
try:
colnames = None
for row in c.execute(query, args):
if not colnames:
colnames = [x[0] for x in c.description]
yield (colnames, row)
except sqlite3.OperationalError, e:
sys.stderr.write('SQL Error: %s\n' % e.message)
return
c.close()
self.conn.close()
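# Minimal usage sketch (editorial addition; the file name and the query are
# placeholders, not part of the original module):
#     tq = TabQL([('-tab', 'mytable', 'data.txt')], verbose=True)
#     for colnames, row in tq.execute('SELECT * FROM mytable LIMIT 5'):
#         print colnames, row
#     tq.close()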
| bsd-3-clause | 3,301,811,955,980,845,600 | 35.465649 | 157 | 0.589491 | false |
bwsw/moebius | examples/handlers.py | 1 | 2035 | import time
import json
import random
from moebius.utils import sleep_async
from moebius.errors import \
ConnectionSendError, \
HandlerProcessingError
from moebius.server import Handler
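# Editorial note: every handler below exposes a static run(client, data)
# method. The ones written as generators yield sleep_async(...) so the hosting
# server can presumably resume them later without blocking, while the *Err*
# variants exist to exercise error handling (ReplyHandlerErr2 deliberately
# references an undefined name).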
class StartHandler(Handler):
@staticmethod
def run(client, data):
for i in range(10):
try:
print 'send reply %d' % i
client.send('hey hoy %d' % i)
time.sleep(1)
except ConnectionSendError:
print 'send error'
break
except Exception, e:
raise HandlerProcessingError(e)
class StopHandler(Handler):
@staticmethod
def run(client, data):
print 'stop'
pass
class TestHandler(Handler):
@staticmethod
def run(client, data):
d = json.loads(data)
for i in range(3):
print "%d: Got %s from [%s]" % (i, d['message'], client.id)
yield sleep_async(1)
class ReplyHandler(Handler):
@staticmethod
def run(client, data):
random.seed()
yield sleep_async(random.randint(0, 3))
client.send('Reply to %s' % client.id)
class ReplyHandlerErr(Handler):
@staticmethod
def run(client, data):
random.seed()
yield sleep_async(random.randint(0, 3))
client.send('Reply to %s' % client.id)
class ReplyHandlerErr2(Handler):
@staticmethod
def run(client, data):
ggr
random.seed()
client.send('Reply to %s' % client.id)
class ReplyHandler2(Handler):
@staticmethod
def run(client, data):
random.seed()
yield sleep_async(5 + random.randint(0, 5))
client.send('Reply to %s' % client.id)
class ReplyHandler3(Handler):
@staticmethod
def run(client, data):
random.seed()
yield sleep_async(3 + random.randint(0, 5))
client.send('Reply to %s' % client.id)
class ReplyHandlerEchoNoWait(Handler):
@staticmethod
def run(client, data):
client.send('Reply to %s' % client.id)
| apache-2.0 | 336,725,424,554,225,000 | 22.941176 | 71 | 0.594103 | false |
nortorious-flame89/Movie_Website | entertainment_center.py | 1 | 2595 | import media
import fresh_tomatoes
# Instances from the base class 'Movie' __init__ function
# Google URL shortener was used to make the URLs more manageable
# and follow PEP8 Style Guide
iron_giant = media.Movie("The Iron Giant",
"Boy befriends a giant robot during the Cold War",
"https://goo.gl/uuUvgf",
"https://goo.gl/4ipRly")
balto = media.Movie("Balto",
"Wolfdog braves the snow to save the children of Nome",
"https://goo.gl/u12LEZ",
"https://goo.gl/jYmxor")
twlv_angry_men = media.Movie("12 Angry Men",
"Twelve white jurors decide a black boys' fate",
"https://goo.gl/h4eZhW",
"https://goo.gl/7btrww")
big_lebowski = media.Movie("The Big Lebowski",
"A californian stoner is subjected to a series of unfortunate events",
"https://goo.gl/YCqBbd",
"https://goo.gl/PVKR1Q")
v_for_vendetta = media.Movie("V for Vendetta",
"A vigilante seeks to overthrow the totalitarian British government",
"https://goo.gl/ZzDwxa",
"https://goo.gl/QvsCKW")
copying_beethoven = media.Movie("Copying Beethoven",
"A female copy writer becomes an understudy to Ludwid Van Beethoven",
"https://goo.gl/2iVK4z",
"https://goo.gl/tAK5Rr")
ben_hur = media.Movie("Ben Hur",
"Jewish man overthrows the Roman Empire to rescue his wife and mother",
"https://goo.gl/QTUcWp",
"https://goo.gl/uSJKyc")
gladiator = media.Movie("Gladiator",
"Fallen general becomes a gladiator seeking to overthrow the illegitimate Caesar",
"https://goo.gl/JmT1Uy",
"https://goo.gl/9IGyCg")
jungle_book = media.Movie("The Jungle Book",
"Mowgli goes on an adventure to defeat Shere Khan",
"https://goo.gl/V0b3P7",
"https://goo.gl/JenB1g")
# fresh_tomatoes file reads this list and outputs a webpage
movie_l = [iron_giant, balto, twlv_angry_men, big_lebowski, v_for_vendetta,
copying_beethoven, ben_hur, gladiator, jungle_book]
# Opens the movie webpage with the above movies
fresh_tomatoes.open_movies_page(movie_l)
| mit | 5,438,155,041,154,323,000 | 41.540984 | 106 | 0.532563 | false |
HybridF5/jacket | jacket/compute/conf/conductor.py | 1 | 1915 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
conductor_group = cfg.OptGroup(
'conductor',
title='Conductor Options')
use_local = cfg.BoolOpt(
'use_local',
default=True,
help='DEPRECATED: Perform compute-conductor operations locally. '
'This legacy mode was introduced to bridge a gap during '
'the transition to the conductor service. It no longer '
'represents a reasonable alternative for deployers. '
'Removal may be as early as 14.0',
deprecated_for_removal=True)
topic = cfg.StrOpt(
'topic',
default='conductor',
help='The topic on which conductor nodes listen')
manager = cfg.StrOpt(
'manager',
default='jacket.compute.conductor.manager.ConductorManager',
help=('DEPRECATED: Full class name for the Manager for conductor. '
'Removal in 14.0'),
deprecated_for_removal=True)
workers = cfg.IntOpt(
'workers',
help='Number of workers for OpenStack Conductor service. '
'The default will be the number of CPUs available.')
ALL_OPTS = [
use_local,
topic,
manager,
workers]
def register_opts(conf):
conf.register_group(conductor_group)
conf.register_opts(ALL_OPTS, group=conductor_group)
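# Illustrative sketch (editorial addition): how these options could be
# registered and read on a standalone ConfigOpts instance.
#     from oslo_config import cfg as _cfg
#     _conf = _cfg.ConfigOpts()
#     register_opts(_conf)
#     assert _conf.conductor.topic == 'conductor'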
def list_opts():
return {conductor_group: ALL_OPTS}
| apache-2.0 | -8,136,491,807,294,262,000 | 29.887097 | 78 | 0.69295 | false |
orlenko/sfpirg | sfpirgapp/migrations/0002_auto__add_field_testimonial_user.py | 1 | 12798 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Testimonial.user'
db.add_column(u'sfpirgapp_testimonial', 'user',
self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['auth.User']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Testimonial.user'
db.delete_column(u'sfpirgapp_testimonial', 'user_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'sfpirgapp.actiongroup': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ActionGroup'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
u'sfpirgapp.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
u'sfpirgapp.profile': {
'Meta': {'object_name': 'Profile'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'photo': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'sfpirgapp.testimonial': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Testimonial'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'theme_color': ('django.db.models.fields.CharField', [], {'default': "'grey'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['sfpirgapp'] | bsd-2-clause | 6,183,903,364,456,007,000 | 78.496894 | 187 | 0.54696 | false |
dustin/py-backpack | cgi/wapsupport.py | 1 | 1430 | #!/usr/bin/env /usr/local/bin/python
"""
WML support.
Copyright (c) 2005 Dustin Sallings <[email protected]>
"""
import sys
import cgi
import ConfigParser
import backpack
CONFIG_FILE="/usr/local/etc/backpack.conf"
# Get the config loaded
conf=ConfigParser.ConfigParser()
conf.read(CONFIG_FILE)
HEADER="""<?xml version="1.0"?>
<!DOCTYPE wml PUBLIC
"-//WAPFORUM//DTD WML 1.1//EN" "http://www.wapforum.org/DTD/wml_1.1.xml">
"""
def sendContent(data):
"""Send the content as wml"""
sys.stdout.write("Content-type: text/vnd.wap.wml\n")
toSend=HEADER + data
sys.stdout.write("Content-length: %d\n\n" % len(toSend))
sys.stdout.write(toSend)
def wml(s):
"""Wrap the contents in wml tags."""
return "<wml>%s</wml>" % (s,)
def card(id, title, s):
"""Build a card by ID."""
return """<card id="%(id)s" title="%(title)s"><p>%(s)s</p></card>""" % \
{'id': id, 'title': title, 's': s}
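# Illustrative sketch (editorial addition, not part of the original CGI flow):
# composing a card into a deck and emitting it as a WML response.
#     sendContent(wml(card("home", "Backpack", "Hello from WAP")))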
def handleException(tvt):
"""Print out any exception that may occur."""
type, value, tb = tvt
sendContent(wml(card("error", "Error",
"<b>Got an error:</b><br/> %s" % (value,))))
def doCallback(funcs):
"""Execute the action."""
fs=cgi.FieldStorage()
bp=backpack.Backpack(conf.get("backpack", "url"),
conf.get("backpack", "key"))
action=funcs[fs.getvalue("action", "list")]
try:
action(bp, fs)
except:
handleException(sys.exc_info())
| mit | 2,400,140,651,592,449,000 | 23.237288 | 76 | 0.611189 | false |
EvenStrangest/tensorflow | tensorflow/examples/tutorials/mnist/mnist_with_summaries.py | 1 | 7491 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple MNIST classifier which displays summaries in TensorBoard.
This is an unimpressive MNIST model, but it is a good example of using
tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
naming summary tags so that they are grouped meaningfully in TensorBoard.
It demonstrates the functionality of every TensorBoard dashboard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
'for unit testing.')
flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_float('dropout', 0.9, 'Keep probability for training dropout.')
flags.DEFINE_string('data_dir', '/tmp/data', 'Directory for storing data')
flags.DEFINE_string('summaries_dir', '/tmp/mnist_logs', 'Summaries directory')
def train():
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir,
one_hot=True,
fake_data=FLAGS.fake_data)
sess = tf.InteractiveSession()
# Create a multilayer model.
# Input placehoolders
with tf.name_scope('input'):
x = tf.placeholder(tf.float32, [None, 784], name='x-input')
y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')
with tf.name_scope('input_reshape'):
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.image_summary('input', image_shaped_input, 10)
# We can't initialize these variables to 0 - the network will get stuck.
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
"""Reusable code for making a simple neural net layer.
It does a matrix multiply, bias add, and then uses relu to nonlinearize.
It also sets up name scoping so that the resultant graph is easy to read,
and adds a number of summary ops.
"""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights, layer_name + '/weights')
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
tf.histogram_summary(layer_name + '/pre_activations', preactivate)
activations = act(preactivate, 'activation')
tf.histogram_summary(layer_name + '/activations', activations)
return activations
hidden1 = nn_layer(x, 784, 500, 'layer1')
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
tf.scalar_summary('dropout_keep_probability', keep_prob)
dropped = tf.nn.dropout(hidden1, keep_prob)
y = nn_layer(dropped, 500, 10, 'layer2', act=tf.nn.softmax)
with tf.name_scope('cross_entropy'):
diff = y_ * tf.log(y)
with tf.name_scope('total'):
cross_entropy = -tf.reduce_mean(diff)
tf.scalar_summary('cross entropy', cross_entropy)
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
cross_entropy)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', accuracy)
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
merged = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
tf.initialize_all_variables().run()
# Train the model, and also write summaries.
# Every 10th step, measure test-set accuracy, and write test summaries
# All other steps, run train_step on training data, & add training summaries
def feed_dict(train):
"""Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
if train or FLAGS.fake_data:
xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
k = FLAGS.dropout
else:
xs, ys = mnist.test.images, mnist.test.labels
k = 1.0
return {x: xs, y_: ys, keep_prob: k}
for i in range(FLAGS.max_steps):
if i % 10 == 0: # Record summaries and test-set accuracy
summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
test_writer.add_summary(summary, i)
print('Accuracy at step %s: %s' % (i, acc))
else: # Record train set summaries, and train
if i % 100 == 99: # Record execution stats
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, _ = sess.run([merged, train_step],
feed_dict=feed_dict(True),
options=run_options,
run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata, 'step%d' % i)
train_writer.add_summary(summary, i)
print('Adding run metadata for', i)
else: # Record a summary
summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
train_writer.add_summary(summary, i)
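# Editorial note: train() writes TensorBoard event files for the train and
# test runs under FLAGS.summaries_dir ('/tmp/mnist_logs' by default); they can
# be inspected with: tensorboard --logdir=/tmp/mnist_logs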
def main(_):
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
train()
if __name__ == '__main__':
tf.app.run()
| apache-2.0 | 1,014,393,407,829,823,700 | 39.934426 | 80 | 0.661327 | false |
robocomp/robocomp | tools/rcreplay/cloud.py | 1 | 2670 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 by RoboLab - University of Extremadura
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import Ice, threading
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import math
import os
import RoboCompCloudPrimitives
global RoboCompCloudPrimitives
replay_plugin_identifier = 'cloudprimitives'
def getReplayClass():
return CloudPrimitivesI()
def getRecordClass(proxy):
return CloudPrimitivesRecorder(proxy)
def getGraphicalUserInterface():
return CloudPrimitivesGUI()
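# Editorial note (inferred from the factory names above, not stated in this
# file): the replay tool is expected to call getReplayClass() when playing a
# recorded log back, getRecordClass(proxy) when recording from a live
# CloudPrimitives proxy, and getGraphicalUserInterface() for the viewer widget.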
class CloudPrimitivesGUI(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self,parent)
self.show()
self.measure = None
self.configuration = None
def getSize(self):
return QSize(500, 500)
def setConfiguration(self, configuration):
self.configuration = configuration
def setMeasure(self, measure):
self.measure = measure
def paintEvent(self, event):
if self.measure:
self.painter = QPainter(self)
self.painter = None
class CloudPrimitivesI(RoboCompCloudPrimitives.CloudPrimitives):
def __init__(self):
self.measure = None
self.configuration = None
def setConfiguration(self, configuration):
self.configuration = configuration
def setMeasure(self, measure):
self.measure = measure
def getMeasure(self):
return self.measure
def getPatches(self, current = None):
return self.measure[1] # Patches
def getData(self, current = None):
return self.measure[0] # Data
class CloudPrimitivesRecorder:
def __init__(self, proxy):
global RoboCompCloudPrimitives
self.proxy = RoboCompCloudPrimitives.CloudPrimitivesPrx.checkedCast(proxy)
self.numMeasure = 0
def getConfiguration(self):
return True
def getMeasure(self):
print 'Reading measure'
self.numMeasure = self.numMeasure + 1
self.measure = [self.proxy.getData(), self.proxy.getPatches()]
#print ('Measures read', self.numMeasure)
#os.system('mplayer /home/robolab/beep.wav')
return self.measure
def measure(self):
return self.measure
| gpl-3.0 | -6,527,788,322,742,969,000 | 26.8125 | 76 | 0.746067 | false |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_4_choose_your_own_algorithm/your_algorithm.py | 1 | 2628 | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from time import time
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(grade_slow, bumpy_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
#################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier(n_neighbors=1)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier(n_estimators=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# AdaBoost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
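# Editorial addition: one more ensemble worth trying (not part of the original
# exercise); it follows the same fit/predict/accuracy pattern as the blocks
# above and overwrites clf, so prettyPicture below will draw its boundary.
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score
clf = GradientBoostingClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc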
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| mit | -667,427,817,523,286,500 | 30.285714 | 99 | 0.711187 | false |
s3ishr/nlp100knock | chapter1/knock00.py | 1 | 1274 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 00. Reversed string
# Obtain the string formed by arranging the characters of the string "stressed"
# in reverse order (from the last character to the first).
text = "stressed"
answer = "desserts"
# Case 1
##########
case1 = text[::-1]  # str[start:end:step] : take the characters from start to end, every step characters
                    # text[1:7:2]  => "tes"  (from index 1 up to 7, every 2nd character)
                    # text[:-1:2]  => "srse" (from the start up to one character before the end, every 2nd character)
                    # text[::-1]   => the whole string with step -1, i.e. reversed
assert case1 == answer
print("Case 1: '" + case1 + "'")
# => "Case 1: 'desserts'"
# Case 2
##########
lst = list(text) # => ['s', 't', 'r', 'e', 's', 's', 'e', 'd']
lst.reverse() # => ['d', 'e', 's', 's', 'e', 'r', 't', 's']
case2 = ''.join(lst) # => "desserts" (join the characters with the empty string '')
assert case2 == answer
print("Case 2: '" + case2 + "'")
# => "Case 2: 'desserts'"
# Case 3
##########
lst = list(text) # => ['s', 't', 'r', 'e', 's', 's', 'e', 'd']
rvs = reversed(lst) # => ['d', 'e', 's', 's', 'e', 'r', 't', 's']
case3 = ''.join(rvs) # => "desserts"
assert case3 == answer
print("Case 3: '" + case3 + "'")
# => "Case 3: 'desserts'"
| mit | -3,072,670,048,608,120,300 | 25.682927 | 73 | 0.443327 | false |
vorband/ewsposter | moduls/einit.py | 1 | 10609 | #!/usr/bin/env python
import ConfigParser
import socket
import argparse
import os
import sys
import time
from moduls.elog import logme
from moduls.etoolbox import readcfg, readonecfg, getOwnExternalIP
def ecfg(name,version):
MODUL = "EINIT"
ECFG= {}
parser = argparse.ArgumentParser()
parser.add_argument("-c","--configpath", help="Load configuration file from Path")
parser.add_argument("-v","--verbose", help="set output verbosity",action="store_true")
parser.add_argument("-d","--debug", help="set output debug",action="store_true")
parser.add_argument("-l","--loop", help="Go in endless loop. Set {xx} for seconds to wait for next loop", type=int, default=0, action="store")
parser.add_argument("-m","--modul", help="only send alerts for this modul", choices=['glastopfv3','glastopfv2','kippo','dionaea','honeytrap','rdpdetect','emobility', 'conpot', 'cowrie',
'elasticpot', 'suricata', 'rdpy', 'mailoney', 'vnclowpot', 'heralding', 'ciscoasa', 'tanner', 'glutton'],action="store")
parser.add_argument("-s","--silent", help="silent mode without output",action="store_true")
parser.add_argument("-i","--ignorecert", help="ignore certificate warnings",action="store_true")
parser.add_argument("-S","--sendonly", help="only send unsend alerts",action="store_true")
parser.add_argument("-E","--ewsonly", help="only generate ews alerts files",action="store_true")
parser.add_argument("-dcr","--daycounter", help="reset and log daycounters for all honeypots",action="store_true")
parser.add_argument("-j","--jsonpath", help="Write JSON output file to path")
parser.add_argument("-L","--sendlimit", help="Set {xxx} for max alerts will send in one session", type=int, action="store")
parser.add_argument("-V","--version", help="show the EWS Poster Version",action="version", version=name + " " + version)
args = parser.parse_args()
if args.sendlimit:
ECFG["sendlimit2"] = args.sendlimit
else:
ECFG["sendlimit2"] = ""
if args.loop:
ECFG["a.loop"] = args.loop
else:
ECFG["a.loop"] = 0
if args.verbose:
ECFG["a.verbose"] = True
else:
ECFG["a.verbose"] = False
if args.debug:
ECFG["a.debug"] = True
else:
ECFG["a.debug"] = False
if args.ignorecert:
ECFG["a.ignorecert"] = True
else:
ECFG["a.ignorecert"] = False
if args.silent:
ECFG["a.silent"] = True
else:
ECFG["a.silent"] = False
if args.daycounter:
ECFG["a.daycounter"] = True
else:
ECFG["a.daycounter"] = False
if args.sendonly:
ECFG["a.sendonly"] = True
else:
ECFG["a.sendonly"] = False
if args.ewsonly:
ECFG["a.ewsonly"] = True
else:
ECFG["a.ewsonly"] = False
if args.configpath:
ECFG["path2"] = args.configpath
if os.path.isdir(args.configpath) is not True:
logme(MODUL,"ConfigPath %s did not exist. Abort !" % (args.configpath),("P1","EXIT"),ECFG)
else:
ECFG["path2"] = ""
if args.modul and args.modul in [
'glastopfv3',
'glastopfv2',
'kippo',
'dionaea',
'honeytrap',
'rdpdetect',
'emobility',
'conpot',
'cowrie',
'elasticpot',
'suricata',
'rdpy',
'mailoney',
'vnclowpot',
'heralding',
'ciscoasa',
'tanner',
'glutton'
]:
ECFG["a.modul"] = args.modul
else:
ECFG["a.modul"] = ""
if args.jsonpath:
ECFG["a.jsondir"] = args.jsonpath
if os.path.isdir(args.jsonpath) is not True:
logme(MODUL,"JsonPath %s did not exist. Abort !" % (args.jsonpath),("P1","EXIT"),ECFG)
else:
ECFG["a.jsondir"] = ""
# say hello
logme(MODUL,name + " " + version + " (c) by Markus Schroer <[email protected]>\n",("P0"),ECFG)
# read EWSPoster Main Path
ECFG["path"] = os.path.dirname(os.path.abspath(__file__)).replace("/moduls","")
if ECFG["path2"] == "":
ECFG["path2"] = ECFG["path"]
if os.path.isfile(ECFG["path2"] + os.sep + "ews.cfg" ) is False:
logme(MODUL,"Missing EWS Config %s. Abort !"%(ECFG["path2"] + os.sep + "ews.cfg"),("P1","EXIT"),ECFG)
else:
ECFG["cfgfile"] = ECFG["path2"] + os.sep + "ews.cfg"
# Create IDX File if not exist
if os.path.isfile(ECFG["path"] + os.sep + "ews.idx" ) is False:
os.open(ECFG["path"] + os.sep + "ews.idx", os.O_RDWR|os.O_CREAT )
logme(MODUL,"Create ews.idx counterfile",("P1"),ECFG)
# Read Main Config Parameter
ITEMS = ("homedir","spooldir","logdir","contact","del_malware_after_send","send_malware","sendlimit")
MCFG = readcfg("MAIN",ITEMS, ECFG["cfgfile"])
# IP Handling
# try to determine the external IP
MCFG["ip"]=getOwnExternalIP(ECFG)
if not MCFG["ip"]:
logme(MODUL,"External IP address cannot be determined. Set external IP in ews.cfg, ews.ip or env variable MY_EXTIP or allow external api request.. Abort !",("P1","EXIT"),ECFG)
logme(MODUL, "Using external IP address " + str(MCFG["ip"]), ("P1", "Log"), ECFG)
# sendlimit expect
if ECFG["sendlimit2"] != "":
MCFG["sendlimit"] = ECFG["sendlimit2"]
if int(MCFG["sendlimit"]) > 500:
logme(MODUL,"Error Sendlimit " + str(MCFG["sendlimit"]) + " to high. Max 500 ! ",("P1","EXIT"),ECFG)
elif int(MCFG["sendlimit"]) < 1:
logme(MODUL,"Error Sendlimit " + str(MCFG["sendlimit"]) + " to low. Min 1 ! ",("P1","EXIT"),ECFG)
elif MCFG["sendlimit"] == "NULL" or str(MCFG["sendlimit"]) == "UNKNOW":
logme(MODUL,"Error Sendlimit " + str(MCFG["sendlimit"]) + " Must set between 1 and 500. ",("P1","EXIT"),ECFG)
# send_malware ?
if MCFG["send_malware"].lower() == "true":
MCFG["send_malware"] = True
else:
MCFG["send_malware"] = False
# del_malware_after_send ?
if MCFG["del_malware_after_send"].lower() == "true":
MCFG["del_malware_after_send"] = True
else:
MCFG["del_malware_after_send"] = False
# home dir available ?
if os.path.isdir(MCFG["homedir"]) is not True:
logme(MODUL,"Error missing homedir " + MCFG["homedir"] + " Abort !",("P1","EXIT"),ECFG)
else:
os.chdir(MCFG["homedir"])
# spool dir available ?
if os.path.isdir(MCFG["spooldir"]) is not True:
logme(MODUL,"Error missing spooldir " + MCFG["spooldir"] + " Abort !",("P1","EXIT"),ECFG)
# log dir available ?
MCFG["logdir"] = readonecfg("MAIN","logdir", ECFG["cfgfile"])
if MCFG["logdir"] != "NULL" and MCFG["logdir"] != "FALSE" and os.path.isdir(MCFG["logdir"]) is True:
MCFG["logfile"] = MCFG["logdir"] + os.sep + "ews.log"
elif MCFG["logdir"] != "NULL" and MCFG["logdir"] != "FALSE" and os.path.isdir(MCFG["logdir"]) is True:
logme(MODUL,"Error missing logdir " + MCFG["logdir"] + " Abort !",("P1","EXIT"),ECFG)
else:
MCFG["logfile"] = "/var/log" + os.sep + "ews.log"
# Proxy Settings ?
MCFG["proxy"] = readonecfg(MODUL,"proxy", ECFG["cfgfile"])
# Read EWS Config Parameter
ITEMS = ("ews","username","token","rhost_first","rhost_second")
EWSCFG = readcfg("EWS",ITEMS, ECFG["cfgfile"])
# Set ews real true or false
if EWSCFG["ews"].lower() == "true":
EWSCFG["ews"] = True
else:
EWSCFG["ews"] = False
# ignore cert validation if ignorecert-parameter is set
EWSCFGCERT =readonecfg("EWS","ignorecert", ECFG["cfgfile"])
if EWSCFGCERT.lower() == "true":
ECFG["a.ignorecert"] = True
# Read HPFEED Config Parameter
ITEMS = ("hpfeed","host","port","channels","ident","secret")
HCFG = readcfg("HPFEED",ITEMS, ECFG["cfgfile"])
if HCFG["hpfeed"].lower() == "true":
HCFG["hpfeed"] = True
else:
HCFG["hpfeed"] = False
# hpfeeds format
EWSHPFFORMAT =readonecfg("HPFEED","hpfformat", ECFG["cfgfile"])
if EWSHPFFORMAT.lower() in ("ews", "json"):
ECFG["hpfformat"] = EWSHPFFORMAT.lower()
else:
ECFG["hpfformat"] = "ews"
# hpfeeds tls cert
EWSHPFCERT =readonecfg("HPFEED","tlscert", ECFG["cfgfile"])
if EWSHPFCERT and EWSHPFCERT.lower() != "":
ECFG["tlscert"] = EWSHPFCERT.lower()
# Read EWSJSON Config Parameter
ITEMS = ("json","jsondir")
EWSJSON = readcfg("EWSJSON",ITEMS, ECFG["cfgfile"])
if EWSJSON["json"].lower() == "true":
EWSJSON["json"] = True
if os.path.isdir(EWSJSON["jsondir"]) is True:
EWSJSON["jsondir"] = EWSJSON["jsondir"] + os.sep + "ews.json"
else:
logme(MODUL,"Error missing jsondir " + EWSJSON["jsondir"] + " Abort !",("P1","EXIT"),ECFG)
else:
EWSJSON["json"] = False
if ECFG["a.jsondir"] != "" and os.path.isdir(ECFG["a.jsondir"]) is True:
EWSJSON["json"] = True
EWSJSON["jsondir"] = ECFG["a.jsondir"] + os.sep + "ews.json"
ECFG.update(MCFG)
ECFG.update(EWSCFG)
ECFG.update(HCFG)
ECFG.update(EWSJSON)
return ECFG
def locksocket(name):
# create lock socket
global lock_socket
lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
# debug dev macos
#return True
# end debug dev macos
try:
lock_socket.bind('\0' + name)
return True
except socket.error:
print("could not bind socket")
return False
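# Illustrative usage (editorial note; the socket name below is a placeholder):
# the abstract-namespace bind acts as a process-wide lock, so a caller can do
#     if not locksocket("ews_poster"):
#         sys.exit("another ewsposter instance is already running")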
def daycounterreset(lock,ECFG):
if lock == False:
logme("ARGCHK","Lock Socket is busy ...",("P1"),ECFG)
logme("ARGCHK","Waiting 300 seconds for getting lock socket.",("P1"),ECFG)
        for i in range(3000):
if locksocket() == False:
time.sleep(0.1)
else:
break
if locksocket() == False:
logme("daycounterreset","Daycounterreset fails. Socket over 300 sec busy",("P1","LOG"),ECFG)
sys.exit(0)
z = ConfigParser.RawConfigParser()
z.read(ECFG["homedir"] + os.sep + "ews.idx")
for i in z.sections():
if z.has_option(i,"daycounter") == True:
logme("daycounterreset","Daycounter " + i + " : " + z.get(i,"daycounter") + " alerts send." ,("LOG"),ECFG)
z.set(i,"daycounter",0)
with open(ECFG["homedir"] + os.sep + "ews.idx", 'wb') as countfile:
z.write(countfile)
        countfile.close()
    logme("daycounterreset","Daycounters successfully reset.",("P1"),ECFG)
sys.exit(0)
if __name__ == "__main__":
pass
| gpl-3.0 | 1,198,840,189,248,871,700 | 31.246201 | 209 | 0.581959 | false |
geertj/rhevsh | setup.py | 1 | 1294 | #
# This file is part of rhevsh. rhevsh is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# rhevsh is copyright (c) 2011 by the rhevsh authors. See the file
# "AUTHORS" for a complete overview.
import os
import sys
from distutils.command.build import build
from setuptools import setup, Command
version_info = {
'name': 'rhevsh',
'version': '0.9',
'description': 'A command-line interface to Red Hat Enterprise'
' Virtualization',
'author': 'Geert Jansen',
'author_email': '[email protected]',
'url': 'https://github.com/geertj/rhevsh',
'license': 'MIT',
'classifiers': [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python' ],
}
setup(
package_dir = { '': 'lib' },
packages = [ 'rhevsh', 'rhevsh.command', 'rhevsh.format' ],
install_requires = [ 'python-cli >= 1.0', 'python-rhev >= 0.9' ],
entry_points = { 'console_scripts': [ 'rhevsh = rhevsh.main:main' ] },
**version_info
)
| mit | 6,211,877,290,003,639,000 | 30.560976 | 74 | 0.634467 | false |
icereval/osf-sync | osfoffline/sync/local.py | 1 | 6234 | import logging
from pathlib import Path
import threading
from watchdog.observers import Observer
from sqlalchemy.orm.exc import NoResultFound
from osfoffline import utils
from osfoffline.utils.authentication import get_current_user
from osfoffline.exceptions import NodeNotFound
from osfoffline.sync.ext.watchdog import ConsolidatedEventHandler
from osfoffline.tasks import operations
from osfoffline.tasks.operations import OperationContext
from osfoffline.utils import Singleton
from osfoffline.tasks.queue import OperationWorker
logger = logging.getLogger(__name__)
class LocalSyncWorker(ConsolidatedEventHandler, metaclass=Singleton):
def __init__(self):
super().__init__()
self.ignore = threading.Event()
try:
user = get_current_user()
except NoResultFound:
# TODO: This only happens when a user logs out and the db has
# been cleared. The app tries to run again without a user being
# being set. This error doesn't disrupt user experience, but we
# might consider tracking down the specific source and preventing
# it from happening.
raise
self.folder = user.folder
self.observer = Observer()
self.observer.schedule(self, self.folder, recursive=True)
def start(self):
logger.debug('Starting watchdog observer')
self.observer.start()
def stop(self):
logger.debug('Stopping LocalSyncWorker')
# observer is actually a separate child thread and must be join()ed
self.observer.stop()
self.join()
def is_alive(self):
return self.observer.is_alive()
def join(self):
self.observer.join()
logger.debug('LocalSyncWorker Stopped')
def dispatch(self, event):
if self.ignore.is_set():
return logger.debug('Ignoring event {}'.format(event))
super().dispatch(event)
def on_moved(self, event):
logger.info('Move event for {}: from {} to {}'.format('directory' if event.is_directory else 'file',
event.src_path,
event.dest_path))
# Note: OperationContext should extrapolate all attributes from what it is given
if event.is_directory:
try:
# TODO: avoid a lazy context load in this case to catch the NodeNotFound exception?
_ = OperationContext(local=Path(event.src_path), is_folder=True).remote
return self.put_event(operations.RemoteMoveFolder(
OperationContext(local=Path(event.src_path), is_folder=True),
OperationContext(local=Path(event.dest_path), is_folder=True),
))
except NodeNotFound:
return self.put_event(operations.RemoteCreateFolder(
OperationContext(local=Path(event.dest_path), is_folder=True),
))
try:
# TODO: avoid a lazy context load in this case to catch the NodeNotFound exception?
_ = OperationContext(local=Path(event.src_path)).remote # noqa
return self.put_event(operations.RemoteMoveFile(
OperationContext(local=Path(event.src_path)),
OperationContext(local=Path(event.dest_path)),
))
except NodeNotFound:
return self.put_event(operations.RemoteCreateFile(
OperationContext(local=Path(event.dest_path)),
))
def on_created(self, event):
logger.info('Creation event for {}: {}'.format('directory' if event.is_directory else 'file',
event.src_path))
node = utils.extract_node(event.src_path)
path = Path(event.src_path)
# If the file exists in the database, this is a modification
# This logic may not be the most correct, #TODO re-evaluate
if utils.local_to_db(path, node):
return self.on_modified(event)
context = OperationContext(local=path, node=node)
if event.is_directory:
return self.put_event(operations.RemoteCreateFolder(context))
return self.put_event(operations.RemoteCreateFile(context))
def on_deleted(self, event, *args, is_folder=False, **kwargs):
logger.info('Deletion event for {}: {}'.format('directory' if event.is_directory else 'file',
event.src_path))
# A hack: override checking if the passed path is a directory. Since Windows
# emits folder deletion events as file deletes we need to ignore whether or not
# a delete event is for a folder. Since the RemoteDelete operation works identically
# for files and folders we can get away with this here.
context = OperationContext(local=Path(event.src_path), check_is_folder=False)
return self.put_event(operations.RemoteDelete(context))
def on_modified(self, event):
logger.info('Modification event for {}: {}'.format('directory' if event.is_directory else 'file',
event.src_path))
node = utils.extract_node(event.src_path)
path = Path(event.src_path)
# If the file does not exist in the database, this may be a create
if not utils.local_to_db(path, node):
for e in self._create_cache:
if e.src_path == event.src_path:
logging.warning('Found a duplicate create event {}. Ignoring...'.format(event))
return
return self.on_created(event)
context = OperationContext(local=Path(event.src_path))
if event.is_directory:
# FIXME: This branch should never be reached, due to a check in dispatch method
logger.error("Received unexpected modification event for folder: {}".format(event.src_path))
return self.put_event(operations.RemoteCreateFolder(context))
return self.put_event(operations.RemoteUpdateFile(context))
def put_event(self, event):
OperationWorker().put(event)
| lgpl-3.0 | 613,275,511,589,267,700 | 42.594406 | 108 | 0.620789 | false |
FiloSottile/Griffith-mirror | lib/plugins/movie/PluginMovieKinoDe.py | 1 | 21800 | # -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2006-2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gutils
import movie
import string
import re
plugin_name = "Kino.de"
plugin_description = "KINO.DE"
plugin_url = "www.kino.de"
plugin_language = _("German")
plugin_author = "Michael Jahn"
plugin_author_email = "<[email protected]>"
plugin_version = "1.16"
class Plugin(movie.Movie):
url_to_use_base = 'http://www.kino.de/'
url_to_use = url_to_use_base + 'kinofilm/'
url_type = 'K'
def __init__(self, id):
self.encode='iso-8859-1'
elements = string.split(id, "_")
self.movie_id = elements[1]
if (elements[0] == "V"):
self.url_to_use_base = 'http://www.video.de/'
self.url_to_use = self.url_to_use_base + 'videofilm/'
self.url_type = 'V'
else:
self.url_to_use_base = 'http://www.kino.de/'
self.url_to_use = self.url_to_use_base + 'kinofilm/'
self.url_type = 'K'
self.url = self.url_to_use + str(self.movie_id)
def initialize(self):
if self.url_type == 'K':
url = self.url_to_use + string.replace(str(self.movie_id), '/', '/credits/')
self.creditspage = self.open_page(self.parent_window, url=url)
else:
self.creditspage = ''
videopageforkinourl = gutils.trim(self.page, 'class="videode"', 'Zum Film auf video.de')
if videopageforkinourl:
url = gutils.trim(videopageforkinourl, 'href="', '"')
self.videopage = self.open_page(self.parent_window, url=url)
else:
self.videopage = None
def get_image(self):
self.image_url = ''
tmpdata = gutils.regextrim(self.page, '<div class="cover-area">', '</div>')
if tmpdata:
# video page
tmpdata = re.search('(http[:][/][/][^/]+[/]flbilder[/][^"\']+)', tmpdata)
if tmpdata:
self.image_url = tmpdata.group(1)
else:
# kino page
tmpdata = gutils.before(self.page, '<span style="line-height: 15px;">')
if tmpdata:
tmpparts = re.split('http://images.kino.de/s/', tmpdata)
if len(tmpparts) > 2:
self.image_url = 'http://images.kino.de/s/' + gutils.before(tmpparts[2], '"')
elif len(tmpparts) > 1:
self.image_url = 'http://images.kino.de/s/' + gutils.before(tmpparts[1], '"')
if not self.image_url and self.videopage:
tmpdata = gutils.regextrim(self.videopage, '<div class="cover-area">', '</div>')
if tmpdata:
# video page
tmpdata = re.search('(http[:][/][/][^/]+[/]flbilder[/][^"\']+)', tmpdata)
if tmpdata:
self.image_url = tmpdata.group(1)
def get_o_title(self):
self.o_title = gutils.trim(self.page, '<p>Originaltitel: ', '</p>')
if not self.o_title:
self.o_title = gutils.trim(self.page, '<h1(', ')')
if not self.o_title:
self.o_title = gutils.trim(self.page, '<div class="teaser">', '</')
if not self.o_title:
if self.videopage:
self.o_title = gutils.trim(self.videopage, '<p>Originaltitel: ', '</p>')
if not self.o_title:
self.o_title = gutils.regextrim(self.page, '<h1>', '</h1>')
def get_title(self):
self.title = gutils.trim(self.page, '<div class="teaser">', '</')
if not self.title:
self.title = gutils.regextrim(self.page, '<h1>', '</h1>')
def get_director(self):
self.director = gutils.trim(self.page, '<th>Regie:', '<th>')
if not self.director:
self.director = gutils.trim(self.creditspage, 'Regie ', '</tr>')
def get_plot(self):
self.plot = gutils.trim(self.page, '<div class="yui-content">', '<div class="footer">')
if not self.plot:
# kino page
self.plot = gutils.trim(self.page, '<span style="line-height: 15px;">', '<table')
if not self.plot and self.videopage:
self.plot = gutils.trim(self.videopage, '<div class="yui-content">', '<div class="footer">')
if self.plot:
# video page
self.plot = re.sub('<script type="text/javascript">[^<]+</script>', '', self.plot)
self.plot = string.replace(self.plot, '>Großansicht</a>', '>')
self.plot = string.replace(self.plot, '>Schließen</a>', '>')
self.plot = string.replace(self.plot, '>zurück </a>', '>')
self.plot = string.replace(self.plot, '>1</a>', '>')
self.plot = string.replace(self.plot, '> weiter</a>', '>')
self.plot = string.replace(self.plot, '</h4>', '\n')
self.plot = gutils.clean(self.plot)
compiledmultiline = re.compile(r'^[^(]+[(]Foto[:][^)]+[)][ ]*$', re.MULTILINE)
self.plot = compiledmultiline.sub('', self.plot)
compiledmultiline = re.compile(r"(^\s+$|^\s*//\s*$)", re.MULTILINE)
self.plot = compiledmultiline.sub('', self.plot)
compiledmultiline = re.compile("^[\n]+$", re.MULTILINE)
self.plot = compiledmultiline.sub("\n", self.plot)
def get_year(self):
self.year = ''
tmp = gutils.trim(self.page, '<div class="description">', '</div>')
if tmp:
searchyearandcountry = re.search('([0-9]{4})<br', tmp)
if searchyearandcountry:
self.year = searchyearandcountry.group(1)
if not self.year:
tmp = gutils.trim(self.page, '<span class="standardsmall"><strong>', '<br')
if tmp:
tmp = gutils.trim(tmp, '<strong>', '</strong>')
if tmp:
srchyear = re.search('([0-9]{4})', tmp)
if srchyear:
self.year = srchyear.group(1)
if not self.year and self.videopage:
tmp = gutils.trim(self.videopage, '<div class="description">', '</div>')
if tmp:
searchyearandcountry = re.search('([0-9]{4})<br', tmp)
if searchyearandcountry:
self.year = searchyearandcountry.group(1)
def get_runtime(self):
self.runtime = ''
srchresult = re.search('Laufzeit: ([0-9]+)[ \t]Min[.]<', self.page)
if srchresult <> None:
self.runtime = srchresult.group(1)
if not self.runtime:
srchresult = re.search('>([0-9]+)[ \t]Min[.]<', self.page)
if srchresult <> None:
self.runtime = srchresult.group(1)
if not self.runtime and self.videopage:
srchresult = re.search('Laufzeit: ([0-9]+)[ \t]Min[.]<', self.videopage)
if srchresult <> None:
self.runtime = srchresult.group(1)
def get_genre(self):
self.genre = gutils.trim(self.page,'<p class="genre">', '</p>')
if not self.genre:
self.genre = gutils.trim(self.page, 'title="Zur Genreliste: Drama">', '<')
if not self.genre and self.videopage:
self.genre = gutils.trim(self.videopage,'<p class="genre">', '</p>')
def get_cast(self):
self.cast = ''
tmp = gutils.regextrim(self.page, '<th>Darsteller:', '(<th>[^&]|</table>)')
if tmp:
tmpparts = string.split(tmp, '<a href="/star/')
for tmppart in tmpparts[1:]:
name = gutils.trim(tmppart, '>', '<')
role = gutils.trim(tmppart, '>als ', '<')
if name:
if role:
self.cast = self.cast + name + _(' as ') + role + '\n'
else:
self.cast = self.cast + name + '\n'
if not self.cast:
tmp = gutils.trim(self.creditspage, '>Cast<br />', '>Crew<')
if tmp:
castparts = re.split('width="50%"><a href="/star/', tmp)
for index in range(1, len(castparts), 1):
role = gutils.clean(gutils.trim(castparts[index - 1], 'width="50%">', '</td>'))
name = gutils.clean(gutils.trim(castparts[index], '">', '<'))
if role:
self.cast = self.cast + name + _(' as ') + role + '\n'
else:
self.cast = self.cast + name + '\n'
def get_classification(self):
self.classification = gutils.regextrim(self.page, 'FSK: ', '<')
if not self.classification and self.videopage:
self.classification = gutils.regextrim(self.videopage, 'FSK: ', '<')
def get_studio(self):
self.studio = ''
tmp = gutils.trim(self.page, '<div class="description">', '</div>')
if tmp:
tmp = gutils.trim(tmp, 'Regie:', '</p>')
if tmp:
self.studio = string.replace(gutils.after(tmp, '<br/>'), 'Verleih: ', '')
if not self.studio:
self.studio = gutils.trim(self.page, 'Verleih: ', '<')
if not self.studio and self.videopage:
tmp = gutils.trim(self.videopage, '<div class="description">', '</div>')
if tmp:
tmp = gutils.trim(tmp, 'Regie:', '</p>')
if tmp:
self.studio = string.replace(gutils.after(tmp, '<br/>'), 'Verleih: ', '')
def get_o_site(self):
self.o_site = ""
def get_site(self):
self.site = self.url_to_use + self.movie_id
def get_trailer(self):
self.trailer = ''
trailerparts = re.split('href="/trailer-und-bilder/film', self.page)
if len(trailerparts) > 1:
for trailerpart in trailerparts[1:]:
trailermatch = re.search('Trailer[ ]*</p>', trailerpart)
if trailermatch:
self.trailer = self.url_to_use_base + 'trailer-und-bilder/film' + gutils.before(trailerpart, '"')
break
if not self.trailer and self.url_type == 'K':
self.trailer = self.url_to_use + string.replace(str(self.movie_id), '/', '/trailer/')
def get_country(self):
self.country = ''
tmp = gutils.trim(self.page, '<div class="description">', '</div>')
if tmp:
searchyearandcountry = re.search('([^>0-9]+)[0-9]{4}<br', tmp)
if searchyearandcountry:
self.country = searchyearandcountry.group(1)
if not self.country:
tmp = gutils.trim(self.page, '<span class="standardsmall"><strong>', '<br')
if tmp:
tmp = gutils.trim(tmp, '<strong>', '</strong>')
if tmp:
self.country = gutils.before(tmp, ' ')
if not self.country and self.videopage:
tmp = gutils.trim(self.videopage, '<div class="description">', '</div>')
if tmp:
searchyearandcountry = re.search('([^>0-9]+)[0-9]{4}<br', tmp)
if searchyearandcountry:
self.country = searchyearandcountry.group(1)
def get_rating(self):
self.rating = 0
tmp = gutils.trim(self.page, '<h4>Filmbewertung</h4>', '</script>')
if tmp:
matched = re.search('ratingBar.setValue[(]([0-9]+)[)]', tmp)
if matched:
try:
self.rating = round(int(matched.group(1)) / 10.0, 0)
except:
pass
def get_notes(self):
self.notes = ""
tmp_notes = gutils.clean(gutils.trim(self.page, "<strong>Sprachen:</strong>", "</p>"))
if tmp_notes != "":
self.notes = self.notes + "Sprachen:\n" + tmp_notes + "\n\n"
tmp_notes = gutils.clean(gutils.trim(self.page, "<strong>Untertitel:</strong>", "</p>"))
if tmp_notes != "":
self.notes = self.notes + "Untertitel:\n" + tmp_notes + "\n\n"
tmp_notes = gutils.clean(gutils.trim(self.page, "<strong>Tonformat:</strong>", "</p>"))
if tmp_notes != "":
self.notes = self.notes + "Tonformat:\n" + tmp_notes + "\n\n"
tmp_notes = gutils.clean(gutils.trim(self.page, "<strong>Bildformat:</strong>", "</p>"))
if tmp_notes != "":
self.notes = self.notes + "Bildformat:\n" + tmp_notes + "\n\n"
tmp_notes = gutils.clean(gutils.trim(self.page, "<strong>EAN</strong>", "</p>"))
if tmp_notes != "":
self.notes = self.notes + "EAN:\n" + tmp_notes + "\n\n"
def get_screenplay(self):
self.screenplay = gutils.regextrim(self.page, '<th>Buch:', '<th>')
if not self.screenplay:
self.screenplay= gutils.trim(self.creditspage, 'Drehbuch: ', '</tr>')
def get_cameraman(self):
self.cameraman = gutils.regextrim(self.page, '<th>Kamera:', '(<th>|</table>)')
if not self.cameraman:
self.cameraman= gutils.trim(self.creditspage, 'Kamera ', '</tr>')
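# Illustrative note (not part of the original plugin): the movie id handed to
# Plugin() is expected to look like "K_rocky-balboa/96132.html" (kino.de) or
# "V_arahan-vanilla-dvd/90405.html" (video.de); the prefix before "_" selects
# the base URL and the remainder is appended to it, e.g. (hypothetical use):
#
#   p = Plugin("K_rocky-balboa/96132.html")
#   # p.url -> "http://www.kino.de/kinofilm/rocky-balboa/96132.html"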
class SearchPlugin(movie.SearchMovie):
def __init__(self):
self.original_url_search = 'http://www.kino.de/suche/film?hitsPerPage=50&searchString='
self.translated_url_search = 'http://www.kino.de/suche/film?hitsPerPage=50&searchString='
self.encode='iso-8859-1'
self.remove_accents = False
def search(self,parent_window):
self.open_search(parent_window)
pagemovie = self.page
#
# Look for DVD and VHS
#
self.url = "http://www.kino.de/suche/video?hitsPerPage=50&searchString="
self.open_search(parent_window)
self.page = pagemovie + self.page
return self.page
def get_searches(self):
elements1 = re.split('href="/kinofilm/', self.page)
elements1[0] = None
for element in elements1:
if element <> None:
title = gutils.clean(gutils.trim(element,'>','</a>')) + string.replace(' (' +
gutils.clean(gutils.trim(element, '<p>', "<br />")) + ')', '()', '')
if title != ' ':
self.ids.append("K_" + re.sub('[?].*', '', gutils.before(element,'"')))
self.titles.append('Kino: ' + title)
elements2 = re.split('href="http://www.video.de/videofilm/', self.page)
elements2[0] = None
for element in elements2:
if element <> None:
title = gutils.clean(gutils.trim(element,'>','</a>')) + string.replace(' (' +
gutils.clean(gutils.trim(element, '<p>', "<br />")) + ')', '()', '')
if title != ' ':
id = re.sub('[?].*', '', gutils.before(element,'"'))
self.ids.append("V_" + id)
type = ''
if 'blu-ray-disc-kauf' in id:
type = ' (Bluray-Kauf)'
if 'blu-ray-disc-leih' in id:
type = ' (Bluray-Verleih)'
if 'dvd-leih' in id:
type = ' (DVD-Verleih)'
if 'dvd-kauf' in id:
type = ' (DVD-Kauf)'
self.titles.append('Video: ' + title + type)
#
# Plugin Test
#
class SearchPluginTest(SearchPlugin):
#
# Configuration for automated tests:
# dict { movie_id -> [ expected result count for original url, expected result count for translated url ] }
#
test_configuration = {
'Rocky Balboa' : [ 9, 9 ],
'Arahan' : [ 10, 10 ],
'Ein glückliches Jahr' : [ 4, 4 ]
}
class PluginTest:
#
# Configuration for automated tests:
# dict { movie_id -> dict { arribute -> value } }
#
# value: * True/False if attribute only should be tested for any value
# * or the expected value
#
test_configuration = {
'K_rocky-balboa/96132.html' : {
'title' : 'Rocky Balboa',
'o_title' : 'Rocky Balboa',
'director' : 'Sylvester Stallone',
'plot' : True,
'cast' : 'Sylvester Stallone' + _(' as ') + 'Rocky Balboa\n\
Antonio Traver' + _(' as ') + 'Mason "The Line" Dixon\n\
Burt Young' + _(' as ') + 'Paulie\n\
Geraldine Hughes' + _(' as ') + 'Marie\n\
Milo Ventimiglia' + _(' as ') + 'Rocky Jr.\n\
James Francis Kelly III' + _(' as ') + 'Steps\n\
Tony Burton' + _(' as ') + 'Duke\n\
A.J. Benza' + _(' as ') + 'L.C.',
'country' : 'USA',
'genre' : 'Drama',
'classification' : 'ab 12 Jahre',
'studio' : 'Fox',
'o_site' : False,
'site' : 'http://www.kino.de/kinofilm/rocky-balboa/96132.html',
'trailer' : 'http://www.kino.de/kinofilm/rocky-balboa/trailer/96132.html',
'year' : 2006,
'notes' : False,
'runtime' : 102,
'image' : True,
'rating' : False,
'cameraman' : 'J. Clark Mathis',
'screenplay' : 'Sylvester Stallone'
},
'K_ein-glueckliches-jahr/28675.html' : {
'title' : 'Ein glückliches Jahr',
'o_title' : 'La bonne année',
'director' : 'Claude Lelouch',
'plot' : True,
'cast' : 'Lino Ventura\n\
Françoise Fabian\n\
Charles Gérard\n\
André Falcon',
'country' : 'Frankreich/Italien',
'genre' : 'Drama',
'classification' : 'ab 12',
'studio' : 'Black Hill Pictures',
'o_site' : False,
'site' : 'http://www.kino.de/kinofilm/ein-glueckliches-jahr/28675.html',
'trailer' : 'http://www.kino.de/kinofilm/ein-glueckliches-jahr/trailer/28675.html',
'year' : 1973,
'notes' : False,
'runtime' : 110,
'image' : True,
'rating' : False,
'cameraman' : 'Jean Collomb',
'screenplay' : 'Claude Lelouch'
},
'V_ein-glueckliches-jahr-dvd/85546.html' : {
'title' : 'Ein glückliches Jahr',
'o_title' : 'La bonne année',
'director' : 'Claude Lelouch',
'plot' : True,
'cast' : 'Lino Ventura\n\
Françoise Fabian\n\
Charles Gérard\n\
André Falcon',
'country' : 'Frankreich/Italien',
'genre' : 'Drama',
'classification' : 'ab 12',
'studio' : 'Black Hill Pictures',
'o_site' : False,
'site' : 'http://www.video.de/videofilm/ein-glueckliches-jahr-dvd/85546.html',
'trailer' : False,
'year' : 1973,
'notes' : 'Sprachen:\n\
Deutsch DD 2.0, Französisch DD 2.0\n\
\n\
Tonformat:\n\
Dolby Digital 2.0\n\
\n\
Bildformat:\n\
1:1,33/4:3',
'runtime' : 110,
'image' : True,
'rating' : False,
'cameraman' : 'Jean Collomb',
'screenplay' : 'Claude Lelouch'
},
'V_arahan-vanilla-dvd/90405.html' : {
'title' : 'Arahan',
'o_title' : 'Arahan jangpung dae jakjeon',
'director' : 'Ryoo Seung-wan',
'plot' : True,
'cast' : 'Ryu Seung-beom' + _(' as ') + 'Sang-hwan\n\
Yoon So-yi' + _(' as ') + 'Wi-jin\n\
Ahn Sung-kee' + _(' as ') + 'Ja-woon\n\
Jung Doo-hong' + _(' as ') + 'Heuk-woon\n\
Yun Ju-sang' + _(' as ') + 'Mu-woon',
'country' : 'Südkorea',
'genre' : 'Action/ Komödie',
'classification' : 'ab 16',
'studio' : 'Splendid Film',
'o_site' : False,
'site' : 'http://www.video.de/videofilm/arahan-vanilla-dvd/90405.html',
'trailer' : False,
'year' : 2004,
'notes' : 'Sprachen:\n\
Deutsch DD 5.1\n\
\n\
Tonformat:\n\
Dolby Digital 5.1\n\
\n\
Bildformat:\n\
1:1,78/16:9',
'runtime' : 108,
'image' : True,
'rating' : False,
'cameraman' : 'Lee Jun-gyu',
'screenplay' : 'Ryoo Seung-wan'
}
}
| gpl-2.0 | 1,633,054,857,474,862,300 | 42.919355 | 117 | 0.492334 | false |
gspilio/nova | setup.py | 1 | 2927 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
from nova.openstack.common import setup as common_setup
requires = common_setup.parse_requirements()
depend_links = common_setup.parse_dependency_links()
project = 'nova'
setuptools.setup(
name=project,
version=common_setup.get_version(project, '2013.2'),
description='cloud computing fabric controller',
author='OpenStack',
author_email='[email protected]',
url='http://www.openstack.org/',
classifiers=[
'Environment :: OpenStack',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
cmdclass=common_setup.get_cmdclass(),
packages=setuptools.find_packages(exclude=['bin', 'smoketests']),
install_requires=requires,
dependency_links=depend_links,
include_package_data=True,
test_suite='nose.collector',
setup_requires=['setuptools_git>=0.4'],
scripts=['bin/nova-all',
'bin/nova-api',
'bin/nova-api-ec2',
'bin/nova-api-metadata',
'bin/nova-api-os-compute',
'bin/nova-baremetal-deploy-helper',
'bin/nova-baremetal-manage',
'bin/nova-rpc-zmq-receiver',
'bin/nova-cells',
'bin/nova-cert',
'bin/nova-clear-rabbit-queues',
'bin/nova-compute',
'bin/nova-conductor',
'bin/nova-console',
'bin/nova-consoleauth',
'bin/nova-dhcpbridge',
'bin/nova-manage',
'bin/nova-network',
'bin/nova-novncproxy',
'bin/nova-objectstore',
'bin/nova-rootwrap',
'bin/nova-scheduler',
'bin/nova-spicehtml5proxy',
'bin/nova-xvpvncproxy',
],
py_modules=[])
| apache-2.0 | 7,221,491,232,337,402,000 | 38.026667 | 78 | 0.604715 | false |
anenbergb/Footage-Manipulation | postprocessing/visualize.py | 1 | 3850 | import Image, ImageDraw
import itertools
import random
import pdb
import os
defaultwidth = 1
colors = ["#FF00FF",
"#FF0000",
"#FF8000",
"#FFD100",
"#008000",
"#0080FF",
"#0000FF",
"#000080",
"#800080"]
color_label_map = {'Pedestrian':colors[0], 'Biker':colors[1],
'Skater':colors[2], 'Cart':colors[3], 'Car':colors[4],
'Bus':colors[5]}
def highlight_box(image, box, color = colors[0], width = defaultwidth,
font = None):
"""
Highlights the bounding box on the given image.
"""
draw = ImageDraw.Draw(image)
if not box.occluded:
width = width * 2
for i in range(width):
draw.rectangle((box.xmin + i, box.ymin + i, box.xmax - i, box.ymax - i),
outline=color)
if font:
ypos = box.ymin
label = box.label
size = draw.textsize(label, font = font)
xpos = max(box.xmin - size[0] - 3, 0)
draw.text((xpos, ypos+1), label,
fill="black", font=font)
draw.text((xpos+1, ypos+1), label,
fill="black", font=font)
draw.text((xpos+1, ypos), label,
fill="black", font=font)
draw.text((xpos, ypos-1), label,
fill="black", font=font)
draw.text((xpos-1, ypos-1), label,
fill="black", font=font)
draw.text((xpos-1, ypos), label,
fill="black", font=font)
draw.text((xpos, ypos), label,
fill="white", font=font)
ypos += size[1] + 3
return image
def highlight_boxes(image, boxes, colors = colors, width = defaultwidth,
font = None):
"""
Highlights an iterable of boxes.
"""
for box, color in zip(boxes, itertools.cycle(colors)):
highlight_box(image, box, color, width, font)
return image
def highlight_path(images, path, color = colors[0], width = defaultwidth,
font = None):
"""
Highlights a path across many images. The images must be indexable
by the frame. Produces a generator.
"""
print "Visualize path of length {0}".format(len(path))
for box in path:
try:
lost = box.lost
except:
lost = False
image = images[box.frame]
if not lost:
highlight_box(image, box, color, width, font)
yield image, box.frame
def highlight_paths(images, paths, colors = colors, width = defaultwidth, font = None, cycle=False, framesroot = "./"):
"""
Highlights multiple paths across many images. The images must be indexable
by the frame. Produces a generator.
"""
print "Visualize {0} paths".format(len(paths))
boxmap = {}
if cycle:
paths = zip(paths, itertools.cycle(colors))
else:
paths_tmp = []
for path in paths:
if len(path) > 0 and path[0].label in color_label_map:
paths_tmp.append((path, color_label_map[path[0].label]))
else:
paths_tmp.append((path, colors[6]))
paths = paths_tmp
for path, color in paths:
for box in path:
if box.frame not in boxmap:
boxmap[box.frame] = [(box, color)]
else:
boxmap[box.frame].append((box, color))
for frame, boxes in sorted(boxmap.items()):
im_path = os.path.join(framesroot, images[frame])
im = Image.open(im_path)
for box, color in boxes:
try:
lost = box.lost
except:
lost = False
if not lost:
highlight_box(im, box, color, width, font)
yield im, frame
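# Illustrative sketch (not in the original file): highlight_paths() yields
# (PIL image, frame number) pairs, so it is typically chained into save().
# The frame filenames below are assumed examples, not project conventions;
# "paths" is the list of box tracks (objects with .frame/.label) this module expects:
#
#   frames = ["frame%05d.jpg" % i for i in range(100)]   # indexable by frame
#   annotated = highlight_paths(frames, paths, framesroot="./frames")
#   save(annotated, lambda frame: "out/frame%05d.jpg" % frame)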
def save(images, output):
"""
Saves images produced by the path iterators.
"""
for image, frame in images:
image.save(output(frame))
| mit | -6,719,373,337,939,552,000 | 28.615385 | 119 | 0.543896 | false |
brenthuisman/phd_tools | maskfileto4d.py | 1 | 1057 | #!/usr/bin/env python
import sys,image
from subprocess import call
if len(sys.argv) < 2:
print "Specify a maskfile."
sys.exit()
maskfile = sys.argv[-1]
spacing = 2
size = 399
maskimagefile = sys.argv[-1][:-4]+'.mhd'
spacingstr = str(spacing)
sizeinvox = str(size/spacing)
call(["rtkdrawgeometricphantom","--phantomfile",maskfile,"--dimension",sizeinvox,"--spacing",spacingstr,"--output",maskimagefile])
call(["clitkCropImage","-i",maskimagefile,"-o",maskimagefile,"--BG=-1000"])
call(["clitkBinarizeImage","-i",maskimagefile,"-o",maskimagefile,"-l","0.5","-u","1.5"]) #the intensity egion that is 1, and not 0
#call(["clitkImageConvert","-i",maskimagefile,"-o",maskimagefile,"-t","float"]) #to float, so our image class can read it (TODO: find out how numpy can read raw UCHAR)
#call(["clitkImageConvert","-i",maskimagefile,"-o",maskimagefile,"-c"])
# these lines will output a second mhd file that takes care of adding the fourth dimension,
# useful when you want to use clitkCropImage with --like.
data = image.image(maskimagefile)
data.tofake4d() | lgpl-3.0 | -4,783,733,606,612,285,000 | 39.692308 | 167 | 0.71807 | false |
CaliOpen/CaliOpen | src/backend/main/py.main/caliopen_main/tests/parsers/test_email.py | 1 | 1710 | import unittest
import os
from datetime import datetime
from zope.interface.verify import verifyObject
from caliopen_storage.config import Configuration
import vobject
if 'CALIOPEN_BASEDIR' in os.environ:
conf_file = '{}/src/backend/configs/caliopen.yaml.template'. \
format(os.environ['CALIOPEN_BASEDIR'])
else:
conf_file = '../../../../../configs/caliopen.yaml.template'
Configuration.load(conf_file, 'global')
from caliopen_main.common.helpers.normalize import clean_email_address
class TestEmailParser(unittest.TestCase):
def test_simple_1(self):
email = '[email protected]'
res = clean_email_address(email)
self.assertEqual(email, res[0])
self.assertEqual(email, res[1])
def test_with_name_1(self):
email = '"Ceci est, une virgule" <[email protected]>'
res = clean_email_address(email)
self.assertEqual('[email protected]', res[0])
self.assertEqual('[email protected]', res[1])
def test_multiple(self):
emails = '"Ceci est, une virgule" <[email protected]>, ' \
'"Est une, autre virgule" <[email protected]>'
parts = emails.split('>,')
self.assertEqual(len(parts), 2)
for part in parts:
res = clean_email_address(part)
self.assertTrue('@' in res[0])
def test_invalid_but_valid(self):
email = 'Ceci [lamentable.ment] <[email protected]>'
res = clean_email_address(email)
self.assertEqual('[email protected]', res[0])
def test_strange_1(self):
email = 'ideascube/ideascube <[email protected]>'
res = clean_email_address(email)
self.assertEqual('[email protected]', res[0])
| gpl-3.0 | 8,288,541,058,513,852,000 | 31.264151 | 70 | 0.639766 | false |
jkirklan/agietst | remote/magic_emu.py | 1 | 3684 | #!/usr/bin/env python
import sys
import socket
import fcntl
import struct
import array
from time import sleep
import subprocess
import shlex
from qpid.messaging import *
#global vars
broker_local = "localhost:5672"
addr_control = "agie_inbound/agie_inbound_control"
def pinger(iadr):
# This pings the local interface
command_line = "ping -c 1 " + iadr
args = shlex.split(command_line)
try:
subprocess.check_call(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
print "network is available on ", intf[1]
except:
print "Couldn't get a ping on ", intf[1]
def pinger_b(iadr):
#This pings the remote broker
command_line = "ping -c 1 " + iadr
args = shlex.split(command_line)
try:
subprocess.check_call(args,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
print "broker is available on ", iadr
return 0
except:
print "Couldn't get a ping on broker at ", iadr
return 1
def add_intf(intf):
broker_ok = 2
intf_name = intf[0]
intf_ip = intf[1]
broker_name = "broker." + intf_name + ".example.com"
pinger(intf[1])
broker_ok = pinger_b(broker_name)
if broker_ok == 0:
print "Sending message to avail brokers"
msg_content = "up," + intf_name + ',' + intf_ip + ',' + broker_name + ',' + 'outbound_agie_' + intf_name
msg = Message(msg_content)
#print msg_content
sender.send(msg)
elif broker_ok == 1:
print "oops"
else:
print "double oops"
def del_intf(diffy):
intf_name, intf_ip = diffy[0]
broker_name = "broker." + intf_name + ".example.com"
print "Sending message about dead broker."
msg_content = "down," + intf_name + ',' + intf_ip + ',' + broker_name + ',' + 'outbound_agie_' + intf_name
#print 'dead msg', msg_content
msg = Message(msg_content)
#print msg_content
rc = sender.send(msg)
#print 'rc', rc
def all_interfaces():
#This returns a list with all active network interfaces
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
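# Illustrative sketch (not part of the original script): all_interfaces() uses
# the SIOCGIFCONF ioctl to return (name, ipv4) pairs for every configured
# interface, loopback included, e.g. a hypothetical result:
#
#   [('lo', '127.0.0.1'), ('eth0', '10.0.0.5')]
#
# which is why the callers below strip element 0 (normally the loopback)
# before processing the list.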
#create broker connection and session
lb_connection = Connection(broker_local)
try:
lb_connection.open()
session = lb_connection.session()
sender = session.sender(addr_control)
except MessagingError,m:
print m
#finally:
# lb_connection.close()
start_config = all_interfaces()  # set the initial interface list on startup
del start_config[0] #remove loopback
for intf in start_config:
add_intf(intf)
print start_config
while True:
sleep(5)
new_config = all_interfaces()
del new_config[0]
if start_config == new_config:
print "No config change"
# need to add broker ping here for each active interface
else:
iffy = set(new_config)
diffy = []
diffy = [x for x in start_config if x not in iffy]
if diffy:
print "lost network", diffy
del_intf(diffy)
for intf in new_config:
add_intf(intf)
start_config = new_config
print "new config", start_config
lb_connection.close()
| gpl-3.0 | -4,934,059,153,742,142,000 | 28.472 | 107 | 0.630565 | false |
aspiers/ly2video | ly2video/utils.py | 1 | 2831 | #!/usr/bin/env python3
# coding=utf-8
# ly2video - generate performances video from LilyPond source files
# Copyright (C) 2012 Jiri "FireTight" Szabo
# Copyright (C) 2012 Adam Spiers
# Copyright (C) 2014 Emmanuel Leguy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For more information about this program, please visit
# <https://github.com/aspiers/ly2video/>.
import sys
import os
import tempfile
DEBUG = False # --debug sets to True
RUNDIR = ""
TMPDIR = ""
def setDebug():
global DEBUG
DEBUG = True
def debug(text):
if DEBUG:
print(text)
def progress(text):
print(text)
def stderr(text):
sys.stderr.write(text + "\n")
def warn(text):
stderr("WARNING: " + text)
def output_divider_line():
progress(60 * "-")
def fatal(text, status=1):
output_divider_line()
stderr("ERROR: " + text)
sys.exit(status)
def bug(text, *issues):
if len(issues) == 0:
msg = """
Sorry, ly2video has encountered a fatal bug as described above,
which it could not attribute to any known cause :-(
Please consider searching:
"""
else:
msg = """
Sorry, ly2video has encountered a fatal bug as described above :-(
It might be due to the following known issue(s):
"""
for issue in issues:
msg += " https://github.com/aspiers/ly2video/issues/%d\n" % issue
msg += """
If you suspect this is not the case, please visit:
"""
msg += """
https://github.com/aspiers/ly2video/issues
and if the problem is not listed there, please file a new
entry so we can get it fixed. Thanks!
Aborted execution.\
"""
fatal(text + "\n" + msg)
def setRunDir (runDir):
global RUNDIR
RUNDIR = runDir
def tmpPath(*dirs):
global TMPDIR
if not TMPDIR:
TMPDIR = tempfile.mkdtemp(prefix='ly2video')
segments = [ TMPDIR ]
segments.extend(dirs)
return os.path.join(RUNDIR, *segments)
class Observable:
def __init__(self):
self.__observers = []
def registerObserver(self, observer):
self.__observers.append(observer)
def notifyObservers (self):
for observer in self.__observers :
observer.update(self)
class Observer:
def update (self, observable):
pass
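# Minimal usage sketch (not part of the original module), showing the intended
# Observer-pattern wiring; ProgressObserver is an assumed example class:
#
#   class ProgressObserver(Observer):
#       def update(self, observable):
#           progress("state changed: %s" % observable)
#
#   subject = Observable()
#   subject.registerObserver(ProgressObserver())
#   subject.notifyObservers()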
| gpl-3.0 | -5,907,988,846,029,149,000 | 22.991525 | 80 | 0.668668 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_vpn_link_connections_operations.py | 1 | 6070 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VpnLinkConnectionsOperations(object):
"""VpnLinkConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_vpn_connection(
self,
resource_group_name, # type: str
gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVpnSiteLinkConnectionsResult"]
"""Retrieves all vpn site link connections for a particular virtual wan vpn gateway vpn
connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnSiteLinkConnectionsResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ListVpnSiteLinkConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnSiteLinkConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_connection.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnSiteLinkConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_vpn_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}/vpnLinkConnections'} # type: ignore
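    # Illustrative usage sketch (not part of the generated file): this operation
    # group is normally reached through NetworkManagementClient; the resource
    # names below are assumed examples:
    #
    #   client = NetworkManagementClient(credential, subscription_id)
    #   for link_conn in client.vpn_link_connections.list_by_vpn_connection(
    #           "my-rg", "my-vpn-gateway", "my-vpn-connection"):
    #       print(link_conn)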
| mit | 5,005,576,459,868,780,000 | 47.174603 | 236 | 0.646458 | false |
amozie/amozie | studzie/keras_gym/mountain_car_v0.py | 1 | 2577 | import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
class MountainCarEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self) -> None:
self.env = gym.make('MountainCar-v0')
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def _step(self, action):
step = self.env.step(action)
step = list(step)
step[1] = np.abs(step[0][1]) - 0.05
return tuple(step)
def _reset(self):
return self.env.reset()
def _seed(self, seed=None):
return self.env.seed(seed)
def _render(self, mode='human', close=False):
return self.env.render(mode, close)
def _close(self):
return self.env.close()
env = MountainCarEnv()
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32,
enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
state = env.reset()
action = env.action_space.sample()
print(action)
state_list= []
for i in range(500):
action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
state, reward, done, _ = env.step(2)
state_list.append(reward)
env.render()
env.render(close=True)
dqn.test(env, nb_episodes=5, visualize=True)
env.render(close=True) | apache-2.0 | 1,136,327,064,526,148,100 | 28.295455 | 112 | 0.698487 | false |
ghowland/gomh | _backups/gomh_010.py | 1 | 12815 | #!/usr/bin/env python
import pygame
import sys
sprite_size = [85/2, 112/2]
pygame.init()
SCREEN_SIZE = (640, 480)
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption('Get Off My Head')
#pygame.mouse.set_visible(0)
image = pygame.image.load('sf_sprites.png')
image = pygame.transform.scale(image, (image.get_width()/2, image.get_height()/2))
image = image.convert_alpha()
sf_sprites = image
# Load scene and it's collision mask
scene = pygame.image.load('sf_back.png')
scene_mask = pygame.image.load('sf_back_mask.png')
guy0 = pygame.Surface(sprite_size)
guy0.convert_alpha()
guy0.blit(sf_sprites, (0,0), [0, 0, sprite_size[0], sprite_size[1]])
guy0_left = pygame.transform.flip(guy0, True, False)
guy1 = pygame.Surface(sprite_size)
guy1.convert_alpha()
guy1.blit(sf_sprites, (0,0), [sprite_size[0] * 1, sprite_size[1] * 0, sprite_size[0], sprite_size[1]])
guy1_left = pygame.transform.flip(guy1, True, False)
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((0, 0, 0))
guy0_pos = [300, 130]
guy1_pos = [220, 130]
# Scrolling here. X and Y (Y to be implemented later...)
SCROLL_OFFSET = [0, 0]
def TestCollisionByPixelStep(start_pos, end_pos, step, scene, scene_obstacle_color=(255,255,255), log=False):
"""Test for a collision against the scene, starting at start_pos, ending at end_pos, using step to increment.
NOTE: This function assumes that the bounding box has already been tested against the scene, and may call scene.get_at() in negative or over scene size, and crash
"""
# Create deltas (differences) for the step in X and Y depending on the step and start-end positions
# Delta X
if start_pos[0] < end_pos[0]:
dx = 1
elif start_pos[0] > end_pos[0]:
dx = -1
else:
dx = 0
# Delta Y
if start_pos[1] < end_pos[1]:
dy = 1
elif start_pos[1] > end_pos[1]:
dy = -1
else:
dy = 0
# Ensure we can actually move across the line, or fail
if dx == 0 and dy == 0:
raise Exception('What the fuck? The start and end positions are the same... Handle this case later.')
# Determine the distance required to travel in X and Y directions based on the start/end positions
distance_x = abs(start_pos[0] - end_pos[0])
distance_y = abs(start_pos[1] - end_pos[1])
# Start the current position at the starting position
current_pos = [start_pos[0], start_pos[1]]
# Loop until we reach the end position, or find a collision
end_pos_reached = False
has_collision = False
distance_travelled = 0
while not end_pos_reached and not has_collision:
# Get the pixel value at the current position
scene_value = scene.get_at(current_pos)[:3]
if log:
print 'Col: dx: %s dy: %s Start: %s End: %s Cur: %s distX: %s distY: %s Pix: %s' % (dx, dy, start_pos, end_pos, current_pos, distance_x, distance_y, scene_value)
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
# Else, increment the current_pos by the dx and dy, multiplied by the step
else:
# Increment the current_pos
current_pos = [current_pos[0] + (dx * step), current_pos[1] + (dy * step)]
distance_travelled += step
      # If the current_pos is past the end_pos, then test the end_pos position, and set end_pos_reached (a final test is required)
if distance_x != 0 and distance_travelled >= distance_x:
# We reached the end, but make the last pixel test anyway, just to be sure we have checked them all
end_pos_reached = True
# Get the pixel value at the current position
scene_value = scene.get_at(end_pos)[:3]
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
elif distance_y != 0 and distance_travelled >= distance_y:
# We reached the end, but make the last pixel test anyway, just to be sure we have checked them all
end_pos_reached = True
# Get the pixel value at the current position
scene_value = scene.get_at(end_pos)[:3]
# If the pixel matches the scene_obstacle_color, there is a collision
if scene_value == scene_obstacle_color:
has_collision = True
return has_collision
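# Illustrative sketch (not in the original file): TestCollisionByPixelStep
# walks along a single edge of the bounding box, sampling every `step` pixels
# of the mask, so one call checks one side against the scene, e.g.:
#
#   blocked = TestCollisionByPixelStep([10, 20], [52, 20], 1, scene_mask)
#   # True if any white (255,255,255) mask pixel lies on that top edge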
def MovePosCollideWithScene(pos, move, bounding_box_size, scene_image, scene_obstacle_color=(255,255,255), log=False):
"""Returns a new position [x, y] from pos, moved by move [dx, dy], with
respect to colliding against non-moveable area in scene_image
(non [0,0,0] colors)
Args:
pos: list, [x, y]
move: list, [dx, dy]
bounding_box_size: list, [width, height]
scene_image, Surface object
Returns: list [new_x, new_y], if move is OK, otherwise [old_x, old_y]
"""
has_collision = False
# Create target position, where we want to move to
target_pos = [pos[0] + move[0], pos[1] + move[1]]
# Test for out of scene positions, and block
if target_pos[0] < 0:
has_collision = True
elif target_pos[0] + bounding_box_size[0] >= scene.get_width() - 1:
has_collision = True
elif target_pos[1] < 0:
has_collision = True
elif target_pos[1] + bounding_box_size[1] >= scene.get_height() - 1:
has_collision = True
# Test scene, if we havent already found a collision with the scene border
if not has_collision:
# Test every N pixels, to not miss collisions that are smaller than the bounding box
step_test = 1
#TODO(g): Collision detection with scene_image
# Make all 4 corners of the bounding box
corner_top_left = [target_pos[0], target_pos[1]]
corner_top_right = [target_pos[0] + bounding_box_size[0], target_pos[1]]
corner_bottom_left = [target_pos[0], target_pos[1] + bounding_box_size[1]]
corner_bottom_right = [target_pos[0] + bounding_box_size[0], target_pos[1] + bounding_box_size[1]]
if log:
print ''
# Test the bounding box, using step (N pixels) to get better resolution on obstacle collision
if TestCollisionByPixelStep(corner_top_left, corner_top_right, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_top_left, corner_bottom_left, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_top_right, corner_bottom_right, step_test, scene_image, log=log):
has_collision = True
elif TestCollisionByPixelStep(corner_bottom_left, corner_bottom_right, step_test, scene_image, log=log):
has_collision = True
# # Get pixel values for each of the corners
# scene_top_left = scene_image.get_at(corner_top_left)[:3]
# scene_top_right = scene_image.get_at(corner_top_right)[:3]
# scene_bottom_left = scene_image.get_at(corner_bottom_left)[:3]
# scene_bottom_right = scene_image.get_at(corner_bottom_right)[:3]
#
# # Test for colission
# if scene_top_left == scene_obstacle_color:
# has_collision = True
# elif scene_top_right == scene_obstacle_color:
# has_collision = True
# elif scene_bottom_left == scene_obstacle_color:
# has_collision = True
# elif scene_bottom_right == scene_obstacle_color:
# has_collision = True
# If there was a collision, dont move, create a new list form the old list
if has_collision:
# #DEBUG: Print shit out to see
# print 'TL: %s - %s' % (corner_top_left, scene_top_left)
# print 'TR: %s - %s' % (corner_top_right, scene_top_right)
# print 'BL: %s - %s' % (corner_bottom_left, scene_bottom_left)
# print 'BR: %s - %s' % (corner_bottom_right, scene_bottom_right)
final_pos = [pos[0], pos[1]]
# Else, there was not a collision, move the position
else:
# print 'No collision, moving: %s' % move
final_pos = target_pos
return final_pos
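# Illustrative sketch (not in the original file): MovePosCollideWithScene is
# the single entry point the game loop uses for walking, falling and jumping;
# it returns the new position, or the old one unchanged when the move is
# blocked, e.g.:
#
#   new_pos = MovePosCollideWithScene(guy1_pos, [5, 0], sprite_size, scene_mask)
#   if new_pos == guy1_pos:
#       pass  # blocked by the mask or the scene border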
def GetPosScrolled(pos):
global SCROLL_OFFSET
scrolled_pos = [pos[0] - SCROLL_OFFSET[0], pos[1] - SCROLL_OFFSET[1]]
return scrolled_pos
def Draw(surface, target_surface, pos):
target_surface.blit(surface, GetPosScrolled(pos))
# Left/Right? GHETTO CODE!
guy0_move_left = False
guy1_move_left = False
guy0_jump = 0
guy1_jump = 0
guy0_fall = 1
guy1_fall = 1
while True:
if guy0_pos[0] < guy1_pos[0]:
guy0_move_left = False
move_pos = MovePosCollideWithScene(guy0_pos, [5, 0], sprite_size, scene_mask)
if move_pos == guy0_pos and guy0_jump == 0:
guy0_jump = 17
else:
guy0_pos = move_pos
elif guy0_pos[0] > guy1_pos[0]:
guy0_move_left = True
move_pos = MovePosCollideWithScene(guy0_pos, [-5, 0], sprite_size, scene_mask)
if move_pos == guy0_pos and guy0_jump == 0:
guy0_jump = 17
else:
guy0_pos = move_pos
# Fall, if you can
if guy0_jump == 0:
fall_pos = MovePosCollideWithScene(guy0_pos, [0, guy0_fall], sprite_size, scene_mask)
if fall_pos != guy0_pos:
guy0_pos = fall_pos
if guy0_fall < 10:
guy0_fall += 1
else:
guy0_fall = 1
# Event pump
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
# Player input handling
keys = pygame.key.get_pressed() #checking pressed keys
if keys[pygame.K_LEFT]:
guy1_move_left = True
guy1_pos = MovePosCollideWithScene(guy1_pos, [-5, 0], sprite_size, scene_mask)
if keys[pygame.K_RIGHT]:
guy1_move_left = False
guy1_pos = MovePosCollideWithScene(guy1_pos, [5, 0], sprite_size, scene_mask)
if keys[pygame.K_UP]:
ground_test_pos = MovePosCollideWithScene(guy1_pos, [0, 1], sprite_size, scene_mask)
if ground_test_pos == guy1_pos and guy1_jump == 0:
guy1_jump = 17
# if keys[pygame.K_DOWN]:
# guy1_pos = MovePosCollideWithScene(guy1_pos, [0, 2], sprite_size, scene_mask)
# Fall, if you can
if guy1_jump == 0:
fall_pos = MovePosCollideWithScene(guy1_pos, [0, guy1_fall], sprite_size, scene_mask)
if fall_pos != guy1_pos:
guy1_pos = fall_pos
if guy1_fall < 10:
guy1_fall += 1
else:
guy1_fall = 1
# Test for jumping (guy1)
if guy1_jump > 0:
hit_the_roof = False
for count in range(0, guy1_jump):
jump_pos = MovePosCollideWithScene(guy1_pos, [0, -1], sprite_size, scene_mask)
      # If we hit a ceiling, don't immediately cancel the jump, but reduce it quickly (gives a sense of upward inertia)
if jump_pos == guy1_pos:
hit_the_roof = True
break
# Update the new position, cause we didnt hit the roof
else:
guy1_pos = jump_pos
# Reduce the jump each frame
if not hit_the_roof:
guy1_jump -= 1
else:
guy1_jump = guy1_jump / 2
if guy1_jump <= 2:
guy1_jump = 0
# Test for jumping (guy0)
if guy0_jump > 0:
hit_the_roof = False
for count in range(0, guy0_jump):
jump_pos = MovePosCollideWithScene(guy0_pos, [0, -1], sprite_size, scene_mask)
      # If we hit a ceiling, don't immediately cancel the jump, but reduce it quickly (gives a sense of upward inertia)
if jump_pos == guy0_pos:
hit_the_roof = True
break
# Update the new position, cause we didnt hit the roof
else:
guy0_pos = jump_pos
# Reduce the jump each frame
if not hit_the_roof:
guy0_jump -= 1
else:
guy0_jump = guy0_jump / 2
if guy0_jump <= 2:
guy0_jump = 0
# If ESC is hit, quit
if keys[pygame.K_ESCAPE]:
sys.exit(0)
# Handle scrolling the world
# global SCROLL_OFFSET
# global SCREEN_SIZE
scrolled_screen_x = [SCROLL_OFFSET[0], SCROLL_OFFSET[0] + SCREEN_SIZE[0]]
boundary_x = int(SCREEN_SIZE[0] / 2.5)
scroll_by_pixels = 3
if guy1_pos[0] < scrolled_screen_x[0] + boundary_x:
SCROLL_OFFSET[0] -= scroll_by_pixels
if SCROLL_OFFSET[0] < 0:
SCROLL_OFFSET[0] = 0
elif guy1_pos[0] > scrolled_screen_x[1] - boundary_x:
SCROLL_OFFSET[0] += scroll_by_pixels
max_scroll_x = scene.get_width() - SCREEN_SIZE[0]
if SCROLL_OFFSET[0] >= max_scroll_x:
SCROLL_OFFSET[0] = max_scroll_x
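        # Net effect of the scroll rule above, assuming SCREEN_SIZE[0] == 800
        # (the actual value is defined elsewhere in this script): boundary_x
        # becomes int(800 / 2.5) = 320, so the view pans by scroll_by_pixels
        # (3 px per frame) whenever guy1 gets within 320 px of either screen
        # edge, and SCROLL_OFFSET[0] stays clamped to [0, scene width - 800].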
# Render background
#background.fill((0, 0, 0))
#background.blit(scene, (0, 0))
Draw(scene, background, (0,0))
# Draw guy0 moving left or right (ghetto method)
if not guy0_move_left:
#background.blit(guy0, guy0_pos)
Draw(guy0, background, guy0_pos)
else:
#background.blit(guy0_left, guy0_pos)
Draw(guy0_left, background, guy0_pos)
# Draw guy1 moving left or right (ghetto method)
if not guy1_move_left:
#background.blit(guy1, guy1_pos)
Draw(guy1, background, guy1_pos)
else:
#background.blit(guy1_left, guy1_pos)
Draw(guy1_left, background, guy1_pos)
# Render to screen
screen.blit(background, (0,0))
pygame.display.flip()
| mit | 6,822,873,736,999,448,000 | 32.028351 | 171 | 0.647679 | false |
BigPoppaG/CourseMe | db_data.py | 1 | 14472 | #!flask/bin/python
from courseme import db
from courseme.models import Module, User, ROLE_USER, ROLE_ADMIN, Objective, Institution, Question, Subject, Topic
from datetime import datetime
maths = Subject(
name = "Mathematics"
)
biology = Subject(
name = "Biology"
)
misc = Subject(
name = "Miscelaneous"
)
db.session.add(maths)
db.session.add(biology)
db.session.add(misc)
db.session.commit()
algebra = Topic(
name = "Algebra",
time_created = datetime.utcnow(),
subject = maths
)
geometry = Topic(
name = "Geometry",
time_created = datetime.utcnow(),
subject = maths
)
number = Topic(
name = "Number",
time_created = datetime.utcnow(),
subject = maths
)
calculus = Topic(
name = "Calculus",
time_created = datetime.utcnow(),
subject = maths
)
db.session.add(algebra)
db.session.add(geometry)
db.session.add(number)
db.session.add(calculus)
db.session.commit()
user = User(email="[email protected]",
password="111111",
name="CourseMe",
time_registered=datetime.utcnow(),
last_seen=datetime.utcnow(),
role = ROLE_ADMIN)
db.session.add(user)
me = User(email="[email protected]",
password="111111",
name="Dan",
forename="Dan",
blurb="I built the CourseMe website and now am fabulously rich.",
time_registered=datetime.utcnow(),
last_seen=datetime.utcnow(),
role = ROLE_ADMIN)
db.session.add(me)
head = User(email="[email protected]",
password="111111",
name="Head of School",
blurb="I have been Headmaster at High School for five years. I'm great.",
time_registered=datetime.utcnow(),
last_seen=datetime.utcnow(),
role = ROLE_USER)
db.session.add(head)
db.session.commit()
courseMe = Institution.create(
name = "CourseMe",
administrator = User.main_admin_user(),
blurb = "This is the main CourseMe institution"
)
school = Institution.create(
name = "High School",
administrator = head,
blurb = "This is a great High School. We use CourseMe for everything. We have 100 pupils and they're all doing great."
)
for i in range(1, 3):
teacher = User(email="teacher" + str(i) + "@server.fake",
password="111111",
name="Mrs. Blogs " + str(i),
blurb="I have been a teacher at High School for five years. I'm great.",
time_registered=datetime.utcnow(),
last_seen=datetime.utcnow(),
role = ROLE_USER)
db.session.add(teacher)
school.add_member(teacher)
for i in range(1, 100):
student = User(email="student" + str(i) + "@server.fake",
password="111111",
name="Student"+str(i),
forename="Richie",
surname="Rich",
time_registered=datetime.utcnow(),
last_seen=datetime.utcnow(),
role = ROLE_USER)
db.session.add(student)
school.add_student(student,True)
db.session.add(school)
db.session.commit()
parent = User(email="[email protected]",
password="111111",
name="Parent",
time_registered=datetime.utcnow(),
last_seen=datetime.utcnow(),
role = ROLE_USER)
parent.students.append(student)
db.session.add(parent)
db.session.commit()
objective = Objective(name="Rationalise the denominator of fractions with surds",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective = Objective(name="Estimate powers and roots of any given positive",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective = Objective(name="Convert terminating decimals to their corresponding fractions",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective = Objective(name="Identify and work with fractions in ratio problems",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective = Objective(name="Use and interpret algebraic notation",
subject=maths,
topic=algebra,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective = Objective(name="Substitute numerical values into formulae and expressions, including scientific formulae",
subject=maths,
topic=algebra,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective2 = Objective(name="Substitute algebraic expressions into formulae and expressions, including scientific formulae",
subject=maths,
topic=algebra,
created_by_id=User.main_admin_user().id,
prerequisites=[objective]
)
db.session.add(objective2)
db.session.commit()
objective = Objective(name="Round numbers and measures to an appropriate degree of accuracy",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective2 = Objective(name="Use inequality notation to specify simple error intervals due to truncation or rounding",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id,
prerequisites=[objective]
)
db.session.add(objective2)
db.session.commit()
objective = Objective(name="Apply and interpret limits of accuracy",
subject=maths,
topic=number,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
objective = Objective(name="Rearrange formulae to change the subject",
subject=maths,
topic=algebra,
created_by_id=User.main_admin_user().id
#prerequisites=[Objective.query.get(2)]
)
db.session.add(objective)
db.session.commit()
module = Module(
name="Different sized infinities",
description = "Vi Hart Lecture from youtube",
notes = "This is just a placeholder",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=me.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube.com/embed/lA6hE7NFIK0?list=UUOGeU-1Fig3rrDjhm9Zs_wg",
objectives=[Objective.query.get(1)],
extension = True,
subject_id = maths.id
)
db.session.add(module)
module = Module(
name="Hexaflexagons",
description = "Vi Hart Lecture from youtube",
notes = "This is just a placeholder",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=me.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube.com/embed/VIVIegSt81k?rel=0",
objectives=[Objective.query.get(2)],
extension = True,
subject_id = maths.id
)
db.session.add(module)
module = Module(
name="Binary Trees",
description = "Vi Hart Lecture from youtube",
notes = "This is just a placeholder",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=me.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube.com/embed/e4MSN6IImpI?list=PLF7CBA45AEBAD18B8",
objectives=[Objective.query.get(3)],
extension = True,
visually_impaired = True,
subject_id = maths.id
)
db.session.add(module)
module = Module(
name="How I feel about Logarithms",
description = "Vi Hart Lecture from youtube",
notes = "This is just a placeholder",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=me.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube-nocookie.com/embed/N-7tcTIrers?rel=0",
objectives=[Objective.query.get(3)],
extension = True,
subject_id = maths.id
)
db.session.add(module)
module = Module(
name="Solving Linear Equations",
description = "An easy introduction to solving simple equations with one unknown",
notes = "Here are some notes about this lecture",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=teacher.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube-nocookie.com/embed/0BsoWvWXOMM?rel=0",
objectives=[Objective.query.get(3)],
extension = True,
visually_impaired = True,
subject_id = maths.id
)
db.session.add(module)
module = Module.CreateModule(
name="Solving Linear Equations",
description = "An easy introduction to solving simple equations with one unknown",
notes = "Here are some notes about this lecture",
author=head,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube-nocookie.com/embed/GmMX3-nTWbE?rel=0",
objectives=[Objective.query.get(3)],
extension = True,
visually_impaired = False,
subject = maths
)
module = Module(
name="Adding Fractions",
description = "A foolproof way to add and subtract numerical fractions",
notes = "Here are some notes about this lecture",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=teacher.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube-nocookie.com/embed/52ZlXsFJULI?rel=0",
objectives=[Objective.query.get(3)],
subject_id = maths.id
)
db.session.add(module)
module = Module(
name="Simple Trigonometry",
description = "An introduction to trigonometry functions for a right angled triangle",
notes = "Here are some notes about this lecture",
time_created=datetime.utcnow(),
last_updated=datetime.utcnow(),
author_id=teacher.id,
material_type = "Lecture",
material_source="youtube",
material_path="//www.youtube-nocookie.com/embed/F21S9Wpi0y8?rel=0",
objectives=[Objective.query.get(3)],
subject_id = maths.id
)
db.session.add(module)
db.session.commit()
courseMe.add_member(me)
school.add_member(teacher)
q = Question.CreateQuestion(
question =
r'''
Finally, while display equations look good for a page of samples, the ability to mix math and text in a paragraph is also important. This expression \(\sqrt{3x-1}+(1+x)^2\) is an example of an inline equation. As you see, MathJax equations can be used this way as well, without unduly disturbing the spacing between lines.
''',
answer = "",
extension = False,
visually_impaired = False,
author = User.main_admin_user(),
objectives=[Objective.query.get(3)],
subject = maths
)
q = Question.CreateQuestion(
question =
r'''
\begin{aligned}
\nabla \times \vec{\mathbf{B}} -\, \frac1c\, \frac{\partial\vec{\mathbf{E}}}{\partial t} & = \frac{4\pi}{c}\vec{\mathbf{j}} \\ \nabla \cdot \vec{\mathbf{E}} & = 4 \pi \rho \\
\nabla \times \vec{\mathbf{E}}\, +\, \frac1c\, \frac{\partial\vec{\mathbf{B}}}{\partial t} & = \vec{\mathbf{0}} \\
\nabla \cdot \vec{\mathbf{B}} & = 0 \end{aligned}
''',
answer = "",
extension = False,
visually_impaired = True,
author = User.main_admin_user(),
objectives=[Objective.query.get(2)],
subject = maths
)
q = Question.CreateQuestion(
question =
r'''
\[ \frac{1}{\Bigl(\sqrt{\phi \sqrt{5}}-\phi\Bigr) e^{\frac25 \pi}} =
1+\frac{e^{-2\pi}} {1+\frac{e^{-4\pi}} {1+\frac{e^{-6\pi}}
{1+\frac{e^{-8\pi}} {1+\ldots} } } } \]
''',
answer = "",
extension = False,
visually_impaired = False,
author = User.main_admin_user(),
objectives=[Objective.query.get(1)],
subject = maths
)
q = Question.CreateQuestion(
question =
r'''
\[\mathbf{V}_1 \times \mathbf{V}_2 = \begin{vmatrix}
\mathbf{i} & \mathbf{j} & \mathbf{k} \\
\frac{\partial X}{\partial u} & \frac{\partial Y}{\partial u} & 0 \\
\frac{\partial X}{\partial v} & \frac{\partial Y}{\partial v} & 0
\end{vmatrix} \]
''',
answer = "",
extension = False,
visually_impaired = False,
author = User.main_admin_user(),
objectives=[Objective.query.get(4)],
subject = maths
)
q = Question.CreateQuestion(
question =
r'''
\[ 1 + \frac{q^2}{(1-q)}+\frac{q^6}{(1-q)(1-q^2)}+\cdots =
\prod_{j=0}^{\infty}\frac{1}{(1-q^{5j+2})(1-q^{5j+3})},
\quad\quad \text{for $|q|<1$}. \]
''',
answer = "",
extension = True,
visually_impaired = True,
author = User.main_admin_user(),
objectives=[Objective.query.get(3)],
subject = maths
)
q = Question.CreateQuestion(
question =
r'''
This is a question:
\[ 1 + \frac{q^2}{(1-q)}+\frac{q^6}{(1-q)(1-q^2)}+\cdots =
\prod_{j=0}^{\infty}\frac{1}{(1-q^{5j+2})(1-q^{5j+3})},
\quad\quad \text{for $|q|<1$}. \]
And here is the answer \(\sqrt{3x-1}+(1+x)^2\).
Isn't it great.
''',
answer = "",
extension = True,
visually_impaired = False,
author = User.main_admin_user(),
objectives=[Objective.query.get(3), Objective.query.get(2)],
subject = maths
)
| mit | 2,412,441,968,369,456,000 | 29.087318 | 323 | 0.603234 | false |
eshijia/magnum | magnum/db/sqlalchemy/api.py | 1 | 39042 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from magnum.common import exception
from magnum.common import utils
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
"""The backend is this module itself."""
return Connection()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def add_identity_filter(query, value):
"""Adds an identity filter to a query.
Filters results by ID, if supplied value is a valid integer.
Otherwise attempts to filter results by UUID.
:param query: Initial query to add filter to.
:param value: Value for filtering results by.
:return: Modified query.
"""
if utils.is_int_like(value):
return query.filter_by(id=value)
elif utils.is_uuid_like(value):
return query.filter_by(uuid=value)
else:
raise exception.InvalidIdentity(identity=value)
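# Example of how the identity filter resolves, assuming a Bay query built with
# the helpers above (the literal values are only illustrative):
#
#   query = model_query(models.Bay)
#   add_identity_filter(query, 42)          # -> query.filter_by(id=42)
#   add_identity_filter(query, some_uuid)   # -> query.filter_by(uuid=some_uuid)
#   add_identity_filter(query, 'bogus')     # -> raises exception.InvalidIdentity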
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
query = model_query(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
try:
query = db_utils.paginate_query(query, model, limit, sort_keys,
marker=marker, sort_dir=sort_dir)
except db_exc.InvalidSortKey:
raise exception.InvalidParameterValue(
_('The sort_key value "%(key)s" is an invalid field for sorting')
% {'key': sort_key})
return query.all()
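# Illustrative pagination call, assuming `marker` is a previously fetched Bay
# row used as the keyset cursor (parameter names match the helper above):
#
#   query = model_query(models.Bay).filter_by(project_id=project_id)
#   bays = _paginate_query(models.Bay, limit=20, marker=marker,
#                          sort_key='uuid', sort_dir='asc', query=query)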
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
pass
def _add_tenant_filters(self, context, query):
if context.is_admin and context.all_tenants:
return query
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
return query
def _add_bays_filters(self, query, filters):
if filters is None:
filters = []
if 'baymodel_id' in filters:
query = query.filter_by(baymodel_id=filters['baymodel_id'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'node_count' in filters:
query = query.filter_by(node_count=filters['node_count'])
if 'master_count' in filters:
query = query.filter_by(master_count=filters['master_count'])
if 'stack_id' in filters:
query = query.filter_by(stack_id=filters['stack_id'])
if 'api_address' in filters:
query = query.filter_by(api_address=filters['api_address'])
if 'node_addresses' in filters:
query = query.filter_by(node_addresses=filters['node_addresses'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
if 'status' in filters:
query = query.filter(models.Bay.status.in_(filters['status']))
return query
def get_bay_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = self._add_bays_filters(query, filters)
return _paginate_query(models.Bay, limit, marker,
sort_key, sort_dir, query)
def create_bay(self, values):
# ensure defaults are present for new bays
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
bay = models.Bay()
bay.update(values)
try:
bay.save()
except db_exc.DBDuplicateEntry:
raise exception.BayAlreadyExists(uuid=values['uuid'])
return bay
def get_bay_by_id(self, context, bay_id):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=bay_id)
try:
return query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
def get_bay_by_name(self, context, bay_name):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=bay_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple bays exist with same name.'
' Please use the bay uuid instead.')
except NoResultFound:
raise exception.BayNotFound(bay=bay_name)
def get_bay_by_uuid(self, context, bay_uuid):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=bay_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_uuid)
def destroy_bay(self, bay_id):
def destroy_bay_resources(session, bay_uuid):
"""Checks whether the bay does not have resources."""
query = model_query(models.Pod, session=session)
query = self._add_pods_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.Service, session=session)
query = self._add_services_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.ReplicationController, session=session)
query = self._add_rcs_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.Container, session=session)
query = self._add_containers_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
session = get_session()
with session.begin():
query = model_query(models.Bay, session=session)
query = add_identity_filter(query, bay_id)
try:
bay_ref = query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
destroy_bay_resources(session, bay_ref['uuid'])
query.delete()
def update_bay(self, bay_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Bay.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_bay(bay_id, values)
def _do_update_bay(self, bay_id, values):
session = get_session()
with session.begin():
query = model_query(models.Bay, session=session)
query = add_identity_filter(query, bay_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def create_bay_lock(self, bay_uuid, conductor_id):
session = get_session()
with session.begin():
query = model_query(models.BayLock, session=session)
lock = query.filter_by(bay_uuid=bay_uuid).first()
if lock is not None:
return lock.conductor_id
session.add(models.BayLock(bay_uuid=bay_uuid,
conductor_id=conductor_id))
def steal_bay_lock(self, bay_uuid, old_conductor_id, new_conductor_id):
session = get_session()
with session.begin():
query = model_query(models.BayLock, session=session)
lock = query.filter_by(bay_uuid=bay_uuid).first()
if lock is None:
return True
elif lock.conductor_id != old_conductor_id:
return lock.conductor_id
else:
lock.update({'conductor_id': new_conductor_id})
def release_bay_lock(self, bay_uuid, conductor_id):
session = get_session()
with session.begin():
query = model_query(models.BayLock, session=session)
query = query.filter_by(bay_uuid=bay_uuid,
conductor_id=conductor_id)
count = query.delete()
if count == 0:
return True
def _add_baymodels_filters(self, query, filters):
if filters is None:
filters = []
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'image_id' in filters:
query = query.filter_by(image_id=filters['image_id'])
if 'flavor_id' in filters:
query = query.filter_by(flavor_id=filters['flavor_id'])
if 'master_flavor_id' in filters:
query = query.filter_by(
master_flavor_id=filters['master_flavor_id'])
if 'keypair_id' in filters:
query = query.filter_by(keypair_id=filters['keypair_id'])
if 'external_network_id' in filters:
query = query.filter_by(
external_network_id=filters['external_network_id'])
if 'dns_nameserver' in filters:
query = query.filter_by(dns_nameserver=filters['dns_nameserver'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
if 'labels' in filters:
query = query.filter_by(labels=filters['labels'])
return query
def get_baymodel_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = self._add_baymodels_filters(query, filters)
return _paginate_query(models.BayModel, limit, marker,
sort_key, sort_dir, query)
def create_baymodel(self, values):
# ensure defaults are present for new baymodels
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
baymodel = models.BayModel()
baymodel.update(values)
try:
baymodel.save()
except db_exc.DBDuplicateEntry:
raise exception.BayModelAlreadyExists(uuid=values['uuid'])
return baymodel
def get_baymodel_by_id(self, context, baymodel_id):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=baymodel_id)
try:
return query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
def get_baymodel_by_uuid(self, context, baymodel_uuid):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=baymodel_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_uuid)
def get_baymodel_by_name(self, context, baymodel_name):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=baymodel_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple baymodels exist with same name.'
' Please use the baymodel uuid instead.')
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_name)
def destroy_baymodel(self, baymodel_id):
def is_baymodel_referenced(session, baymodel_uuid):
"""Checks whether the baymodel is referenced by bay(s)."""
query = model_query(models.Bay, session=session)
query = self._add_bays_filters(query,
{'baymodel_id': baymodel_uuid})
return query.count() != 0
session = get_session()
with session.begin():
query = model_query(models.BayModel, session=session)
query = add_identity_filter(query, baymodel_id)
try:
baymodel_ref = query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
if is_baymodel_referenced(session, baymodel_ref['uuid']):
raise exception.BayModelReferenced(baymodel=baymodel_id)
query.delete()
def update_baymodel(self, baymodel_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing BayModel.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_baymodel(baymodel_id, values)
def _do_update_baymodel(self, baymodel_id, values):
session = get_session()
with session.begin():
query = model_query(models.BayModel, session=session)
query = add_identity_filter(query, baymodel_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
ref.update(values)
return ref
def _add_containers_filters(self, query, filters):
if filters is None:
filters = []
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'image' in filters:
query = query.filter_by(image=filters['image'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_container_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = self._add_containers_filters(query, filters)
return _paginate_query(models.Container, limit, marker,
sort_key, sort_dir, query)
def create_container(self, values):
# ensure defaults are present for new containers
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
container = models.Container()
container.update(values)
try:
container.save()
except db_exc.DBDuplicateEntry:
raise exception.ContainerAlreadyExists(uuid=values['uuid'])
return container
def get_container_by_id(self, context, container_id):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=container_id)
try:
return query.one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_id)
def get_container_by_uuid(self, context, container_uuid):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=container_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_uuid)
def get_container_by_name(self, context, container_name):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=container_name)
try:
return query.one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_name)
except MultipleResultsFound:
raise exception.Conflict('Multiple containers exist with same '
'name. Please use the container uuid '
'instead.')
def destroy_container(self, container_id):
session = get_session()
with session.begin():
query = model_query(models.Container, session=session)
query = add_identity_filter(query, container_id)
count = query.delete()
if count != 1:
raise exception.ContainerNotFound(container_id)
def update_container(self, container_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Container.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_container(container_id, values)
def _do_update_container(self, container_id, values):
session = get_session()
with session.begin():
query = model_query(models.Container, session=session)
query = add_identity_filter(query, container_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_nodes_filters(self, query, filters):
if filters is None:
filters = []
if 'associated' in filters:
if filters['associated']:
                query = query.filter(models.Node.ironic_node_id.isnot(None))
            else:
                query = query.filter(models.Node.ironic_node_id.is_(None))
if 'type' in filters:
query = query.filter_by(type=filters['type'])
if 'image_id' in filters:
query = query.filter_by(image_id=filters['image_id'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_node_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Node)
query = self._add_tenant_filters(context, query)
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
def create_node(self, values):
# ensure defaults are present for new nodes
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
node = models.Node()
node.update(values)
try:
node.save()
except db_exc.DBDuplicateEntry as exc:
if 'ironic_node_id' in exc.columns:
raise exception.InstanceAssociated(
instance_uuid=values['ironic_node_id'],
node=values['uuid'])
raise exception.NodeAlreadyExists(uuid=values['uuid'])
return node
def get_node_by_id(self, context, node_id):
query = model_query(models.Node)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=node_id)
try:
return query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
def get_node_by_uuid(self, context, node_uuid):
query = model_query(models.Node)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=node_uuid)
try:
return query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node_uuid)
def destroy_node(self, node_id):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
count = query.delete()
if count != 1:
raise exception.NodeNotFound(node_id)
def update_node(self, node_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Node.")
raise exception.InvalidParameterValue(err=msg)
try:
return self._do_update_node(node_id, values)
except db_exc.DBDuplicateEntry:
raise exception.InstanceAssociated(
instance_uuid=values['ironic_node_id'],
node=node_id)
def _do_update_node(self, node_id, values):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
# Prevent ironic_node_id overwriting
if values.get("ironic_node_id") and ref.ironic_node_id:
raise exception.NodeAssociated(
node=node_id,
instance=ref.ironic_node_id)
ref.update(values)
return ref
def _add_pods_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
return query
def get_pod_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Pod)
query = self._add_tenant_filters(context, query)
query = self._add_pods_filters(query, filters)
return _paginate_query(models.Pod, limit, marker,
sort_key, sort_dir, query)
def create_pod(self, values):
# ensure defaults are present for new pods
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
pod = models.Pod()
pod.update(values)
try:
pod.save()
except db_exc.DBDuplicateEntry:
raise exception.PodAlreadyExists(uuid=values['uuid'])
return pod
def get_pod_by_id(self, context, pod_id):
query = model_query(models.Pod)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=pod_id)
try:
return query.one()
except NoResultFound:
raise exception.PodNotFound(pod=pod_id)
def get_pod_by_uuid(self, context, pod_uuid):
query = model_query(models.Pod)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=pod_uuid)
try:
return query.one()
except NoResultFound:
raise exception.PodNotFound(pod=pod_uuid)
def get_pod_by_name(self, pod_name):
query = model_query(models.Pod).filter_by(name=pod_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple pods exist with same name.'
' Please use the pod uuid instead.')
except NoResultFound:
raise exception.PodNotFound(pod=pod_name)
def destroy_pod(self, pod_id):
session = get_session()
with session.begin():
query = model_query(models.Pod, session=session)
query = add_identity_filter(query, pod_id)
count = query.delete()
if count != 1:
raise exception.PodNotFound(pod_id)
def update_pod(self, pod_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Pod.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_pod(pod_id, values)
def _do_update_pod(self, pod_id, values):
session = get_session()
with session.begin():
query = model_query(models.Pod, session=session)
query = add_identity_filter(query, pod_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.PodNotFound(pod=pod_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_services_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'ip' in filters:
query = query.filter_by(ip=filters['ip'])
if 'ports' in filters:
query = query.filter_by(ports=filters['ports'])
return query
def get_service_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = self._add_services_filters(query, filters)
return _paginate_query(models.Service, limit, marker,
sort_key, sort_dir, query)
def create_service(self, values):
# ensure defaults are present for new services
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
service = models.Service()
service.update(values)
try:
service.save()
except db_exc.DBDuplicateEntry:
raise exception.ServiceAlreadyExists(uuid=values['uuid'])
return service
def get_service_by_id(self, context, service_id):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=service_id)
try:
return query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_id)
def get_service_by_uuid(self, context, service_uuid):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=service_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_uuid)
def get_services_by_bay_uuid(self, context, bay_uuid):
# First verify whether the Bay exists
self.get_bay_by_uuid(context, bay_uuid)
query = model_query(models.Service).filter_by(bay_uuid=bay_uuid)
try:
return query.all()
except NoResultFound:
raise exception.ServiceNotFound(bay=bay_uuid)
def get_service_by_name(self, context, service_name):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=service_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple services exist with same name.'
' Please use the service uuid instead.')
except NoResultFound:
raise exception.ServiceNotFound(service=service_name)
def destroy_service(self, service_id):
session = get_session()
with session.begin():
query = model_query(models.Service, session=session)
query = add_identity_filter(query, service_id)
count = query.delete()
if count != 1:
raise exception.ServiceNotFound(service_id)
def update_service(self, service_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Service.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_service(service_id, values)
def _do_update_service(self, service_id, values):
session = get_session()
with session.begin():
query = model_query(models.Service, session=session)
query = add_identity_filter(query, service_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_rcs_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'replicas' in filters:
query = query.filter_by(replicas=filters['replicas'])
return query
def get_rc_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = self._add_rcs_filters(query, filters)
return _paginate_query(models.ReplicationController, limit, marker,
sort_key, sort_dir, query)
def create_rc(self, values):
# ensure defaults are present for new ReplicationController
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
rc = models.ReplicationController()
rc.update(values)
try:
rc.save()
except db_exc.DBDuplicateEntry:
raise exception.ReplicationControllerAlreadyExists(
uuid=values['uuid'])
return rc
def get_rc_by_id(self, context, rc_id):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=rc_id)
try:
return query.one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_id)
def get_rc_by_uuid(self, context, rc_uuid):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=rc_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_uuid)
def get_rcs_by_bay_uuid(self, context, bay_uuid):
# First verify whether the Bay exists
self.get_bay_by_uuid(context, bay_uuid)
query = model_query(models.ReplicationController).filter_by(
bay_uuid=bay_uuid)
try:
return query.all()
except NoResultFound:
raise exception.ReplicationControllerNotFound(bay=bay_uuid)
def get_rc_by_name(self, context, rc_name):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=rc_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple rcs exist with same name.'
' Please use the rc uuid instead.')
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_name)
def destroy_rc(self, rc_id):
session = get_session()
with session.begin():
query = model_query(models.ReplicationController, session=session)
query = add_identity_filter(query, rc_id)
count = query.delete()
if count != 1:
raise exception.ReplicationControllerNotFound(rc_id)
def update_rc(self, rc_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing rc.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_rc(rc_id, values)
def _do_update_rc(self, rc_id, values):
session = get_session()
with session.begin():
query = model_query(models.ReplicationController, session=session)
query = add_identity_filter(query, rc_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_id)
ref.update(values)
return ref
def create_x509keypair(self, values):
# ensure defaults are present for new x509keypairs
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
x509keypair = models.X509KeyPair()
x509keypair.update(values)
try:
x509keypair.save()
except db_exc.DBDuplicateEntry:
raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
return x509keypair
def get_x509keypair_by_id(self, context, x509keypair_id):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=x509keypair_id)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def get_x509keypair_by_name(self, context, x509keypair_name):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=x509keypair_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple x509keypairs exist with '
'same name. Please use the x509keypair '
'uuid instead.')
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_name)
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=x509keypair_uuid)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
def destroy_x509keypair(self, x509keypair_id):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
count = query.delete()
if count != 1:
raise exception.X509KeyPairNotFound(x509keypair_id)
def update_x509keypair(self, x509keypair_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_x509keypair(x509keypair_id, values)
def _do_update_x509keypair(self, x509keypair_id, values):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_x509keypairs_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_x509keypair_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = self._add_x509keypairs_filters(query, filters)
return _paginate_query(models.X509KeyPair, limit, marker,
sort_key, sort_dir, query)
| apache-2.0 | 5,895,803,285,767,630,000 | 36.978599 | 79 | 0.596102 | false |
twitter/zktraffic | zktraffic/network/sniffer.py | 1 | 4062 | # ==================================================================================================
# Copyright 2015 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
from collections import defaultdict
from threading import Thread
import hexdump
import logging
import os
import signal
import socket
import struct
import sys
from zktraffic.base.network import BadPacket, get_ip, get_ip_packet, SnifferBase
from scapy.sendrecv import sniff
from scapy.config import conf as scapy_conf
from six.moves import intern
scapy_conf.logLevel = logging.ERROR  # shush scapy
MAX_PACKET_SIZE = 65535
class Sniffer(SnifferBase):
"""
A generic & simple packet sniffer
"""
class RegistrationError(Exception): pass
def __init__(self, iface, port, msg_cls, handler=None, dump_bad_packet=False, start=True):
super(Sniffer, self).__init__()
self.setDaemon(True)
self._msg_cls = msg_cls
self._iface = iface
self._port = port
self._packet_size = MAX_PACKET_SIZE
self._handlers = []
self._dump_bad_packet = dump_bad_packet
self._is_loopback = iface in ["lo", "lo0"]
if handler is not None:
self.add_handler(handler)
if start: # pragma: no cover
self.start()
def add_handler(self, handler):
if handler is None:
raise self.RegistrationError("handler is none")
if handler in self._handlers:
raise self.RegistrationError("handler %s has already been added" % handler)
self._handlers.append(handler)
def run(self, *args, **kwargs):
pfilter = "port %d" % self._port
try:
sniff_kwargs = {"filter": pfilter, "store": 0, "prn": self.handle_packet}
if self._iface != "any":
sniff_kwargs["iface"] = self._iface
if "offline" in kwargs:
sniff_kwargs["offline"] = kwargs["offline"]
sniff(**sniff_kwargs)
except socket.error as ex:
sys.stderr.write("Error: %s, device: %s\n" % (ex, self._iface))
finally:
if "offline" not in kwargs:
os.kill(os.getpid(), signal.SIGINT)
def handle_packet(self, packet):
try:
message = self.message_from_packet(packet)
if message:
self.handle_message(message)
except (BadPacket, struct.error) as ex:
if self._dump_bad_packet:
print("got: %s" % str(ex))
hexdump.hexdump(packet.load)
sys.stdout.flush()
except Exception as ex:
print("got: %s" % str(ex))
hexdump.hexdump(packet.load)
sys.stdout.flush()
def handle_message(self, message):
for h in self._handlers:
h(message)
def message_from_packet(self, packet):
"""
:returns: Returns an instance of Message
:raises:
:exc:`BadPacket` if the packet is of an unknown type
:exc:`DeserializationError` if deserialization failed
:exc:`struct.error` if deserialization failed
"""
ip_p = get_ip_packet(packet.load, 0, self._port, self._is_loopback)
if 0 == len(ip_p.data.data):
return None
if ip_p.data.sport != self._port and ip_p.data.dport != self._port:
raise BadPacket("Wrong port")
return self._msg_cls.from_payload(
ip_p.data.data,
intern("%s:%s" % (get_ip(ip_p, ip_p.src), ip_p.data.sport)),
intern("%s:%s" % (get_ip(ip_p, ip_p.dst), ip_p.data.dport)),
packet.time
)
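# Minimal usage sketch. It assumes a message class exposing the
# from_payload(data, src, dst, timestamp) hook that message_from_packet()
# relies on; `SomeMessage` and the interface/port values are placeholders.
#
#   def on_message(msg):
#       print(msg)
#
#   sniffer = Sniffer(iface="eth0", port=2181, msg_cls=SomeMessage,
#                     handler=on_message, start=False)
#   sniffer.start()   # daemon thread; sniff() keeps running until interrupted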
| apache-2.0 | 3,702,111,362,593,863,000 | 30.007634 | 100 | 0.612752 | false |
astrikov-d/dartcms | dartcms/apps/shop/models.py | 1 | 2156 | # coding: utf-8
from dartcms.utils.loading import is_model_registered
from django.apps import apps
from django.db.models.signals import post_save, pre_delete, pre_save
from .abstract_models import *
from .signals import *
__all__ = []
if not is_model_registered('shop', 'ProductCatalog'):
class ProductCatalog(AbstractProductCatalog):
pass
__all__.append('ProductCatalog')
if is_model_registered('shop', 'ProductSection'):
    section_model = apps.get_registered_model('shop', 'ProductSection')
else:
class ProductSection(AbstractProductSection):
pass
__all__.append('ProductSection')
section_model = ProductSection
if not is_model_registered('shop', 'ProductLabel'):
class ProductLabel(AbstractProductLabel):
pass
__all__.append('ProductLabel')
if not is_model_registered('shop', 'ProductManufacturer'):
class ProductManufacturer(AbstractProductManufacturer):
pass
__all__.append('ProductManufacturer')
if not is_model_registered('shop', 'Product'):
class Product(AbstractProduct):
pass
__all__.append('Product')
if not is_model_registered('shop', 'ProductImage'):
class ProductImage(AbstractProductImage):
pass
__all__.append('ProductImage')
if not is_model_registered('shop', 'OrderStatus'):
class OrderStatus(AbstractOrderStatus):
pass
__all__.append('OrderStatus')
if not is_model_registered('shop', 'OrderPaymentType'):
class OrderPaymentType(AbstractOrderPaymentType):
pass
__all__.append('OrderPaymentType')
if not is_model_registered('shop', 'OrderShippingType'):
class OrderShippingType(AbstractOrderShippingType):
pass
__all__.append('OrderShippingType')
if not is_model_registered('shop', 'Order'):
class Order(AbstractOrder):
pass
__all__.append('Order')
if not is_model_registered('shop', 'OrderDetail'):
class OrderDetail(AbstractOrderDetail):
pass
__all__.append('OrderDetail')
pre_save.connect(pre_save_handler_section, sender=section_model)
post_save.connect(post_save_handler_section, sender=section_model)
pre_delete.connect(pre_delete_handler_section, sender=section_model)
| mit | -5,669,258,377,814,378,000 | 25.292683 | 68 | 0.699907 | false |
er432/TASSELpy | TASSELpy/utils/helper.py | 1 | 7197 | import re
import javabridge
## Dictionary holding the symbols for primatives in javabridge signatures
primative_dict = {'boolean':'Z','byte':'B','char':'C','short':'S',
'int':'I','long':'J','float':'F','double':'D',
'void':'V'}
## Constructs the signature needed by javabridge for a function
# @param args A list of argument types for the function. If an array is specified, the
# class name should end with []. The begninning L and ; will be added
# if not already present. Otherwise, just put in the primative name (i.e. int, char, etc)
# @param return_type The return type for the function, specified in the same way as the arguments
# @return The signature for the java method
def make_sig(args,return_type):
"""
Constructs the signature needed by javabridge for a function
Arguments:
args -- A list of argument types for the function. If an array is specified, the class name
should end with []. The beginning L and end ; will be added if not already present.
Otherwise, just put in the primative name (i.e. int char, etc)
return_type -- The return type of the function, specified in the same way as the arguments
Returns:
The signature for the java method
"""
# Run arguments through
for i,arg in enumerate(args):
array = False
n_array = 0
if arg in primative_dict or (re.match(r'[a-z]+(?=\[)',arg) and \
re.match(r'[a-z]+(?=\[)',arg).group() in primative_dict):
# If this is a primative
if arg.endswith('[]'):
# If this should be an array
array = True
while arg.endswith('[]'):
arg = arg[:-2]
n_array += 1
# Turn into signature form
if array:
args[i] = "%s%s" % ('['*n_array,primative_dict[arg])
else:
args[i] = primative_dict[arg]
elif '/' in arg:
# If this is a class name
if arg.endswith('[]'):
# If this should be an array
array = True
while args[i].endswith('[]'):
args[i] = args[i][:-2]
n_array += 1
if not args[i].startswith('L'):
# Check that it begins with L
args[i] = "L%s" % args[i]
if not args[i].endswith(';'):
# Check that it ends with a semicolon
args[i] = "%s;" % args[i]
if array:
# Put in array if necessary
args[i] = "%s%s" % ('['*n_array,args[i])
else:
raise ValueError("%s is not a valid type!" % arg)
# Run return type through
array = False
n_array = 0
if return_type in primative_dict or (re.match(r'[a-z]+(?=\[)',return_type) and \
re.match(r'[a-z]+(?=\[)',return_type).group() in primative_dict):
# If this is a primative
if return_type.endswith('[]'):
# If this should be an array
array = True
while return_type.endswith('[]'):
return_type = return_type[:-2]
n_array += 1
# Turn into signature form
if array:
return_type = "%s%s" % ('['*n_array,primative_dict[return_type])
else:
return_type = primative_dict[return_type]
elif '/' in return_type:
# If this is a class name
if return_type.endswith('[]'):
# If this should be an array
array = True
while return_type.endswith('[]'):
return_type = return_type[:-2]
n_array += 1
if not return_type.startswith('L'):
# Check that it begins with L
return_type = "L%s" % return_type
if not return_type.endswith(';'):
# Check that it ends with a semicolon
return_type = "%s;" % return_type
if array:
# Put in array if necessary
return_type = "%s%s" % ('['*n_array,return_type)
else:
raise ValueError("%s is not a valid type!" % return_type)
## Return the signature
return "(%s)%s" % (''.join(args),return_type)
## Constructs the signature needed by javabridge for a constructor function
# @param args A list of argument types for the function. If an array is specified, the
# class name should end with []. The begninning L and ; will be added
# if not already present. Otherwise, just put in the primative name (i.e. int, char, etc)
# @return The signature for the java constructor
def make_constructor_sig(args):
"""
Constructs the signature needed by javabridge for a function
Arguments:
args -- A list of argument types for the function. If an array is specified, the class name
should end with []. The beginning L and end ; will be added if not already present.
Otherwise, just put in the primative name (i.e. int char, etc)
Returns:
The signature for the java constructor
"""
# Run arguments through
for i,arg in enumerate(args):
array = False
if arg in primative_dict or arg[:-2] in primative_dict:
# If this is a primative
if arg.endswith('[]'):
# If this should be an array
arg = arg[:-2]
array = True
# Turn into signature form
if array:
args[i] = "[%s" % primative_dict[arg]
else:
args[i] = primative_dict[arg]
elif '/' in arg:
# If this is a class name
if arg.endswith('[]'):
# If this should be an array
args[i] = arg[:-2]
array = True
if not args[i].startswith('L'):
# Check that it begins with L
args[i] = "L%s" % args[i]
if not args[i].endswith(';'):
# Check that it ends with a semicolon
args[i] = "%s;" % args[i]
if array:
# Put in array if necessary
args[i] = "[%s" % args[i]
else:
raise ValueError("%s is not a valid type!" % arg)
## Return the signature
return "(%s)V" % (''.join(args))
## Function to use in case a value returned from javabridge is a python
# primative that needs to be sent back to a java object before further processing
# @param prim_val The python primative value
# @return A wrapped java object of the appropriate type (String, Integer, or Double)
def send_to_java(prim_val):
if isinstance(prim_val, str) or isinstance(prim_val,unicode):
return javabridge.make_instance("java/lang/String",
"(Ljava/lang/String;)V",prim_val)
elif isinstance(prim_val, int):
return javabridge.make_instance("java/lang/Integer",
"(Ljava/lang/Integer;)V",prim_val)
elif isinstance(prim_val, float):
return javabridge.make_instance("java/lang/Double",
"(Ljava/lang/Double;)V",prim_val)
else:
return prim_val
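# Illustrative calls (each returns a javabridge object handle wrapping the
# corresponding java.lang type, provided a JVM is attached):
#   send_to_java("text")  -> java.lang.String instance
#   send_to_java(5)       -> java.lang.Integer instance
#   send_to_java(2.5)     -> java.lang.Double instance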
| bsd-3-clause | -37,858,812,142,794,400 | 40.362069 | 98 | 0.545227 | false |
plamut/superdesk-core | superdesk/errors.py | 1 | 18386 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from flask import current_app as app
from eve.validation import ValidationError
logger = logging.getLogger(__name__)
notifiers = []
def add_notifier(notifier):
if notifier not in notifiers:
notifiers.append(notifier)
def update_notifiers(*args, **kwargs):
for notifier in notifiers:
notifier(*args, **kwargs)
def get_registered_errors(self):
return {
'IngestApiError': IngestApiError._codes,
'IngestFtpError': IngestFtpError._codes,
'IngestFileError': IngestFileError._codes
}
class SuperdeskError(ValidationError):
_codes = {}
system_exception = None
def __init__(self, code, desc=None):
"""
:param int code: numeric error code
:param desc: optional detailed error description, defaults to None
"""
self.code = code
self.message = self._codes.get(code, 'Unknown error')
self.desc = desc
def __str__(self):
desc_text = '' if not self.desc else (' Details: ' + self.desc)
return "{} Error {} - {}{desc}".format(
self.__class__.__name__,
self.code,
self.message,
desc=desc_text
)
def get_error_description(self):
return self.code, self._codes[self.code]
class SuperdeskApiError(SuperdeskError):
"""Base class for superdesk API."""
# default error status code
status_code = 400
def __init__(self, message=None, status_code=None, payload=None):
"""
:param message: a human readable error description
:param status_code: response status code
:param payload: a dict with request issues
"""
Exception.__init__(self)
self.message = message
if status_code:
self.status_code = status_code
if payload:
self.payload = payload
logger.error("HTTP Exception {} has been raised: {}".format(status_code, message))
def to_dict(self):
"""Create dict for json response."""
rv = {}
rv[app.config['STATUS']] = app.config['STATUS_ERR']
rv['_message'] = self.message or ''
if hasattr(self, 'payload'):
rv[app.config['ISSUES']] = self.payload
return rv
def __str__(self):
return "{}: {}".format(repr(self.status_code), self.message)
@classmethod
def badRequestError(cls, message=None, payload=None):
return SuperdeskApiError(status_code=400, message=message, payload=payload)
@classmethod
def unauthorizedError(cls, message=None, payload={'auth': 1}):
return SuperdeskApiError(status_code=401, message=message, payload=payload)
@classmethod
def forbiddenError(cls, message=None, payload=None):
return SuperdeskApiError(status_code=403, message=message, payload=payload)
@classmethod
def notFoundError(cls, message=None, payload=None):
return SuperdeskApiError(status_code=404, message=message, payload=payload)
@classmethod
def preconditionFailedError(cls, message=None, payload=None):
return SuperdeskApiError(status_code=412, message=message, payload=payload)
@classmethod
def internalError(cls, message=None, payload=None):
return SuperdeskApiError(status_code=500, message=message, payload=payload)
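# Sketch (illustrative): the factory classmethods above are the usual way to
# build API errors; note that `to_dict` additionally needs a Flask application
# context, since it reads the STATUS/ISSUES keys from `app.config`.
def _api_error_example():
    err = SuperdeskApiError.badRequestError(
        message='missing field', payload={'slugline': 'required'})
    return err.status_code, err.message  # (400, 'missing field')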
class IdentifierGenerationError(SuperdeskApiError):
"""Exception raised if failed to generate unique_id."""
status_code = 500
payload = {'unique_id': 1}
message = "Failed to generate unique_id"
class InvalidFileType(SuperdeskError):
"""Exception raised when receiving a file type that is not supported."""
def __init__(self, type=None):
super().__init__('Invalid file type %s' % type, payload={})
class BulkIndexError(SuperdeskError):
"""Exception raised when bulk index operation fails.."""
def __init__(self, resource=None, errors=None):
super().__init__('Failed to bulk index resource {} errors: {}'.format(resource, errors), payload={})
class PrivilegeNameError(Exception):
pass
class InvalidStateTransitionError(SuperdeskApiError):
"""Exception raised if workflow transition is invalid."""
def __init__(self, message='Workflow transition is invalid.', status_code=412):
super().__init__(message, status_code)
class SuperdeskIngestError(SuperdeskError):
def __init__(self, code, exception, provider=None):
super().__init__(code)
self.system_exception = exception
provider = provider or {}
self.provider_name = provider.get('name', 'Unknown provider') if provider else 'Unknown provider'
if exception:
if provider.get('notifications', {}).get('on_error', True):
exception_msg = str(exception)[-200:]
update_notifiers('error',
'Error [%s] on ingest provider {{name}}: %s' % (code, exception_msg),
resource='ingest_providers' if provider else None,
name=self.provider_name,
provider_id=provider.get('_id', ''))
if provider:
logger.error("{}: {} on channel {}".format(self, exception, self.provider_name))
else:
logger.error("{}: {}".format(self, exception))
class ProviderError(SuperdeskIngestError):
_codes = {
2001: 'Provider could not be saved',
2002: 'Expired content could not be removed',
2003: 'Rule could not be applied',
2004: 'Ingest error',
2005: 'Anpa category error',
2006: 'Expired content could not be filtered',
2007: 'IPTC processing error',
2008: 'External source no suitable resolution found'
}
@classmethod
def providerAddError(cls, exception=None, provider=None):
return ProviderError(2001, exception, provider)
@classmethod
def expiredContentError(cls, exception=None, provider=None):
return ProviderError(2002, exception, provider)
@classmethod
def ruleError(cls, exception=None, provider=None):
return ProviderError(2003, exception, provider)
@classmethod
def ingestError(cls, exception=None, provider=None):
return ProviderError(2004, exception, provider)
@classmethod
def anpaError(cls, exception=None, provider=None):
return ProviderError(2005, exception, provider)
@classmethod
def providerFilterExpiredContentError(cls, exception=None, provider=None):
return ProviderError(2006, exception, provider)
@classmethod
def iptcError(cls, exception=None, provider=None):
return ProviderError(2007, exception, provider)
@classmethod
def externalProviderError(cls, exception=None, provider=None):
return ProviderError(2008, exception, provider)
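# Sketch (illustrative): ingest errors are raised through these factories so
# the provider name travels with the error and a user notification is pushed
# via `update_notifiers` (silenced here with `notifications.on_error`).
def _provider_error_example():
    provider = {'name': 'reuters', '_id': '1',
                'notifications': {'on_error': False}}
    err = ProviderError.ingestError(IOError('connection reset'), provider)
    return err.code, err.provider_name  # (2004, 'reuters')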
class ParserError(SuperdeskIngestError):
_codes = {
1001: 'Message could not be parsed',
1002: 'Ingest file could not be parsed',
1003: 'ANPA file could not be parsed',
1004: 'NewsML1 input could not be processed',
1005: 'NewsML2 input could not be processed',
1006: 'NITF input could not be processed',
1007: 'WENN input could not be processed',
1008: 'ZCZC input could not be processed',
1009: 'IPTC7901 input could not be processed'
}
@classmethod
def parseMessageError(cls, exception=None, provider=None):
return ParserError(1001, exception, provider)
@classmethod
def parseFileError(cls, source=None, filename=None, exception=None, provider=None):
if source and filename:
logger.exception("Source Type: {} - File: {} could not be processed".format(source, filename))
return ParserError(1002, exception, provider)
@classmethod
def anpaParseFileError(cls, filename=None, exception=None):
if filename:
logger.exception("File: {} could not be processed".format(filename))
return ParserError(1003, exception)
@classmethod
def newsmlOneParserError(cls, exception=None, provider=None):
return ParserError(1004, exception, provider)
@classmethod
def newsmlTwoParserError(cls, exception=None, provider=None):
return ParserError(1005, exception, provider)
@classmethod
def nitfParserError(cls, exception=None, provider=None):
return ParserError(1006, exception, provider)
@classmethod
def wennParserError(cls, exception=None, provider=None):
return ParserError(1007, exception, provider)
@classmethod
def ZCZCParserError(cls, exception=None, provider=None):
return ParserError(1008, exception, provider)
@classmethod
def IPTC7901ParserError(cls, exception=None, provider=None):
return ParserError(1009, exception, provider)
class IngestFileError(SuperdeskIngestError):
_codes = {
3001: 'Destination folder could not be created',
3002: 'Ingest file could not be copied'
}
@classmethod
def folderCreateError(cls, exception=None, provider=None):
return IngestFileError(3001, exception, provider)
@classmethod
def fileMoveError(cls, exception=None, provider=None):
return IngestFileError(3002, exception, provider)
class IngestApiError(SuperdeskIngestError):
_codes = {
4000: "Unknown API ingest error",
4001: "API ingest connection has timed out.",
4002: "API ingest has too many redirects",
4003: "API ingest has request error",
4004: "API ingest Unicode Encode Error",
4005: 'API ingest xml parse error',
        4006: 'API service not found (404) error',
4007: 'API authorization error',
}
@classmethod
def apiGeneralError(cls, exception=None, provider=None):
return cls(4000, exception, provider)
@classmethod
def apiTimeoutError(cls, exception=None, provider=None):
return cls(4001, exception, provider)
@classmethod
def apiRedirectError(cls, exception=None, provider=None):
return cls(4002, exception, provider)
@classmethod
def apiRequestError(cls, exception=None, provider=None):
return cls(4003, exception, provider)
@classmethod
def apiUnicodeError(cls, exception=None, provider=None):
return cls(4004, exception, provider)
@classmethod
def apiParseError(cls, exception=None, provider=None):
return cls(4005, exception, provider)
@classmethod
def apiNotFoundError(cls, exception=None, provider=None):
return cls(4006, exception, provider)
@classmethod
def apiAuthError(cls, exception=None, provider=None):
return cls(4007, exception, provider)
class IngestFtpError(SuperdeskIngestError):
_codes = {
5000: "FTP ingest error",
5001: "FTP parser could not be found"
}
@classmethod
def ftpError(cls, exception=None, provider=None):
return IngestFtpError(5000, exception, provider)
@classmethod
def ftpUnknownParserError(cls, exception=None, provider=None, filename=None):
if provider:
logger.exception("Provider: {} - File: {} unknown file format. "
"Parser couldn't be found.".format(provider.get('name', 'Unknown provider'), filename))
return IngestFtpError(5001, exception, provider)
class IngestEmailError(SuperdeskIngestError):
_codes = {
6000: "Email authentication failure",
6001: "Email parse error",
6002: "Email ingest error"
}
@classmethod
def emailLoginError(cls, exception=None, provider=None):
return IngestEmailError(6000, exception, provider)
@classmethod
def emailParseError(cls, exception=None, provider=None):
return IngestEmailError(6001, exception, provider)
@classmethod
def emailError(cls, exception=None, provider=None):
return IngestEmailError(6002, exception, provider)
class SuperdeskPublishError(SuperdeskError):
def __init__(self, code, exception, destination=None):
super().__init__(code)
self.system_exception = exception
destination = destination or {}
self.destination_name = destination.get('name', 'Unknown destination') if destination else 'Unknown destination'
if exception:
exception_msg = str(exception)[-200:]
update_notifiers('error',
                             "Error [%s] on a Subscriber's destination {{name}}: %s" % (code, exception_msg),
resource='subscribers' if destination else None,
name=self.destination_name,
provider_id=destination.get('_id', ''))
if destination:
logger.error("{}: {} on destination {}".format(self, exception, self.destination_name))
else:
logger.error("{}: {}".format(self, exception))
class FormatterError(SuperdeskPublishError):
    _codes = {
        7001: "Article couldn't be converted to NITF format",
        7002: "Article couldn't be converted to AAP IPNews format",
        7003: "Article couldn't be converted to ANPA",
        7004: "Article couldn't be converted to NinJS",
        7005: "Article couldn't be converted to NewsML 1.2 format",
        7006: "Article couldn't be converted to NewsML G2 format",
        7007: "Article couldn't be converted to Bulletin Builder format",
        7008: "Article couldn't be converted to AAP SMS format"
    }
@classmethod
def nitfFormatterError(cls, exception=None, destination=None):
return FormatterError(7001, exception, destination)
@classmethod
    def AAPIpNewsFormatterError(cls, exception=None, destination=None):
return FormatterError(7002, exception, destination)
@classmethod
def AnpaFormatterError(cls, exception=None, destination=None):
return FormatterError(7003, exception, destination)
@classmethod
def ninjsFormatterError(cls, exception=None, destination=None):
return FormatterError(7004, exception, destination)
@classmethod
def newml12FormatterError(cls, exception=None, destination=None):
return FormatterError(7005, exception, destination)
@classmethod
def newmsmlG2FormatterError(cls, exception=None, destination=None):
return FormatterError(7006, exception, destination)
@classmethod
def bulletinBuilderFormatterError(cls, exception=None, destination=None):
return FormatterError(7007, exception, destination)
@classmethod
def AAPSMSFormatterError(cls, exception=None, destination=None):
return FormatterError(7008, exception, destination)
class SubscriberError(SuperdeskPublishError):
_codes = {
8001: 'Subscriber is closed'
}
@classmethod
def subscriber_inactive_error(cls, exception=None, destination=None):
        return SubscriberError(8001, exception, destination)
class PublishQueueError(SuperdeskPublishError):
_codes = {
9001: 'Item could not be updated in the queue',
9002: 'Item format could not be recognized',
9004: 'Schedule information could not be processed',
9005: 'State of the content item could not be updated',
9007: 'Previous take is either not published or killed',
9008: 'A post-publish action has happened on item',
        9009: 'Item could not be queued',
        9010: 'Article could not be found'
    }
@classmethod
def item_update_error(cls, exception=None, destination=None):
return PublishQueueError(9001, exception, destination)
@classmethod
def unknown_format_error(cls, exception=None, destination=None):
return PublishQueueError(9002, exception, destination)
@classmethod
def bad_schedule_error(cls, exception=None, destination=None):
return PublishQueueError(9004, exception, destination)
@classmethod
def content_update_error(cls, exception=None, destination=None):
return PublishQueueError(9005, exception, destination)
@classmethod
def previous_take_not_published_error(cls, exception=None, destination=None):
return PublishQueueError(9007, exception, destination)
@classmethod
def post_publish_exists_error(cls, exception=None, destination=None):
return PublishQueueError(9008, exception, destination)
@classmethod
def item_not_queued_error(cls, exception=None, destination=None):
return PublishQueueError(9009, exception, destination)
@classmethod
def article_not_found_error(cls, exception=None, destination=None):
return PublishQueueError(9010, exception, destination)
class PublishFtpError(SuperdeskPublishError):
_codes = {
10000: "FTP publish error"
}
@classmethod
def ftpError(cls, exception=None, destination=None):
return PublishFtpError(10000, exception, destination)
class PublishEmailError(SuperdeskPublishError):
_codes = {
11000: "Email publish error",
11001: "Recipient could not be found for destination"
}
@classmethod
def emailError(cls, exception=None, destination=None):
return PublishEmailError(11000, exception, destination)
@classmethod
def recipientNotFoundError(cls, exception=None, destination=None):
return PublishEmailError(11001, exception, destination)
class PublishODBCError(SuperdeskPublishError):
_codes = {
12000: "ODBC publish error"
}
@classmethod
def odbcError(cls, exception=None, destination=None):
return PublishODBCError(12000, exception, destination)
class PublishFileError(SuperdeskPublishError):
_codes = {
13000: "File publish error"
}
@classmethod
def fileSaveError(cls, exception=None, destinations=None):
return PublishFileError(13000, exception, destinations)
class PublishHTTPPushError(SuperdeskPublishError):
_codes = {
14000: "HTTP push publish error",
}
@classmethod
def httpPushError(cls, exception=None, destination=None):
return PublishHTTPPushError(14000, exception, destination)
| agpl-3.0 | 6,312,404,106,729,446,000 | 32.489982 | 120 | 0.665724 | false |
anntzer/scikit-learn | sklearn/linear_model/_least_angle.py | 3 | 71554 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from joblib import Parallel
from ._base import LinearModel
from ..base import RegressorMixin, MultiOutputMixin
# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs'
from ..utils import arrayfuncs, as_float_array # type: ignore
from ..utils import check_random_state
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
SOLVE_TRIANGULAR_ARGS = {'check_finite': False}
@_deprecate_positional_args
def lars_path(
X,
y,
Xy=None,
*,
Gram=None,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False
):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Input data. Note that if X is None then the Gram matrix must be
specified, i.e., cannot be None or False.
y : None or array-like of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto', array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lars_path_gram
lasso_path
lasso_path_gram
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if X is None and Gram is not None:
raise ValueError(
'X cannot be None if Gram is not None'
'Use lars_path_gram to avoid passing X and y.'
)
return _lars_path_solver(
X=X, y=y, Xy=Xy, Gram=Gram, n_samples=None, max_iter=max_iter,
alpha_min=alpha_min, method=method, copy_X=copy_X,
eps=eps, copy_Gram=copy_Gram, verbose=verbose, return_path=return_path,
return_n_iter=return_n_iter, positive=positive)
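# A minimal usage sketch on synthetic data (illustrative): `coefs` has one
# column per knot of the path, matching the length of `alphas`.
def _lars_path_example():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=50, n_features=10, random_state=0)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    assert coefs.shape == (10, len(alphas))
    return alphas, active, coefs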
@_deprecate_positional_args
def lars_path_gram(
Xy,
Gram,
*,
n_samples,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False
):
"""lars_path in the sufficient stats mode [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
Xy : array-like of shape (n_samples,) or (n_samples, n_targets)
Xy = np.dot(X.T, y).
Gram : array-like of shape (n_features, n_features)
Gram = np.dot(X.T * X).
n_samples : int or float
Equivalent size of sample.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lars_path
lasso_path
lasso_path_gram
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
return _lars_path_solver(
X=None, y=None, Xy=Xy, Gram=Gram, n_samples=n_samples,
max_iter=max_iter, alpha_min=alpha_min, method=method,
copy_X=copy_X, eps=eps, copy_Gram=copy_Gram,
verbose=verbose, return_path=return_path,
return_n_iter=return_n_iter, positive=positive)
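# Sketch (illustrative): the same path computed from sufficient statistics
# only, which is useful when X itself is unavailable but X'X and X'y are.
def _lars_path_gram_example():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=50, n_features=10, random_state=0)
    G, Xy = np.dot(X.T, X), np.dot(X.T, y)
    alphas, active, coefs = lars_path_gram(Xy=Xy, Gram=G,
                                           n_samples=X.shape[0],
                                           method='lasso')
    return alphas, active, coefs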
def _lars_path_solver(
X,
y,
Xy=None,
Gram=None,
n_samples=None,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False,
):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or ndarray of shape (n_samples, n_features)
Input data. Note that if X is None then Gram must be specified,
i.e., cannot be None or False.
y : None or ndarray of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
`Xy = np.dot(X.T, y)` that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
n_samples : int or float, default=None
Equivalent size of sample. If `None`, it will be `n_samples`.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if method == "lar" and positive:
raise ValueError(
"Positive constraint not supported for 'lar' " "coding method."
)
n_samples = n_samples if n_samples is not None else y.size
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if Gram is None or Gram is False:
Gram = None
if X is None:
raise ValueError('X and Gram cannot both be unspecified.')
elif isinstance(Gram, str) and Gram == 'auto' or Gram is True:
if Gram is True or X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
else:
Gram = None
elif copy_Gram:
Gram = Gram.copy()
if Gram is None:
n_features = X.shape[1]
else:
n_features = Cov.shape[0]
if Gram.shape != (n_features, n_features):
raise ValueError('The shapes of the inputs Gram and Xy'
' do not match.')
if copy_X and X is not None and Gram is None:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
if Gram is None:
L = np.empty((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
else:
L = np.empty((max_features, max_features), dtype=Gram.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (Cov,))
solve_cholesky, = get_lapack_funcs(('potrs',), (L,))
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
if Gram is not None:
Gram_copy = Gram.copy()
Cov_copy = Cov.copy()
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is
                # Fortran-contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**SOLVE_TRIANGULAR_ARGS)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e.'
' Reduce max_iter or increase eps parameters.'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # the remaining correlation with the regressors. Time to bail out.
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, _ = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, _ = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
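        # LARS step size: gamma_ is the smallest positive step at which some
        # inactive variable's correlation catches up with the common
        # correlation C of the active set, i.e. the minimum over the entries
        # of (C - Cov) / (AA - corr_eq_dir) and (C + Cov) / (AA + corr_eq_dir),
        # capped by C / AA (the full least-squares step). tiny32 guards
        # against division by zero; when positive=True only g1 is considered.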
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
coefs[-add_features:] = 0
alphas = np.resize(alphas, n_iter + add_features)
alphas[-add_features:] = 0
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
for ii in idx:
arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
n_active -= 1
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
n_nonzero_coefs : int, default=500
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of shape (n_alphas,) or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.Lars(n_nonzero_coefs=1)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
Lars(n_nonzero_coefs=1)
>>> print(reg.coef_)
[ 0. -1.11...]
See Also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
method = "lar"
positive = False
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(float).eps, copy_X=True, fit_path=True,
jitter=None, random_state=None):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
@staticmethod
def _get_gram(precompute, X, y):
if (not hasattr(precompute, '__array__')) and (
(precompute is True) or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
precompute = np.dot(X.T, X)
return precompute
def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
"""Auxiliary method to fit the model using X, y as training data"""
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
Gram = self._get_gram(self.precompute, X, y)
self.alphas_ = []
self.n_iter_ = []
self.coef_ = np.empty((n_targets, n_features))
if fit_path:
self.active_ = []
self.coef_path_ = []
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_[k] = coef_path[:, -1]
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
if self.jitter is not None:
rng = check_random_state(self.random_state)
noise = rng.uniform(high=self.jitter, size=len(y))
y = y + noise
self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path,
Xy=Xy)
return self
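# Sketch (illustrative): with a two-column target the fitted attributes become
# per-target collections, e.g. `coef_` is (n_targets, n_features) while
# `alphas_`, `active_` and `coef_path_` are lists with one entry per target.
def _lars_multioutput_example():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 5)
    Y = np.c_[X[:, 0], 2.0 * X[:, 1]]
    reg = Lars(n_nonzero_coefs=2).fit(X, Y)
    return reg.coef_.shape  # (2, 5)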
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLars(alpha=0.01)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
LassoLars(alpha=0.01)
>>> print(reg.coef_)
[ 0. -0.963257...]
See Also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
LassoLarsIC
sklearn.decomposition.sparse_encode
"""
method = 'lasso'
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(float).eps, copy_X=True, fit_path=True,
positive=False, jitter=None, random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
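# Sketch (illustrative): with positive=True every coefficient on the path is
# constrained to be non-negative; as noted above, the result only matches the
# coordinate-descent Lasso down to the smallest alpha reached by LARS.
def _lasso_lars_positive_example():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=50, n_features=8, random_state=0)
    reg = LassoLars(alpha=0.1, positive=True).fit(X, y)
    return bool((reg.coef_ >= 0).all())  # True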
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
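# Sketch (illustrative): this helper is what LarsCV runs on every CV split,
# fitting the path on the training part and evaluating the prediction
# residues of each point of the path on the held-out part.
def _lars_path_residues_example():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=60, n_features=10, random_state=0)
    alphas, active, coefs, residues = _lars_path_residues(
        X[:40], y[:40], X[40:], y[40:], method='lasso')
    return residues.shape  # (n_alphas, 20)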
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of lists, the outer list length is `n_targets`.
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Examples
--------
>>> from sklearn.linear_model import LarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
>>> reg = LarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9996...
>>> reg.alpha_
0.0254...
>>> reg.predict(X[:1,])
array([154.0842...])
See Also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = "lar"
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=None, eps=np.finfo(float).eps,
copy_X=True):
self.max_iter = max_iter
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
super().__init__(fit_intercept=fit_intercept,
verbose=verbose, normalize=normalize,
precompute=precompute,
n_nonzero_coefs=500,
eps=eps, copy_X=copy_X, fit_path=True)
def _more_tags(self):
return {'multioutput': False}
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = self._validate_data(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
# As we use cross-validation, the Gram matrix is not precomputed here
Gram = self.precompute
if hasattr(Gram, '__array__'):
warnings.warn('Parameter "precompute" cannot be an array in '
'%s. Automatically switch to "auto" instead.'
% self.__class__.__name__)
Gram = 'auto'
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
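        # Each fold returns residues on its own alpha grid; below they are reversed
        # into increasing-alpha order, padded at the ends where needed, and
        # interpolated onto the shared `all_alphas` grid so the squared prediction
        # errors can be averaged over the left-out samples into one MSE column per fold.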
for index, (alphas, _, _, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha,
Xy=None, fit_path=True)
return self
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm.
See glossary entry for :term:`cross-validation estimator`.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool or 'auto' , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
active_ : list of int
Indices of active variables at the end of the path.
Examples
--------
>>> from sklearn.linear_model import LassoLarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(noise=4.0, random_state=0)
>>> reg = LassoLarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9992...
>>> reg.alpha_
0.0484...
>>> reg.predict(X[:1,])
array([-77.8723...])
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
    However, it is more sensitive to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See Also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=None, eps=np.finfo(float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.max_iter = max_iter
self.normalize = normalize
self.precompute = precompute
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
self.copy_X = copy_X
self.positive = positive
# XXX : we don't use super().__init__
# to avoid setting n_nonzero_coefs
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : {'bic' , 'aic'}, default='aic'
The type of criterion to use.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If a list, it will be of length `n_targets`.
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array-like of shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criterion is
chosen. This value is larger by a factor of ``n_samples`` compared to
Eqns. 2.15 and 2.16 in (Zou et al, 2007).
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLarsIC(criterion='bic')
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
LassoLarsIC(criterion='bic')
>>> print(reg.coef_)
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See Also
--------
lars_path, LassoLars, LassoLarsCV
"""
@_deprecate_positional_args
def __init__(self, criterion='aic', *, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
self.fit_path = True
def _more_tags(self):
return {'multioutput': False}
def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
training data.
y : array-like of shape (n_samples,)
target values. Will be cast to X's dtype if necessary
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
if copy_X is None:
copy_X = self.copy_X
X, y = self._validate_data(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, copy_X)
Gram = self.precompute
alphas_, _, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=self.max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
sigma2 = np.var(y)
df = np.zeros(coef_path_.shape[1], dtype=int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T @ Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
eps64 = np.finfo('float64').eps
self.criterion_ = (n_samples * mean_squared_error / (sigma2 + eps64) +
K * df) # Eqns. 2.15--16 in (Zou et al, 2007)
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
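# A minimal usage sketch of the model-selection routes implemented above,
# contrasting the information-criterion choice of alpha (LassoLarsIC with AIC/BIC)
# with the cross-validated choice (LassoLarsCV). It is meant to be run as a
# standalone snippet with scikit-learn installed; the synthetic data and its
# parameters are arbitrary assumptions, not part of the library.
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    from sklearn.linear_model import LassoLarsCV, LassoLarsIC

    X_demo, y_demo = make_regression(n_samples=200, n_features=20, noise=4.0,
                                     random_state=0)
    # BIC uses K = log(n_samples) instead of K = 2, so it penalises extra
    # non-zero coefficients more heavily than AIC once n_samples exceeds ~7.
    for est in (LassoLarsIC(criterion='aic'),
                LassoLarsIC(criterion='bic'),
                LassoLarsCV(cv=5)):
        est.fit(X_demo, y_demo)
        print(type(est).__name__, getattr(est, 'criterion', 'cv'), est.alpha_)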
| bsd-3-clause | 3,500,082,720,243,523,600 | 36.502096 | 79 | 0.588451 | false |
floooh/fips-hello-dep2 | fips-generators/hello_generator.py | 1 | 1203 | """fips imported code generator for testing"""
Version = 2
import genutil as util
#-------------------------------------------------------------------------------
def generateHeader(func_name, msg, hdrPath) :
with open(hdrPath, 'w') as f :
f.write("// #version:{}#\n".format(Version))
f.write("extern void {}(void);".format(func_name))
#-------------------------------------------------------------------------------
def generateSource(func_name, msg, srcPath) :
with open(srcPath, 'w') as f :
f.write("// #version:{}#\n".format(Version))
f.write("#include <stdio.h>\n")
f.write("void {}() {{\n".format(func_name))
f.write(' printf("{}\\n");\n'.format(msg))
f.write('}')
#-------------------------------------------------------------------------------
def generate(input, out_src, out_hdr, func_name, msg) :
if util.isDirty(Version, [input], [out_src, out_hdr]) :
print('## generating {}'.format(out_hdr))
generateHeader(func_name, msg, out_hdr)
print('## generating {}'.format(out_src))
generateSource(func_name, msg, out_src)
else :
print('## nothing to do for {}'.format(input))
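# Illustrative call (hypothetical file names): generate('hello.yml', 'hello.c',
# 'hello.h', 'SayHello', 'Hello World') would (re)write hello.h declaring
# `extern void SayHello(void);` and hello.c defining SayHello() to printf the
# message, but only when genutil.isDirty reports the outputs are out of date.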
| mit | -6,225,001,957,906,463,000 | 39.1 | 80 | 0.46384 | false |
mlperf/inference_results_v0.5 | closed/Google/code/ssd-large/tpu-ssd-large/home/kbuilder/mlperf-inference/google3/third_party/mlperf/inference/ssd/offline/accuracy_coco.py | 1 | 3987 | """Tool to calculate accuracy for loadgen accuracy output found in mlperf_log_accuracy.json. We assume that loadgen's query index is in the same order as the images in coco's annotations/instances_val2017.json."""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
from absl import flags
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import tensorflow as tf
FLAGS = flags.FLAGS
# pylint: disable=missing-docstring
flags.DEFINE_string(
"mlperf_accuracy_file",
default=None,
help="path to mlperf_log_accuracy.json")
flags.DEFINE_string("coco_dir", default=None, help="coco directory")
flags.DEFINE_bool("verbose", default=True, help="verbose messages")
flags.DEFINE_string(
"output_file", default="coco-results.json", help="path to output file")
flags.DEFINE_bool("use_inv_map", default=True, help="use inverse label map")
def main():
cocoGt = COCO(os.path.join(FLAGS.coco_dir, "instances_val2017.json"))
if FLAGS.use_inv_map:
inv_map = [0] + cocoGt.getCatIds() # First label in inv_map is not used
with tf.gfile.Open(FLAGS.mlperf_accuracy_file, "r") as f:
results = json.load(f)
detections = []
image_ids = set()
seen = set()
no_results = 0
image_map = [img for img in cocoGt.dataset["images"]]
for j in results:
idx = j["qsl_idx"]
# de-dupe in case loadgen sends the same image multiple times
if idx in seen:
continue
seen.add(idx)
# reconstruct from mlperf accuracy log
# what is written by the benchmark is an array of float32's:
# id, box[0], box[1], box[2], box[3], score, detection_class
# note that id is a index into instances_val2017.json, not the
# actual image_id
data = np.frombuffer(bytes.fromhex(j["data"]), np.float32)
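    # each detection is a 7-float32 record, so fewer than 7 values means the
    # image produced no detections and is only registered with pycoco below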
if len(data) < 7:
# handle images that had no results
image = image_map[idx]
# by adding the id to image_ids we make pycoco aware of
# the no-result image
image_ids.add(image["id"])
no_results += 1
if FLAGS.verbose:
print("no results: {}, idx={}".format(image["coco_url"], idx))
continue
for i in range(0, len(data), 7):
image_idx, ymin, xmin, ymax, xmax, score, label = data[i:i + 7]
image_idx = int(image_idx)
image = image_map[idx]
if image_idx != idx:
print("ERROR: loadgen({}) and payload({}) disagree on image_idx".format(
idx, image_idx))
image_id = image["id"]
height, width = image["height"], image["width"]
ymin *= height
xmin *= width
ymax *= height
xmax *= width
loc = os.path.join(FLAGS.coco_dir, "val2017", image["file_name"])
label = int(label)
if FLAGS.use_inv_map:
label = inv_map[label]
# pycoco wants {imageID,x1,y1,w,h,score,class}
detections.append({
"image_id": image_id,
"image_loc": loc,
"category_id": label,
"bbox": [
float(xmin),
float(ymin),
float(xmax - xmin),
float(ymax - ymin)
],
"score": float(score)
})
image_ids.add(image_id)
with tf.gfile.Open(FLAGS.output_file, "w") as fp:
json.dump(detections, fp, sort_keys=True, indent=4)
cocoDt = cocoGt.loadRes(
FLAGS.output_file) # Load from file to bypass error with Python3
cocoEval = COCOeval(cocoGt, cocoDt, iouType="bbox")
cocoEval.params.imgIds = list(image_ids)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("mAP={:.3f}%".format(100. * cocoEval.stats[0]))
if FLAGS.verbose:
print("found {} results".format(len(results)))
print("found {} images".format(len(image_ids)))
print("found {} images with no results".format(no_results))
print("ignored {} dupes".format(len(results) - len(seen)))
return cocoEval.stats[0]
if __name__ == "__main__":
main()
| apache-2.0 | -307,164,166,966,539,650 | 30.393701 | 212 | 0.635315 | false |
Chedi/django-iban-field | django_iban/specification.py | 1 | 7562 | from collections import namedtuple
from django_iban.utils import clean_iban
IBANPartSpecification = namedtuple('IBANPartSpecification', ["length", "data_type"])
class IBANSpecification(object):
MASK_DATATYPE_MAP = {
'a':'a',
'n':'9',
'c':'w',
}
REGEX_DATATYPE_MAP = {
'a': '[A-Z]' ,
'n': '[0-9]' ,
'c': '[A-Z0-9]',
}
def __init__(self, country_name, country_code, bank_format, account_format):
self.country_name = country_name
self.country_code = country_code
self.bank_specification = self.decode_format(bank_format )
self.account_specification = self.decode_format(account_format)
@property
def bank_field_length(self):
return sum((_.length for _ in self.bank_specification))
@property
def account_field_length(self):
return sum((_.length for _ in self.account_specification))
def field_mask(self, specification):
return " ".join([
self.MASK_DATATYPE_MAP[part.data_type] * part.length
for part in specification if part.length > 0])
def validation_regex(self, specification):
return "".join([
"%s{%s}" % (self.REGEX_DATATYPE_MAP[part.data_type], part.length)
for part in specification if part.length > 0])
@property
def bank_regex(self):
return self.validation_regex(self.bank_specification)
@property
def account_regex(self):
return self.validation_regex(self.account_specification)
@property
def iban_regex(self):
return "[A-Z]{2}[0-9]{2}" + self.bank_regex + self.account_regex
@property
def bank_field_mask(self):
return self.field_mask(self.bank_specification)
@property
def account_field_mask(self):
return self.field_mask(self.account_specification)
@property
def total_length(self):
return 4 + self.bank_field_length + self.account_field_length
def decode_format(self, data_format):
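        # A format string such as "0 8n 2n" is split on whitespace into parts; a
        # trailing 'n' (numeric) or 'a' (letters) gives the data type and the rest
        # of the part is the length, while anything else is alphanumeric 'c'.
        # So "0 8n 2n" -> lengths (0, 8, 2) with types ('c', 'n', 'n').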
return [
IBANPartSpecification(
length = int(part[:-1]) if part[-1] in ("n", "a") else int(part),
data_type = part[-1] if part[-1] in ("n", "a") else "c")
for part in filter(bool, data_format.split())]
@staticmethod
def checksum(value):
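        # ISO 7064 mod-97-10 check digits: drop the leading country code and old
        # check digits, append the country code plus '00' as placeholder check
        # digits, map letters to numbers (A=10 ... Z=35, hence ord(x) - 55), and
        # the new check digits are 98 - (that number mod 97), zero-padded to two.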
value = clean_iban(value)
value = value[4:] + value[:2] + '00'
value_digits = ''
for x in value:
if '0' <= x <= '9':
value_digits += x
elif 'A' <= x <= 'Z':
value_digits += str(ord(x) - 55)
else:
raise Exception('{} is not a valid character for IBAN.'.format(x))
return '%02d' % (98 - int(value_digits) % 97)
IBAN_SPECIFICATION_CONFIG = {
"AD": IBANSpecification("Andorra" , "AD", "0 4n 4n", "0 12 0 "),
"AL": IBANSpecification("Albania" , "AL", "0 8n 0 ", "0 16 0 "),
"AT": IBANSpecification("Austria" , "AT", "0 5n 0 ", "0 11n 0 "),
"BA": IBANSpecification("Bosnia and Herzegovina", "BA", "0 3n 3n", "0 8n 2n"),
"BE": IBANSpecification("Belgium" , "BE", "0 3n 0 ", "0 7n 2n"),
"BG": IBANSpecification("Bulgaria" , "BG", "0 4a 4n", "2n 8 0 "),
"CH": IBANSpecification("Switzerland" , "CH", "0 5n 0 ", "0 12 0 "),
"CY": IBANSpecification("Cyprus" , "CY", "0 3n 5n", "0 16 0 "),
"CZ": IBANSpecification("Czech Republic" , "CZ", "0 4n 0 ", "0 16n 0 "),
"DE": IBANSpecification("Germany" , "DE", "0 8n 0 ", "0 10n 0 "),
"DK": IBANSpecification("Denmark" , "DK", "0 4n 0 ", "0 9n 1n"),
"EE": IBANSpecification("Estonia" , "EE", "0 2n 0 ", "2n 11n 1n"),
"ES": IBANSpecification("Spain" , "ES", "0 4n 4n", "2n 10n 0 "),
"FI": IBANSpecification("Finland" , "FI", "0 6n 0 ", "0 7n 1n"),
"FO": IBANSpecification("Faroe Islands" , "FO", "0 4n 0 ", "0 9n 1n"),
"FR": IBANSpecification("France" , "FR", "0 5n 5n", "0 11 2n"),
"GB": IBANSpecification("United Kingdom" , "GB", "0 4a 6n", "0 8n 0 "),
"GE": IBANSpecification("Georgia" , "GE", "0 2a 0 ", "0 16n 0 "),
"GI": IBANSpecification("Gibraltar" , "GI", "0 4a 0 ", "0 15 0 "),
"GL": IBANSpecification("Greenland" , "GL", "0 4n 0 ", "0 9n 1n"),
"GR": IBANSpecification("Greece" , "GR", "0 3n 4n", "0 16 0 "),
"HR": IBANSpecification("Croatia" , "HR", "0 7n 0 ", "0 10n 0 "),
"HU": IBANSpecification("Hungary" , "HU", "0 3n 4n", "1n 15n 1n"),
"IE": IBANSpecification("Ireland" , "IE", "0 4a 6n", "0 8n 0 "),
"IL": IBANSpecification("Israel" , "IL", "0 3n 3n", "0 13n 0 "),
"IS": IBANSpecification("Iceland" , "IS", "0 4n 0 ", "2n 16n 0 "),
"IT": IBANSpecification("Italy" , "IT", "1a 5n 5n", "0 12 0 "),
"KW": IBANSpecification("Kuwait" , "KW", "0 4a 0 ", "0 22 0 "),
"KZ": IBANSpecification("Kazakhstan" , "KZ", "0 3n 0 ", "0 13 0 "),
"LB": IBANSpecification("Lebanon" , "LB", "0 4n 0 ", "0 20 0 "),
"LI": IBANSpecification("Liechtenstein" , "LI", "0 5n 0 ", "0 12 0 "),
"LT": IBANSpecification("Lithuania" , "LT", "0 5n 0 ", "0 11n 0 "),
"LU": IBANSpecification("Luxembourg" , "LU", "0 3n 0 ", "0 13 0 "),
"LV": IBANSpecification("Latvia" , "LV", "0 4a 0 ", "0 13 0 "),
"MC": IBANSpecification("Monaco" , "MC", "0 5n 5n", "0 11 2n"),
"ME": IBANSpecification("Montenegro" , "ME", "0 3n 0 ", "0 13n 2n"),
"MK": IBANSpecification("Macedonia" , "MK", "0 3n 0 ", "0 10 2n"),
"MR": IBANSpecification("Mauritania" , "MR", "0 5n 5n", "0 11n 2n"),
"MT": IBANSpecification("Malta" , "MT", "0 4a 5n", "0 18 0 "),
"MU": IBANSpecification("Mauritius" , "MU", "0 4a 4n", "0 15n 3a"),
"NL": IBANSpecification("Netherlands" , "NL", "0 4a 0 ", "0 10n 0 "),
"NO": IBANSpecification("Norway" , "NO", "0 4n 0 ", "0 6n 1n"),
"PL": IBANSpecification("Poland" , "PL", "0 8n 0 ", "0 16n 0 "),
"PT": IBANSpecification("Portugal" , "PT", "0 4n 4n", "0 11n 2n"),
"RO": IBANSpecification("Romania" , "RO", "0 4a 0 ", "0 16 0 "),
"RS": IBANSpecification("Serbia" , "RS", "0 3n 0 ", "0 13n 2n"),
"SA": IBANSpecification("Saudi Arabia" , "SA", "0 2n 0 ", "0 18 0 "),
"SE": IBANSpecification("Sweden" , "SE", "0 3n 0 ", "0 16n 1n"),
"SI": IBANSpecification("Slovenia" , "SI", "0 5n 0 ", "0 8n 2n"),
"SK": IBANSpecification("Slovak Republic" , "SK", "0 4n 0 ", "0 16n 0 "),
"SM": IBANSpecification("San Marino" , "SM", "1a 5n 5n", "0 12 0 "),
"TN": IBANSpecification("Tunisia" , "TN", "0 2n 3n", "0 13n 2n"),
"TR": IBANSpecification("Turkey" , "TR", "0 5n 0 ", "1 16 0 ")}
IBAN_GROUPING = 4
IBAN_MAX_LENGTH = 34
IBAN_MIN_LENGTH = min([_.total_length for _ in IBAN_SPECIFICATION_CONFIG.values()])
| gpl-3.0 | 7,448,661,612,872,638,000 | 49.751678 | 86 | 0.495239 | false |
jeffersonfparil/GTWAS_POOL_RADseq_SIM | BACKUP_SCRIPTS_20170930/assignPheno.py | 1 | 4355 | #!/usr/bin/env python
import os, subprocess, sys, math
import numpy as np
import matplotlib.pyplot as plt
work_DIR = sys.argv[1]
genoFile = sys.argv[2]
nQTL = int(sys.argv[3])
heritability = float(sys.argv[4])
model = int(sys.argv[5])
os.chdir(work_DIR)
if model == 1:
#################################################
# MODEL 1: additive effects alone
# y = Xb + e; e~N(0, Ve); Ve=Vg(1/1-h^2); Vg=sum(cor(Xq)bqbq')/4
#################################################
GEN = np.genfromtxt(genoFile, delimiter='\t', skip_header=1)
nLOCI = GEN.shape[0]
nIND = GEN.shape[1]
QTL_locations = np.random.choice(range(0, nLOCI), replace=False, size=nQTL)
QTL_locations.sort()
# DEFINING THE DISTRIBUTIONS OF THE EFFECTS:
mean_QTL = 100/(2*nQTL); var_QTL = 2 #normal QTL effects
#QTL effects:
QTL_effects = np.random.normal(mean_QTL, np.sqrt(var_QTL), size=nQTL) #for a mean QTL effect of ~5 and ~mean phenotypic value of 50
QTL_OUT = np.column_stack((QTL_locations, QTL_effects)) #for writing out
##########################################
X=np.transpose(GEN)
GFX=QTL_effects
nFX=nQTL
h2=heritability
#partionning the variance taking into account the linkage among associated SNPs
AssoX=X[:,QTL_locations]
Rho=np.corrcoef(AssoX,rowvar=0)
XtX=GFX.reshape(1,nFX)*GFX.reshape(nFX,1) #GFX * GFX' (nFXxnFX dimensions)
Vg=np.sum(Rho*XtX)/4
Ve=Vg*(1/h2-1)
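    # residual variance scaled so that Vg/(Vg+Ve) equals the requested heritability h2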
#Generating the phenotypes based on the variance components
Xb=np.matmul(AssoX, GFX) #alpha
e=np.random.normal(0,Ve**(0.5),nIND)
Y_model1=Xb+e
#OUTPUT
np.savetxt("Simulated_Lolium_perenne_QTL.out", QTL_OUT, fmt='%s' ,delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_PHENOTYPES.data", Y_model1, fmt='%s' ,delimiter="\t")
elif model == 2:
################################################# &#### FIX ME!!!! Y_model2 is giving me the middle finger! ;-P
# MODEL 2: additive genetic effects + transcript levels
    # y = Xg + Ba + Zt + e; e~N(0,Ve); Ve = (Vg+Vt)*(1/h^2 - 1)
#################################################
transBase = sys.argv[6]
transGeno = sys.argv[7]
GEN = np.genfromtxt(genoFile, delimiter='\t', skip_header=1)
T_base = np.genfromtxt(transBase, delimiter='\t', dtype=int)
T_geno = np.genfromtxt(transGeno, delimiter='\t', dtype=int)
nLOCI = GEN.shape[0]
nIND = GEN.shape[1]
nTRANS = len(T_base)
QTL_locations = np.random.choice(range(0, nLOCI), replace=False, size=nQTL)
QTL_locations.sort()
# DEFINING THE DISTRIBUTIONS OF THE EFFECTS:
mean_QTL = 100/(2*nQTL); var_QTL = 2 #normal QTL effects
mean_bT = (mean_QTL /4); var_bT = 1 #normal base trancript level effects
mean_gT = (mean_QTL /2); var_gT = 1 #normal genotype-specific trancript level effecs
#QTL effects:
QTL_effects = np.random.normal(mean_QTL, np.sqrt(var_QTL), size=nQTL) #for a mean QTL effect of ~5 and ~mean phenotypic value of 50
QTL_OUT = np.column_stack((QTL_locations, QTL_effects)) #for writing out
#Transcript Base-levels effects:
nCausalTrans = int(np.ceil(np.random.normal(nQTL, 1, size=1))[0]) #number of transcripts that affect the phenotype
locCausalTrans = np.random.choice(nTRANS, size=nCausalTrans, replace=False)
# (i.)base-level effects:
T_base_effects = np.random.normal(mean_bT, np.sqrt(var_bT), size=nCausalTrans)
# (ii.)genotype-specific level effects:
T_geno_effects = np.random.normal(mean_gT, np.sqrt(var_gT), size=nCausalTrans)
##########################################
X=np.transpose(GEN)
GFX=QTL_effects
nFX=nQTL
h2=heritability
T0 = T_base
T1 = T_geno
t0FX = np.zeros((nTRANS,1)); t0FX[locCausalTrans,0] = T_base_effects
t1FX = np.zeros((nTRANS,1)); t1FX[locCausalTrans,0] = T_geno_effects
    #variance partitioning adding Vg with Vt--> variation due to genotype-specific transcript abundance
    #Vg: additive genetic variance from the QTL effects (computed as in model 1)
    AssoX=X[:,QTL_locations]
    RhoX=np.corrcoef(AssoX,rowvar=0)
    XtX=GFX.reshape(1,nFX)*GFX.reshape(nFX,1)
    Vg=np.sum(RhoX*XtX)/4
    #Vt: variance due to genotype-specific transcript abundance
    AssoZ=T1[:,locCausalTrans]
    Rho=np.corrcoef(AssoZ,rowvar=0)
    ZtZ=T_geno_effects.reshape(1,nCausalTrans)*T_geno_effects.reshape(nCausalTrans,1)
    Vt=np.sum(Rho*ZtZ)/4
    Vet=(Vg+Vt)*(1/h2-1)
#generating the phenotypes using the new residual distribution et:Vet
Xg = np.matmul(X[:,QTL_locations], GFX)
Ba = np.sum(T0[locCausalTrans]*T_base_effects)
Zt = np.matmul(T1, t1FX)
et=np.random.normal(0,Vet**(0.5),nIND)
Y_model2 = Xg + Ba + Zt[:,0] + et
#OUTPUT
np.savetxt("Simulated_Lolium_perenne_QTL.out", QTL_OUT, fmt='%s' ,delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_PHENOTYPES.data", Y_model1, fmt='%s' ,delimiter="\t") | gpl-3.0 | 717,945,124,301,054,200 | 40.09434 | 132 | 0.660161 | false |
iamhssingh/django-userprofile | drf_user/__init__.py | 1 | 2784 | """Django REST Framework - User! User App for Django with API Views"""
__title__ = "User - Django REST Framework"
__version__ = "0.0.8"
__author__ = "101Loop"
__license__ = "GPLv3"
default_app_config = "drf_user.apps.DRFUserConfig"
user_settings = {
"DEFAULT_ACTIVE_STATE": False,
"OTP": {
"LENGTH": 5,
"ALLOWED_CHARS": "1234567890",
"VALIDATION_ATTEMPTS": 3,
"SUBJECT": "OTP for Verification",
"COOLING_PERIOD": 3,
},
"MOBILE_VALIDATION": True,
"EMAIL_VALIDATION": True,
"REGISTRATION": {
"SEND_MAIL": False,
"SEND_MESSAGE": False,
"MAIL_SUBJECT": "Welcome to DRF-USER",
"SMS_BODY": "Your account has been created",
"TEXT_MAIL_BODY": "Your account has been created.",
"HTML_MAIL_BODY": "Your account has been created.",
},
}
def update_user_settings() -> dict:
"""
    Updates the default user settings with any overrides from the Django project settings
TODO: Think of a better way, using Signal preferably.
Returns
-------
user_settings: dict
Author: Himanshu Shankar (https://himanshus.com)
"""
from django.conf import settings
custom_settings = getattr(settings, "USER_SETTINGS", None)
if custom_settings:
if isinstance(custom_settings, dict):
for key, value in custom_settings.items():
if key not in ["OTP", "REGISTRATION"]:
user_settings[key] = value
elif key == "OTP":
if isinstance(value, dict):
for otp_key, otp_value in value.items():
user_settings["OTP"][otp_key] = otp_value
else:
raise TypeError("USER_SETTING attribute OTP must be a" " dict.")
elif key == "REGISTRATION":
if isinstance(value, dict):
for reg_key, reg_value in value.items():
user_settings["REGISTRATION"][reg_key] = reg_value
else:
raise TypeError(
"USER_SETTING attribute REGISTRATION" " must be a dict."
)
else:
raise TypeError("USER_SETTING must be a dict.")
if user_settings["REGISTRATION"]["SEND_MAIL"]:
if not getattr(settings, "EMAIL_HOST", None):
raise ValueError(
"EMAIL_HOST must be defined in django setting" " for sending mail."
)
if not getattr(settings, "EMAIL_FROM", None):
raise ValueError(
"EMAIL_FROM must be defined in django setting"
" for sending mail. Who is sending email?"
)
return user_settings
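# Illustrative override (assumed project settings.py, not executed here):
#     USER_SETTINGS = {
#         "DEFAULT_ACTIVE_STATE": True,
#         "OTP": {"LENGTH": 6},
#         "REGISTRATION": {"SEND_MAIL": True},
#     }
# update_user_settings() merges the OTP/REGISTRATION keys into the defaults above
# and, because SEND_MAIL is True, requires EMAIL_HOST and EMAIL_FROM to be set.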
| gpl-3.0 | -6,464,121,690,914,684,000 | 33.37037 | 88 | 0.529454 | false |
yusufm/mobly | tests/lib/mock_controller.py | 1 | 1335 | #!/usr/bin/env python3.4
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a mock third-party controller module used for unit testing Mobly.
import logging
MOBLY_CONTROLLER_CONFIG_NAME = "MagicDevice"
def create(configs):
objs = []
for c in configs:
if isinstance(c, dict):
c.pop("serial")
objs.append(MagicDevice(c))
return objs
def destroy(objs):
print("Destroying magic")
def get_info(objs):
infos = []
for obj in objs:
infos.append(obj.who_am_i())
return infos
class MagicDevice(object):
def __init__(self, config):
self.magic = config
def get_magic(self):
logging.info("My magic is %s.", self.magic)
return self.magic
def who_am_i(self):
return {"MyMagic": self.magic}
| apache-2.0 | 478,092,451,428,565,300 | 24.188679 | 75 | 0.674906 | false |
EIREXE/Olddulous | alembic/versions/6ffd5dd5efab_switch_ksp_version_to_game_version.py | 1 | 1201 | """switch ksp_version to game_version
Revision ID: 6ffd5dd5efab
Revises: 4e0500347ce7
Create Date: 2016-04-08 22:58:14.178434
"""
# revision identifiers, used by Alembic.
revision = '6ffd5dd5efab'
down_revision = '4e0500347ce7'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('modversion', sa.Column('gameversion_id', sa.Integer(), nullable=True))
op.create_foreign_key('modversion_gameversion_id_fkey', 'modversion', 'gameversion', ['gameversion_id'], ['id'])
op.execute('update modversion set gameversion_id=(select gameversion.id from gameversion where modversion.ksp_version = gameversion.friendly_version and gameversion.game_id = (SELECT mod.game_id from mod where mod.id=modversion.mod_id));')
op.drop_column('modversion', 'ksp_version')
def downgrade():
op.add_column('modversion', sa.Column('ksp_version', sa.String(64), nullable=True))
op.execute('update modversion set ksp_version=(select gameversion.friendly_version from gameversion where modversion.gameversion_id = gameversion.id);')
op.drop_constraint('modversion_gameversion_id_fkey', 'modversion', type_='foreignkey')
op.drop_column('modversion', 'gameversion_id')
| mit | 2,003,685,324,859,270,000 | 41.892857 | 243 | 0.745212 | false |
batmancn/MyLife | works/WebOVS/backend/app.py | 1 | 13251 | #!/usr/bin/env python
import os
import pam
import web
import ovsdb
import ofctrl
import simplejson as json
urls = (
'/', 'Index',
'/login', 'Login',
'/logout', 'Logout',
'/availableports', 'SwitchPorts',
'/swconf', 'SwitchConf',
'/switchinfo', 'SwitchInfo',
'/helptext', 'HelpText',
# All Bridges
'/bridges', 'Bridges',
'/bridges/add', 'Bridges',
# A single Bridge
'/bridges/([\w:.-]+)', 'Bridge',
'/bridges/([\w:.-]+)/(update|del)', 'Bridge',
# Controllers
'/bridges/([\w:.-]+)/controllers', 'Controller',
'/bridges/([\w:.-]+)/controllers/(update|del|add)', 'Controller',
# Normal Ports
'/bridges/([\w:.-]+)/ports', 'Ports',
'/bridges/([\w:.-]+)/ports/(\d+)/(update|del|add)', 'Ports',
# Tunnels
'/bridges/([\w:.-]+)/tunnels', 'Tunnel',
'/bridges/([\w:.-]+)/tunnels/([\w:.-]+)/(update|del|add)', 'Tunnel',
# Bonds
'/bridges/([\w:.-]+)/bonds', 'Bond',
'/bridges/([\w:.-]+)/bonds/([\w:.-]+)/(update|del|add)', 'Bond',
# Mirrors
'/bridges/([\w:.-]+)/mirrors', 'Mirror',
'/bridges/([\w:.-]+)/mirrors/([\w:.-]+)/(update|del|add)', 'Mirror',
# NetFlow
'/bridges/([\w:.-]+)/netflow', 'NetFlow',
'/bridges/([\w:.-]+)/netflow/(update|del|add)', 'NetFlow',
# sFlow
'/bridges/([\w:.-]+)/sflow', 'sFlow',
'/bridges/([\w:.-]+)/sflow/(update|del|add)', 'sFlow',
# Queue
'/bridges/([\w:.-]+)/queues', 'Queues',
'/bridges/([\w:.-]+)/queues/add', 'Queues',
'/bridges/([\w:.-]+)/queues/(\w{8})/(update|del)', 'Queue',
# Qos
'/bridges/([\w:.-]+)/qos', 'QoSes',
'/bridges/([\w:.-]+)/qos/add', 'QoSes',
'/bridges/([\w:.-]+)/qos/(\w{8})/(update|del)', 'QoS',
# Group Table
'/bridges/([\w:.-]+)/groups', 'Group',
'/bridges/([\w:.-]+)/groups/(\d+)/(update|del|add)', 'Group',
# Flows
'/bridges/([\w:.-]+)/tables', 'Tables',
'/bridges/([\w:.-]+)/tables/(\d+)/flows', 'Flows',
'/bridges/([\w:.-]+)/tables/(\d+)/flows/(update|add|del|clear)', 'Flows',
'/bridges/([\w:.-]+)/flows', 'FlowHandler',
# Meter Table
'/bridges/([\w:.-]+)/meters', 'Meter',
'/bridges/([\w:.-]+)/meters/(\d+)/(update|del|add)', 'Meter',
# Bundle control message
'/bridges/([\w:.-]+)/ofbundle', 'Bundle',
'/(.*)', 'Default'
)
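# Illustrative requests against the URL map above (host and port are assumptions,
# e.g. the web.py default of 0.0.0.0:8080):
#   GET  /bridges                          -> list all bridges
#   POST /bridges/add?name=br0             -> create bridge br0
#   GET  /bridges/br0/ports                -> list ports on br0
#   POST /bridges/br0/tables/0/flows/add   (flow definition in the request body)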
app = web.application(urls, globals())
if web.config.get('_session') is None:
session = web.session.Session(app, web.session.DiskStore('sessions'), initializer={'loggedin': False})
web.config._session = session
else:
session = web.config._session
render = web.template.render(os.path.join(os.path.dirname(os.getcwd()), 'frontend/'))
class Index(object):
def GET(self):
# redirect to layout template
if session.loggedin:
return render.index()
else:
return render.login()
class Login(object):
def GET(self):
getInput = web.input(username="", password="")
if pam.authenticate(getInput.username, getInput.password):
session.loggedin = True
return 'success'
else:
return 'fail'
class Logout(object):
def GET(self):
session.loggedin = False
raise web.seeother("/")
class SwitchPorts(object):
def GET(self):
return ovsdb.get_available_ports()
class SwitchInfo(object):
def GET(self):
return ovsdb.get_switch_resource()
class SwitchConf(object):
def GET(self):
"""
GET /swconf
"""
return ovsdb.get_switch_conf()
def POST(self):
"""
POST /swconf
"""
data = web.data()
return ovsdb.update_switch_conf(data)
class HelpText(object):
def GET(self):
return ovsdb.get_help_text()
class Bridges(object):
def GET(self):
"""
GET /bridges
"""
return ovsdb.fast_get_bridges()
def POST(self):
"""
POST /bridges/add?name=br0
"""
getInput = web.input()
# TODO, elaborate add_bridge
return ovsdb.add_bridge(str(getInput.name))
class Bridge(object):
def GET(self, name):
"""
GET /bridges/br0
"""
return ovsdb.get_bridge(name)
def POST(self, name, op):
"""
POST /bridges/br0/update
POST /bridges/br0/del
"""
data = web.data()
if op == "update":
# TODO, elaborate update_bridge
return ovsdb.update_bridge(name, data)
elif op == "del":
# TODO, elaborate del_bridge
return ovsdb.del_bridge(name)
class Controller(object):
def GET(self, name):
"""
GET /bridges/br0/controllers
"""
return ovsdb.get_controllers(name)
def POST(self, name, op):
"""
POST /bridges/br0/controllers/update
POST /bridges/br0/controllers/add
POST /bridges/br0/controllers/del
"""
data = web.data()
if op == "update":
return ovsdb.update_controller(name, data)
elif op == "del":
return ovsdb.del_controller(name, data)
elif op == "add":
return ovsdb.add_controller(name, data)
class Ports(object):
def GET(self, brname):
"""
GET /bridges/br0/ports
"""
return ovsdb.get_ports(brname)
def POST(self, brname, portname, op):
"""
POST /bridges/br0/ports/eth0/update
POST /bridges/br0/ports/eth0/add
POST /bridges/br0/ports/eth0/del
"""
data = web.data()
if op == "update":
return ovsdb.update_port(brname, data)
elif op == "del":
return ovsdb.del_port(brname, data)
elif op == "add":
return ovsdb.add_port(brname, data)
class Tunnel(object):
def GET(self, brname):
"""
GET /bridges/br0/tunnels
"""
return ovsdb.get_tunnels(brname)
def POST(self, brname, tulname, op):
"""
POST /bridges/br0/tunnels/gre0/update
POST /bridges/br0/tunnels/gre0/add
POST /bridges/br0/tunnels/gre0/del
"""
data = web.data()
if op == "update":
return ovsdb.update_tunnel(brname, data)
elif op == "del":
return ovsdb.del_tunnel(brname, data)
elif op == "add":
return ovsdb.add_tunnel(brname, data)
class Bond(object):
def GET(self, brname):
"""
GET /bridges/br0/bonds
"""
return ovsdb.get_bonds(brname)
def POST(self, brname, bondname, op):
"""
POST /bridges/br0/bonds/lag0/update
POST /bridges/br0/bonds/lag0/add
POST /bridges/br0/bonds/lag0/del
"""
data = web.data()
if op == "update":
return ovsdb.update_bond(brname, data)
elif op == "del":
return ovsdb.del_bond(brname, data)
elif op == "add":
return ovsdb.add_bond(brname, data)
class Mirror(object):
def GET(self, brname):
"""
GET /bridges/br0/mirrors
"""
return ovsdb.get_mirrors(brname)
def POST(self, brname, mirrorname, op):
"""
POST /bridges/br0/mirrors/M1/update
POST /bridges/br0/mirrors/M1/add
POST /bridges/br0/mirrors/M1/del
"""
data = web.data()
if op == "update":
return ovsdb.update_mirror(brname, data)
elif op == "del":
return ovsdb.del_mirror(brname, data)
elif op == "add":
return ovsdb.add_mirror(brname, data)
class NetFlow(object):
def GET(self, brname):
"""
GET /bridges/br0/netflow
"""
return ovsdb.get_netflows(brname)
def POST(self, brname, op):
"""
POST /bridges/br0/netflow/update
POST /bridges/br0/netflow/add
POST /bridges/br0/netflow/del
"""
data = web.data()
if op == "update":
return ovsdb.update_netflow(brname, data)
elif op == "del":
return ovsdb.del_netflow(brname, data)
elif op == "add":
return ovsdb.add_netflow(brname, data)
class sFlow(object):
def GET(self, brname):
"""
GET /bridges/br0/sflow
"""
return ovsdb.get_sflow(brname)
def POST(self, brname, op):
"""
POST /bridges/br0/sflow/update
POST /bridges/br0/sflow/add
POST /bridges/br0/sflow/del
"""
data = web.data()
if op == "update":
return ovsdb.update_sflow(brname, data)
elif op == "del":
return ovsdb.del_sflow(brname, data)
elif op == "add":
return ovsdb.add_sflow(brname, data)
class Queues(object):
def GET(self, brname):
"""
GET /bridges/br0/queues
"""
return ovsdb.get_queues()
def POST(self, brname):
"""
POST /bridges/br0/queues/add
"""
data = web.data()
return ovsdb.add_queue(data)
class Queue(object):
def GET(self):
pass
def POST(self, brname, uuid, op):
"""
POST /bridges/br0/queues/00000000/update
POST /bridges/br0/queues/00000000/del
"""
data = web.data()
if op == "update":
return ovsdb.update_queue(data)
elif op == "del":
return ovsdb.del_queue(data)
class QoSes(object):
def GET(self, brname):
"""
GET /bridges/br0/qos
"""
return ovsdb.get_all_qos()
def POST(self, brname):
"""
POST /bridges/br0/qos/add
"""
data = web.data()
return ovsdb.add_qos(data)
class QoS(object):
def GET(self):
pass
def POST(self, brname, uuid, op):
"""
POST /bridges/br0/qos/00000000/update
POST /bridges/br0/qos/00000000/del
"""
data = web.data()
if op == "update":
return ovsdb.update_qos(data)
elif op == "del":
return ovsdb.del_qos(data)
class Group(object):
def GET(self, brname):
return ovsdb.get_groups(brname)
def POST(self, name, gid, op):
data = web.data()
if op == "update":
return ovsdb.update_group(name, data)
elif op == "del":
return ovsdb.del_group(name, data)
elif op == "add":
return ovsdb.create_group(name, data)
def DELETE(self, brname):
return ovsdb.del_groups(brname)
class Meter(object):
def GET(self, brname):
return ovsdb.get_meters(brname)
def POST(self, name, gid, op):
data = web.data()
if op == "update":
return ovsdb.update_meter(name, data)
elif op == "del":
return ovsdb.del_meter(name, data)
elif op == "add":
return ovsdb.create_meter(name, data)
def DELETE(self, brname):
return ovsdb.del_meters(brname)
class Tables():
def GET(self, brname):
"""
GET /bridges/br0/tables
"""
wrapper = ofctrl.SimpleCtrl(brname)
return wrapper.get_tables()
class Flows():
def GET(self, brname, tid):
"""
GET /bridges/br0/tables/0/flows
"""
wrapper = ofctrl.SimpleCtrl(brname)
return wrapper.get_flows(int(tid))
def POST(self, brname, tid, op):
"""
POST /bridges/br0/tables/0/flows/update
POST /bridges/br0/tables/0/flows/add
POST /bridges/br0/tables/0/flows/del
POST /bridges/br0/tables/0/flows/clear
"""
data = web.data()
ofctl_wrapper = ofctrl.SimpleCtrl(brname)
if op == "update":
return ofctl_wrapper.mod_flow(data)
elif op == "del":
return ofctl_wrapper.del_flow(data)
elif op == "add":
return ofctl_wrapper.add_flow(data)
elif op == "clear":
return ofctl_wrapper.del_flows(tid)
class FlowHandler():
def GET(self, brname):
"""
GET /bridges/br0/flows
"""
wrapper = ofctrl.SimpleCtrl(brname)
allflows = []
resp = json.loads(wrapper.get_tables())
if resp["ret"] == 0:
for table in resp["tables"]:
for flow in table["flows"]:
allflows.append("{fields},actions={actions}".format(
fields=wrapper._flow_json_to_string(flow),
actions=flow["actions"]))
web.header('Content-Type', 'text/plain')
web.header('Content-disposition', 'attachment; filename=flows.txt')
return "\n".join(allflows)
def POST(self, brname):
"""
POST /bridges/br0/flows
"""
data = web.data()
wrapper = ofctrl.SimpleCtrl(brname)
return wrapper.add_flows(data)
class Bundle():
def POST(self, brname):
"""
POST /bridges/br0/ofbundle
"""
data = web.data()
ofctl_wrapper = ofctrl.SimpleCtrl(brname)
return ofctl_wrapper.bundle(data)
class Default():
def GET(self, whatever):
"""
Handle page-not found error
"""
return render.error()
if __name__ == "__main__":
app.run()
#if name:
#bridge = json.loads(ovsdb.get_bridge(str(name)))
#return self.render.bridge(bridge)
| gpl-3.0 | -1,458,616,825,737,984,500 | 26.434783 | 106 | 0.534148 | false |
hydraplatform/hydra-base | tests/test_attributes.py | 1 | 36075 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright 2013 to 2017 University of Manchester
#
# HydraPlatform is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HydraPlatform is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with HydraPlatform. If not, see <http://www.gnu.org/licenses/>
#
"""
NOTE:
All the methods that have the SKELETON key in their docstring are not yet (or only partially) implemented
"""
# Example of a SKELETON METHOD
# def test_(self, client):
# """
# SKELETON
# """
# pass
import logging
import copy
import json
import hydra_base as hb
from hydra_base.exceptions import HydraError
import datetime
import pytest
from hydra_base.lib.objects import JSONObject
log = logging.getLogger(__name__)
class TestAttribute:
"""
Test for attribute-based functionality
"""
"""
TESTED
"""
def test_add_attribute(self, client):
test_attr = JSONObject({
"name": f'Test Attribute {datetime.datetime.now()}',
"dimension_id": None
})
previous_all_attributes = client.get_attributes()
new_attr = client.add_attribute(test_attr)
assert new_attr.name == test_attr.name, \
"add_attribute didn't work"
all_attributes = client.get_attributes()
assert len(all_attributes) == len(previous_all_attributes) + 1
#try add it again. SHould have no effect
new_attr = client.add_attribute(test_attr)
all_attributes_insert_2 = client.get_attributes()
assert len(all_attributes) == len(all_attributes_insert_2)
def test_update_attribute(self, client):
test_attr = JSONObject({
"name": f'Test Attribute {datetime.datetime.now()}',
"dimension_id": None
})
new_attr = client.add_attribute(test_attr)
new_attr.name = f"Test attr updated {datetime.datetime.now()}"
updated_attr = client.update_attribute(new_attr)
assert new_attr.id == updated_attr.id and \
updated_attr.name == new_attr.name, \
"update_attribute didn't work"
#Try update this again (should have no effect)
updated_attr.description = "Updated description"
updated_attr_1 = client.update_attribute(updated_attr)
assert updated_attr_1.description == "Updated description"
#Now add a new attribute which should fail when updated
test_attr_fail = JSONObject({
"name": f'Test Attribute {datetime.datetime.now()}',
"dimension_id": None
})
new_attr_fail = client.add_attribute(test_attr_fail)
#set the name to be the same as the other attribute
new_attr_fail.name = new_attr.name
#this should fail because there's already an attribute with this naem
#and dimension (since we've just set it.)
with pytest.raises(HydraError):
client.update_attribute(new_attr_fail)
def test_delete_attribute(self, client):
test_attr = JSONObject({
"name": 'Test Attribute 1',
"dimension_id": None
})
new_attr = client.add_attribute(test_attr)
attr = client.get_attribute_by_id(new_attr.id)
result = client.delete_attribute(new_attr.id)
with pytest.raises(HydraError):
client.get_attribute_by_id(new_attr.id)
with pytest.raises(HydraError):
result = client.delete_attribute(new_attr.id)
def test_add_attributes(self, client):
test_attrs = [
JSONObject({
"name": 'Test Attribute 1',
"dimension_id": None
}),
JSONObject({
"name": 'Test Attribute 2',
"dimension_id": 1
})
]
new_attrs_list_1 = client.add_attributes(test_attrs)
all_attributes_after_add_1 = client.get_attributes()
#Try adding the attributes again. It should ignore them as theyr'e already there.
new_attrs_list_2 = client.add_attributes(test_attrs)
all_attributes_after_add_2 = client.get_attributes()
#This should have returned the attributes with the IDS from the first insert
assert sorted([a.id for a in new_attrs_list_1]) == sorted([a.id for a in new_attrs_list_2])
assert len(all_attributes_after_add_1) == len(all_attributes_after_add_2)
attributeset = set([(a.name, a.dimension) for a in all_attributes_after_add_1])
#Ensure that there are no duplicates by checking that the length of the set
#of name/dimension pairs is the same as the length of all attributes
assert len(attributeset) == len(all_attributes_after_add_2)
def test_get_attributes(self, client):
"""
def get_attributes(**kwargs):
"""
test_attr = JSONObject({
"name": 'Test Attribute 1',
"dimension_id": None
})
new_attr = client.add_attribute(test_attr)
attributes = client.get_attributes()
assert len(attributes) > 0, "get_attributes didn't work as expected!"
def test_get_template_attributes(self, client):
"""
SKELETON
def get_template_attributes(template_id, **kwargs):
"""
pass
def test_get_attribute_by_id(self, client, attribute):
existing_attr = attribute
retrieved_attr = client.get_attribute_by_id(existing_attr.id)
assert existing_attr.name == retrieved_attr.name
assert existing_attr.dimension_id == retrieved_attr.dimension_id
assert existing_attr.description == retrieved_attr.description
def test_get_all_attributes(self, client, attributes):
all_attributes = client.get_attributes()
attribute_names = []
for a in all_attributes:
attribute_names.append(a.name)
assert "Multi-added Attr 1" in attribute_names
assert "Multi-added Attr 2" in attribute_names
def test_get_attribute_by_name_and_dimension(self, client, attribute):
existing_attr = attribute
retrieved_attr = client.get_attribute_by_name_and_dimension(
existing_attr.name,
existing_attr.dimension_id)
assert existing_attr.id == retrieved_attr.id
assert existing_attr.description == retrieved_attr.description
def test_check_attr_dimension(self, client, new_dataset):
"""
def check_attr_dimension(attr_id, **kwargs):
"""
test_attr = JSONObject({
"name": 'Test Attribute 1',
"dimension_id": client.get_dimension_by_unit_id(new_dataset.unit_id).id
})
new_attr = client.add_attribute(test_attr)
result = client.check_attr_dimension(new_attr.id)
log.info(result)
assert result == 'OK', "check_attr_dimension didn't work as expected"
pass
class TestResourceAttribute:
def test_add_resource_attribute(self, client):
"""
SKELETON
def add_resource_attribute(resource_type, resource_id, attr_id, is_var, error_on_duplicate=True, **kwargs):
"""
pass
def test_update_resource_attribute(self, client):
"""
SKELETON
def update_resource_attribute(resource_attr_id, is_var, **kwargs):
"""
pass
def test_delete_resource_attribute(self, client):
"""
SKELETON
def delete_resource_attribute(resource_attr_id, **kwargs):
"""
pass
def test_add_resource_attrs_from_type(self, client):
"""
SKELETON
def add_resource_attrs_from_type(type_id, resource_type, resource_id,**kwargs):
"""
pass
def test_get_resource_attribute(self, client):
"""
SKELETON
def get_resource_attribute(resource_attr_id, **kwargs):
"""
pass
def test_get_all_resource_attributes(self, client):
"""
SKELETON
def get_all_resource_attributes(ref_key, network_id, template_id=None, **kwargs):
"""
pass
def test_get_resource_attributes(self, client):
"""
SKELETON
def get_resource_attributes(ref_key, ref_id, type_id=None, **kwargs):
"""
pass
def test_get_all_network_attributes(self, client, network_with_data):
all_network_attributes = client.get_all_network_attributes(network_with_data.id)
manual_all_network_attributes = [a.attr_id for a in network_with_data.attributes]
for n in network_with_data.nodes:
for a in n.attributes:
if a.attr_id not in manual_all_network_attributes:
manual_all_network_attributes.append(a.attr_id)
for n in network_with_data.links:
for a in n.attributes:
if a.attr_id not in manual_all_network_attributes:
manual_all_network_attributes.append(a.attr_id)
for n in network_with_data.resourcegroups:
for a in n.attributes:
if a.attr_id not in manual_all_network_attributes:
manual_all_network_attributes.append(a.attr_id)
assert len(all_network_attributes) == len(manual_all_network_attributes)
def test_add_group_attribute(self, client, network_with_data, attribute):
group = network_with_data.resourcegroups[0]
client.add_resource_attribute('GROUP', group.id, attribute.id, 'Y')
group_attrs = client.get_resource_attributes('GROUP', group.id)
group_attr_ids = []
for ga in group_attrs:
group_attr_ids.append(ga.attr_id)
assert attribute.id in group_attr_ids
def test_get_all_group_attributes(self, client, network_with_data):
#Get all the node attributes in the network
group_attr_ids = []
for g in network_with_data.resourcegroups:
for ga in g.attributes:
group_attr_ids.append(ga.id)
group_attributes = client.get_all_resource_attributes('GROUP', network_with_data.id)
#Check that the retrieved attributes are in the list of group attributes
retrieved_ras = []
for ga in group_attributes:
retrieved_ras.append(ga.id)
assert set(group_attr_ids) == set(retrieved_ras)
def test_add_link_attribute(self, client, network_with_data, attribute):
link = network_with_data.links[0]
client.add_resource_attribute('LINK', link.id, attribute.id, 'Y')
link_attributes = client.get_resource_attributes('LINK', link.id)
network_attr_ids = []
for ra in link_attributes:
network_attr_ids.append(ra.attr_id)
assert attribute.id in network_attr_ids
def test_get_all_link_attributes(self, client, network_with_data):
#Get all the node attributes in the network
link_attr_ids = []
for l in network_with_data.links:
for la in l.attributes:
link_attr_ids.append(la.id)
link_attributes = client.get_all_resource_attributes('LINK', network_with_data.id)
#Check that the retrieved attributes are in the list of node attributes
retrieved_ras = []
for la in link_attributes:
retrieved_ras.append(la.id)
assert set(link_attr_ids) == set(retrieved_ras)
def test_add_node_attribute(self, client, network_with_data, attribute):
node = network_with_data.nodes[0]
client.add_resource_attribute('NODE', node.id, attribute.id, 'Y')
node_attributes = client.get_resource_attributes('NODE', node.id)
network_attr_ids = []
for ra in node_attributes:
network_attr_ids.append(ra.attr_id)
assert attribute.id in network_attr_ids
def test_add_duplicate_node_attribute(self, client, network_with_data, attribute):
node = network_with_data.nodes[0]
client.add_resource_attribute('NODE', node.id, attribute.id, 'Y')
node_attributes = client.get_resource_attributes('NODE', node.id)
node_attr_ids = []
for ra in node_attributes:
node_attr_ids.append(ra.attr_id)
assert attribute.id in node_attr_ids
with pytest.raises(hb.HydraError):
client.add_resource_attribute('NODE', node.id, attribute.id, 'Y')
def test_get_all_node_attributes(self, client, network_with_data):
#Get all the node attributes in the network
node_attr_ids = []
for n in network_with_data.nodes:
for a in n.attributes:
node_attr_ids.append(a.id)
node_attributes = client.get_all_resource_attributes('NODE', network_with_data.id)
#Check that the retrieved attributes are in the list of node attributes
retrieved_ras = []
for na in node_attributes:
retrieved_ras.append(na.id)
assert set(node_attr_ids) == set(retrieved_ras)
template_id = network_with_data.types[0].template_id
node_attributes = client.get_all_resource_attributes('NODE', network_with_data.id, template_id)
#Check that the retrieved attributes are in the list of node attributes
retrieved_ras = []
for na in node_attributes:
retrieved_ras.append(na.id)
assert set(node_attr_ids) == set(retrieved_ras)
def test_add_network_attribute(self, client, network_with_data, attribute):
new_attr = attribute
client.add_resource_attribute('NETWORK', network_with_data.id, new_attr.id, 'Y')
updated_network = client.get_network(network_with_data.id)
network_attr_ids = []
for ra in updated_network.attributes:
network_attr_ids.append(ra.attr_id)
assert new_attr.id in network_attr_ids
def test_add_network_attrs_from_type(self, client, network_with_data, attribute):
"""
        Recreate the situation where a template is updated, so the network needs
to be updated to reflect this change.
Equivalent to 'apply_template_to_network', just performed differently.
"""
network = network_with_data
type_id = network.types[0].id
#Get the template type, and add a new typeattr to it
templatetype_j = JSONObject(client.get_templatetype(type_id))
new_typeattr = JSONObject({
'attr_id' : attribute.id
})
templatetype_j.typeattrs.append(new_typeattr)
client.update_templatetype(templatetype_j)
#Record the network's resource attributes before the update
before_net_attrs = []
for ra in network.attributes:
before_net_attrs.append(ra.attr_id)
log.info("old: %s",ra.attr_id)
#Set any new resource attributes
client.add_resource_attrs_from_type(type_id, 'NETWORK', network.id)
updated_network = client.get_network(network.id)
after_net_attrs = []
for ra in updated_network.attributes:
after_net_attrs.append(ra.attr_id)
log.info("new: %s",ra.attr_id)
assert len(after_net_attrs) == len(before_net_attrs) + 1
def OLD_test_get_network_attrs(self, client, network_with_data):
net_attrs = client.get_resource_attributes('NETWORK', network_with_data.id)
net_type_attrs = client.get_resource_attributes('NETWORK',
network_with_data.id,
network_with_data.types[0].id)
assert len(net_attrs) == 3
assert len(net_type_attrs) == 2
def test_delete_all_duplicate_attributes(self, client, network_with_data):
duplicate_attribute = JSONObject({'name': 'duplicate', 'dimension_id': None})
#use dedicated testing function which allows duplicates
dupe_attr_1 = client.add_attribute_no_checks(duplicate_attribute)
dupe_attr_2 = client.add_attribute_no_checks(duplicate_attribute)
all_attrs = client.get_attributes()
assert dupe_attr_1.id in [a.id for a in all_attrs]
assert dupe_attr_2.id in [a.id for a in all_attrs]
#add duplicate resource attributes
client.add_resource_attribute('NETWORK', network_with_data.id, dupe_attr_1.id, 'Y')
client.add_resource_attribute('NETWORK', network_with_data.id, dupe_attr_2.id, 'Y')
#check the dupes are there
updated_net = client.get_network(network_with_data.id)
updated_net_ras = [ra.attr_id for ra in updated_net.attributes]
assert dupe_attr_1.id in updated_net_ras
assert dupe_attr_2.id in updated_net_ras
#now add duplicate attrs to the template type
templatetype_to_update = network_with_data.types[0].id
client.add_typeattr(JSONObject({'type_id': templatetype_to_update,
'attr_id': dupe_attr_1.id}))
client.add_typeattr(JSONObject({'type_id': templatetype_to_update,
'attr_id': dupe_attr_2.id}))
#check the dupes are there
updated_type = client.get_templatetype(templatetype_to_update)
assert dupe_attr_1.id in [ta.attr_id for ta in updated_type.typeattrs]
        assert dupe_attr_2.id in [ta.attr_id for ta in updated_type.typeattrs]
client.delete_all_duplicate_attributes()
#check the dupes are gone
updated_net = client.get_network(network_with_data.id)
updated_net_ras = [ra.attr_id for ra in updated_net.attributes]
assert dupe_attr_1.id in updated_net_ras
assert dupe_attr_2.id not in updated_net_ras
#check the dupes are gone
updated_type = client.get_templatetype(templatetype_to_update)
assert dupe_attr_1.id in [ta.attr_id for ta in updated_type.typeattrs]
assert dupe_attr_2.id not in [ta.attr_id for ta in updated_type.typeattrs]
reduced_attrs = client.get_attributes()
#check that the first attr is there, but the dupe is not.
#the one to keep should be the one with the lowest ID
lowest_id = min(dupe_attr_1.id, dupe_attr_2.id)
assert lowest_id in [a.id for a in reduced_attrs]
assert dupe_attr_2.id not in [a.id for a in reduced_attrs]
def test_delete_duplicate_resourceattributes(self, client, network_with_data):
#first add a duplicate resourceattr to the network
#find a resourceattr.
network_with_data.attributes.sort(key=lambda x : x.attr_id)
ra1 = network_with_data.attributes[0]
ra2 = network_with_data.attributes[1]
ra3 = network_with_data.attributes[-1] # not associated to a template
ra_attribute1 = client.get_attribute_by_id(ra1.attr_id)
ra_attribute2 = client.get_attribute_by_id(ra2.attr_id)
ra_attribute3 = client.get_attribute_by_id(ra3.attr_id)
#create an attribute with the same name but a different dimension
duplicate_attribute1 = JSONObject({
'name': ra_attribute1.name,
'dimension_id': 1
})
duplicate_attribute2 = JSONObject({
'name': ra_attribute2.name,
'dimension_id': 1
})
duplicate_attribute3 = JSONObject({
'name': ra_attribute3.name,
'dimension_id': 1
})
dupeattr1 = client.add_attribute(duplicate_attribute1)
dupeattr2 = client.add_attribute(duplicate_attribute2)
dupeattr3 = client.add_attribute(duplicate_attribute3)
dupe_ra1 = client.add_resource_attribute('NETWORK', network_with_data.id, dupeattr1.id, 'N')#
#get an arbitrary dataset
dataset = client.get_dataset(1)
        #set a value on the RA which should get transferred in the deletion later
new_rscen = client.add_data_to_attribute(network_with_data.scenarios[0].id,
dupe_ra1.id,
dataset)
#add 2 more dupes but with no data associated to them
dupe_ra2 = client.add_resource_attribute('NETWORK', network_with_data.id, dupeattr2.id, 'N')
dupe_ra3 = client.add_resource_attribute('NETWORK', network_with_data.id, dupeattr3.id, 'N')
updated_net = client.get_network(network_with_data.id, include_data=True)
updated_net_ras = [ra.attr_id for ra in updated_net.attributes]
assert dupeattr1.id in updated_net_ras
assert dupeattr2.id in updated_net_ras
assert dupeattr3.id in updated_net_ras
#verify the data has been associated to the dupe RA
original_rs = network_with_data.scenarios[0].resourcescenarios
new_rs = updated_net.scenarios[0].resourcescenarios
assert len(new_rs) == len(original_rs) + 1
assert ra1.id not in [rs.resource_attr_id for rs in new_rs]
assert dupe_ra1.id in [rs.resource_attr_id for rs in new_rs]
#now that the new attribute is added, try to delete it.
client.delete_duplicate_resourceattributes()
updated_net = client.get_network(network_with_data.id, include_data=True)
updated_net_ras = [ra.attr_id for ra in updated_net.attributes]
#the number of network attributes has decreased because BOTH duplicates
#of RA3 (which are not associated to a template) have been removed.
#This means one of the original ones is now gone
assert len(updated_net.attributes) == len(network_with_data.attributes) -1
assert dupeattr1.id not in updated_net_ras
assert dupeattr2.id not in updated_net_ras
assert dupeattr3.id not in updated_net_ras
#verify the data which was on the dupe has been remapped to the remaining, correct, attribute
original_rs = network_with_data.scenarios[0].resourcescenarios
new_rs = updated_net.scenarios[0].resourcescenarios
assert len(new_rs) == len(original_rs) + 1
assert ra1.id in [rs.resource_attr_id for rs in new_rs]
assert dupe_ra1.id not in [rs.resource_attr_id for rs in new_rs]
class TestAttributeMap:
def test_set_attribute_mapping(self, client, networkmaker):
net1 = networkmaker.create()
net2 = networkmaker.create()
net3 = networkmaker.create()
s1 = net1.scenarios[0]
s2 = net2.scenarios[0]
node_1 = net1.nodes[0]
node_2 = net2.nodes[1]
node_3 = net3.nodes[2]
attr_1 = client.testutils.get_by_name('node_attr_a', node_1.attributes)
attr_2 = client.testutils.get_by_name('node_attr_b', node_2.attributes)
attr_3 = client.testutils.get_by_name('node_attr_c', node_3.attributes)
rs_to_update_from = None
for rs in s1.resourcescenarios:
if rs.resource_attr_id == attr_1.id:
rs_to_update_from = rs
rs_to_change = None
for rs in s2.resourcescenarios:
if rs.resource_attr_id == attr_2.id:
rs_to_change = rs
client.set_attribute_mapping(attr_1.id, attr_2.id)
client.set_attribute_mapping(attr_1.id, attr_3.id)
all_mappings_1 = client.get_mappings_in_network(net1.id)
all_mappings_2 = client.get_mappings_in_network(net2.id, net2.id)
assert len(all_mappings_1) == 2
assert len(all_mappings_2) == 1
node_mappings_1 = client.get_node_mappings(node_1.id)
node_mappings_2 = client.get_node_mappings(node_1.id, node_2.id)
assert len(node_mappings_1) == 2
assert len(node_mappings_2) == 1
map_exists = client.check_attribute_mapping_exists(attr_1.id, attr_2.id)
assert map_exists == 'Y'
map_exists = client.check_attribute_mapping_exists(attr_2.id, attr_1.id)
assert map_exists == 'N'
map_exists = client.check_attribute_mapping_exists(attr_2.id, attr_3.id)
assert map_exists == 'N'
updated_rs = client.update_value_from_mapping(attr_1.id, attr_2.id, s1.id, s2.id)
assert str(updated_rs.dataset.value) == str(rs_to_update_from.dataset.value)
log.info("Deleting %s -> %s", attr_1.id, attr_2.id)
client.delete_attribute_mapping(attr_1.id, attr_2.id)
all_mappings_1 = client.get_mappings_in_network(net1.id)
assert len(all_mappings_1) == 1
client.delete_mappings_in_network(net1.id)
all_mappings_1 = client.get_mappings_in_network(net1.id)
assert len(all_mappings_1) == 0
def test_delete_attribute_mapping(self, client):
"""
SKELETON
def delete_attribute_mapping(resource_attr_a, resource_attr_b, **kwargs):
"""
pass
def test_delete_mappings_in_network(self, client):
"""
SKELETON
def delete_mappings_in_network(network_id, network_2_id=None, **kwargs):
"""
pass
def test_get_mappings_in_network(self, client):
"""
SKELETON
def get_mappings_in_network(network_id, network_2_id=None, **kwargs):
"""
pass
def test_get_node_mappings(self, client):
"""
SKELETON
def get_node_mappings(node_id, node_2_id=None, **kwargs):
"""
pass
def test_get_link_mappings(self, client):
"""
SKELETON
def get_link_mappings(link_id, link_2_id=None, **kwargs):
"""
pass
def test_get_network_mappings(self, client):
"""
SKELETON
def get_network_mappings(network_id, network_2_id=None, **kwargs):
"""
pass
def test_check_attribute_mapping_exists(self, client):
"""
SKELETON
def check_attribute_mapping_exists(resource_attr_id_source, resource_attr_id_target, **kwargs):
"""
pass
class TestAttributeGroups:
"""
Test for attribute Groups-based functionality
"""
def test_add_attribute_group(self, client, projectmaker, attribute):
project = projectmaker.create()
newgroup = JSONObject({
'project_id' : project.id,
'name' : "Attribute Group %s" % (datetime.datetime.now(),),
'description' : "A description of an attribute group",
'layout' : {"color": "green"},
'exclusive' : 'Y',
})
newgroup = client.add_attribute_group(newgroup)
retrieved_new_group = client.get_attribute_group(newgroup.id)
assert retrieved_new_group.name == newgroup.name
def test_update_attribute_group(self, client, attributegroup):
newname = attributegroup.name + " Updated"
attributegroup.name = newname
client.update_attribute_group(attributegroup)
retrieved_new_group = client.get_attribute_group(attributegroup.id)
assert retrieved_new_group.name == newname
def test_delete_attribute_group(self, client, attributegroup):
client.delete_attribute_group(attributegroup.id)
with pytest.raises(hb.HydraError):
client.get_attribute_group(attributegroup.id)
def test_basic_add_attribute_group_items(self, client, projectmaker, network_with_data, attributegroupmaker):
project = projectmaker.create()
#convenience naming
network = network_with_data
#Create two groups -- attributes which are associated with a network,
#and everything else.
group_1 = attributegroupmaker.create(project.id, "Network Attributes")
group_2 = attributegroupmaker.create(project.id, "Node Attributes")
network_attributes = []
for netattr in network.attributes:
network_attributes.append(JSONObject(
{'attr_id' : netattr.attr_id,
'network_id' : network.id,
'group_id' : group_1.id
}))
node_attr_tracker = []
node_attributes = []
for node in network.nodes:
for node_attr in node.attributes:
if node_attr.attr_id not in node_attr_tracker:
node_attributes.append(JSONObject(
{'attr_id' : node_attr.attr_id,
'network_id' : network.id,
'group_id' : group_2.id
}))
node_attr_tracker.append(node_attr.attr_id)
client.add_attribute_group_items(network_attributes)
client.add_attribute_group_items(node_attributes)
all_items_in_network = client.get_network_attributegroup_items(network.id)
assert len(all_items_in_network) == len(network_attributes)+len(node_attributes)
def test_exclusive_add_attribute_group_items(self, client, projectmaker, network_with_data, attributegroupmaker):
"""
add attributes to a group that are already in an exclusive group
"""
project = projectmaker.create()
#convenience naming
network = network_with_data
#Create two groups -- attributes which are associated with a network,
#and everything else.
group_1 = attributegroupmaker.create(project.id, "Network Attributes", 'Y')
group_2 = attributegroupmaker.create(project.id, "Node Attributes")
network_attributes = []
node_attributes = []
for netattr in network.attributes:
network_attributes.append(JSONObject(
{'attr_id' : netattr.attr_id,
'network_id' : network.id,
'group_id' : group_1.id}))
            #Put these attributes into both groups. This should fail, as group 1
#is exclusive, and already has these attributes
node_attributes.append(JSONObject(
{'attr_id' : netattr.attr_id,
'network_id' : network.id,
'group_id' : group_2.id}))
node_attr_tracker = []
for node in network.nodes:
for node_attr in node.attributes:
if node_attr.attr_id not in node_attr_tracker:
node_attributes.append(JSONObject(
{'attr_id' : node_attr.attr_id,
'network_id' : network.id,
'group_id' : group_2.id}))
node_attr_tracker.append(node_attr.attr_id)
log.info("Adding items to group 1 (network attributes)")
client.add_attribute_group_items(network_attributes)
#add a group with attributes that are already in an exclusive group
with pytest.raises(hb.HydraError):
log.info("Adding items to group 2 (node attributes, plus network attributes)")
client.add_attribute_group_items(node_attributes)
def test_reverse_exclusive_add_attribute_group_items(self, client, projectmaker, network_with_data, attributegroupmaker):
"""
add attributes to an exclusive group that are already in another group
"""
project = projectmaker.create()
#convenience naming
network = network_with_data
#Create two groups -- attributes which are associated with a network,
#and everything else.
group_1 = attributegroupmaker.create(project.id, "Network Attributes", 'Y')
group_2 = attributegroupmaker.create(project.id, "Node Attributes")
network_attributes = []
node_attributes = []
for netattr in network.attributes:
network_attributes.append(JSONObject(
{'attr_id' : netattr.attr_id,
'network_id' : network.id,
'group_id' : group_1.id}))
            #Put these attributes into both groups. This should fail, as group 1
#is exclusive, and already has these attributes
node_attributes.append(JSONObject(
{'attr_id' : netattr.attr_id,
'network_id' : network.id,
'group_id' : group_2.id}))
node_attr_tracker = []
for node in network.nodes:
for node_attr in node.attributes:
if node_attr.attr_id not in node_attr_tracker:
node_attributes.append(JSONObject(
{'attr_id' : node_attr.attr_id,
'network_id' : network.id,
'group_id' : group_2.id}))
node_attr_tracker.append(node_attr.attr_id)
log.info("Adding items to group 2 (node attributes, plus network attributes)")
client.add_attribute_group_items(node_attributes)
#add attributes to an exclusive group that are already in another group
with pytest.raises(hb.HydraError):
log.info("Adding items to group 1 (network attributes)")
client.add_attribute_group_items(network_attributes)
def test_delete_attribute_group_items(self, client, projectmaker, network_with_data, attributegroupmaker):
project = projectmaker.create()
#convenience naming
network = network_with_data
#Create two groups -- attributes which are associated with a network,
#and everything else.
group_1 = attributegroupmaker.create(project.id, "Network Attributes")
group_2 = attributegroupmaker.create(project.id, "Node Attributes")
network_attributes = []
for netattr in network.attributes:
network_attributes.append(JSONObject(
{'attr_id' : netattr.attr_id,
'network_id' : network.id,
'group_id' : group_1.id}))
node_attr_tracker = []
node_attributes = []
for node in network.nodes:
for node_attr in node.attributes:
if node_attr.attr_id not in node_attr_tracker:
node_attributes.append(JSONObject(
{'attr_id' : node_attr.attr_id,
'network_id' : network.id,
'group_id' : group_2.id}))
node_attr_tracker.append(node_attr.attr_id)
client.add_attribute_group_items(network_attributes)
client.add_attribute_group_items(node_attributes)
all_items_in_network = client.get_network_attributegroup_items(network.id)
assert len(all_items_in_network) == len(network_attributes)+len(node_attributes)
#Now remove all the node attributes
client.delete_attribute_group_items(node_attributes)
all_items_in_network = client.get_network_attributegroup_items(network.id)
assert len(all_items_in_network) == len(network_attributes)
def test_get_attribute_group(self, client):
"""
SKELETON
def get_attribute_group(group_id, **kwargs):
"""
pass
def test_get_network_attributegroup_items(self, client):
"""
SKELETON
def get_network_attributegroup_items(network_id, **kwargs):
"""
pass
def test_get_group_attributegroup_items(self, client):
"""
SKELETON
def get_group_attributegroup_items(network_id, group_id, **kwargs):
"""
pass
def test_get_attribute_item_groups(self, client):
"""
SKELETON
def get_attribute_item_groups(network_id, attr_id, **kwargs):
"""
pass
def test_add_attribute_group_items(self, client):
"""
SKELETON
def add_attribute_group_items(attributegroupitems, **kwargs):
"""
pass
| lgpl-3.0 | 509,869,979,931,796,350 | 35.886503 | 125 | 0.61131 | false |
TEAM-HRA/hra_suite | HRAGUI/src/hra_gui/qt/widgets/list_widget_widget.py | 1 | 1567 | '''
Created on 21 Apr 2013
@author: jurek
'''
from hra_core.special import ImportErrorMessage
try:
from PyQt4.QtGui import * # @UnusedWildImport
from PyQt4.QtCore import * # @UnusedWildImport
from hra_core.misc import Params
from hra_core.collections_utils import nvl
from hra_gui.qt.utils.signals import LIST_ITEM_DOUBLE_CLICKED_SIGNAL
from hra_gui.qt.widgets.commons import Common
from hra_gui.qt.widgets.commons import prepareWidget
except ImportError as error:
ImportErrorMessage(error, __name__)
class ListWidgetWidget(QListWidget, Common):
def __init__(self, parent, **params):
super(ListWidgetWidget, self).__init__(parent)
prepareWidget(parent=parent, widget=self, **params)
double_click_handler = params.get('list_item_double_clicked_handler',
None)
if double_click_handler:
self.connect(self, LIST_ITEM_DOUBLE_CLICKED_SIGNAL,
double_click_handler)
class ListWidgetItemWidget(QListWidgetItem):
def __init__(self, parent, **params):
params = Params(**params)
super(ListWidgetItemWidget, self).__init__(
nvl(params.text, ''), parent)
#store in data buffer of list item for later use
if params.data:
self.setData(Qt.UserRole, QVariant(params.data))
def getData(self):
item = self.data(Qt.UserRole)
if item:
return item.toPyObject()
| lgpl-3.0 | -4,704,762,434,656,400,000 | 35.44186 | 77 | 0.637524 | false |
pjuu/pjuu | pjuu/auth/views.py | 1 | 14803 | # -*- coding: utf8 -*-
"""Flask endpoints provide the URL endpoints for the auth system.
:license: AGPL v3, see LICENSE for more details
:copyright: 2014-2021 Joe Doherty
"""
# 3rd party imports
from flask import (
current_app as app, flash, redirect, render_template, request, url_for,
session, jsonify, Blueprint, g, _app_ctx_stack
)
# Pjuu imports
from pjuu.lib import handle_next
from pjuu.lib.mail import send_mail
from pjuu.lib.tokens import generate_token, check_token
from pjuu.auth import current_user
from pjuu.auth.backend import (
authenticate, signin as be_signin, signout as be_signout, create_account,
activate as be_activate, change_password as be_change_password,
change_email as be_change_email, delete_account as be_delete_account,
dump_account as be_dump_account
)
from pjuu.auth.utils import get_uid, get_user
from pjuu.auth.decorators import anonymous_required, login_required
from pjuu.auth.forms import (
ForgotForm, SignInForm, ResetForm, SignUpForm, ChangeEmailForm,
ChangePasswordForm, ConfirmPasswordForm
)
auth_bp = Blueprint('auth', __name__)
@auth_bp.before_app_request
def _load_user():
"""Get the currently logged in user as a `dict` and store on the
application context. This will be `None` if the user is not logged in.
"""
user = None
if 'user_id' in session:
# Fetch the user object from MongoDB
user = get_user(session.get('user_id'))
# Remove the uid from the session if the user is not logged in
if not user:
session.pop('user_id', None)
_app_ctx_stack.top.user = user
@auth_bp.before_app_request
def kick_banned_user():
"""
    This function will check to see if the user has been banned since login.
    Without this we would have to wait for the user to try and log in again
    before they are informed that they are banned. This function will just
    ensure they are kicked out.
"""
if current_user and current_user.get('banned', False):
session.pop('user_id', None)
flash('You\'re a very naughty boy!', 'error')
@auth_bp.after_app_request
def inject_token_header(response):
"""If there is an auth token generated will it as a header X-Pjuu-Token.
Will only ever do this if testing mode is on!
"""
if app.debug or app.testing: # pragma: no branch
token = g.get('token')
if token:
response.headers['X-Pjuu-Token'] = token
return response
@auth_bp.app_context_processor
def inject_user():
"""Injects `current_user` into the Jinja environment
"""
return dict(current_user=current_user)
@auth_bp.route('/signin', methods=['GET', 'POST'])
@anonymous_required
def signin():
"""
"""
form = SignInForm(request.form)
if request.method == 'POST':
# Handles the passing of the next argument to the login view
redirect_url = handle_next(request, url_for('users.feed'))
if form.validate():
# Calls authenticate from backend.py
user = authenticate(form.username.data, form.password.data)
if user:
# Ensure the user is active
if not user.get('active', False):
flash('Please activate your account<br />'
'Check your e-mail', 'information')
# Ensure the user is not banned
elif user.get('banned', False):
flash('You\'re a very naughty boy!', 'error')
# All OK log the user in
else:
# We will also make the session permanent if the user
                    # has requested to
session.permanent = form.keep_signed_in.data
be_signin(user.get('_id'))
return redirect(redirect_url)
else:
flash('Invalid user name or password', 'error')
else:
flash('Invalid user name or password', 'error')
return render_template('signin.html', form=form)
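# A minimal sketch (not part of the original module) of exercising the signin
# endpoint with Flask's test client. It assumes pjuu exposes an application
# factory named `create_app` and that this blueprint is registered without a
# URL prefix; both are assumptions and may differ in the real project, and the
# credentials below are placeholders.
def _signin_smoke_test():  # pragma: no cover
    from pjuu import create_app  # assumption: factory name/location
    test_app = create_app()
    test_app.config['TESTING'] = True
    with test_app.test_client() as test_client:
        response = test_client.post('/signin', data={
            'username': 'joe_bloggs',
            'password': 'a_password',
        }, follow_redirects=True)
        # An unknown user still renders the signin page with a flash message
        assert response.status_code == 200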
@auth_bp.route('/signout', methods=['GET'])
def signout():
"""
"""
if current_user:
be_signout()
flash('Successfully signed out', 'success')
return redirect(url_for('auth.signin'))
@auth_bp.route('/signup', methods=['GET', 'POST'])
@anonymous_required
def signup():
"""
"""
form = SignUpForm(request.form)
if request.method == 'POST':
if form.validate():
# User successfully signed up, create an account
uid = create_account(form.username.data, form.email.data,
form.password.data)
# Lets check the account was created
# This would only fail in the event of a race condition
if uid: # pragma: no branch
token = generate_token({'action': 'activate', 'uid': uid})
# Send an e-mail to activate their account
send_mail(
'Pjuu Account Notification - Activation',
[form.email.data],
text_body=render_template('emails/activate.txt',
token=token),
html_body=render_template('emails/activate.html',
token=token)
)
flash('Yay! You\'ve signed up<br/>'
'We\'ve sent an e-mail to {}<br/>'
'Please activate your account'.format(form.email.data),
'success')
return redirect(url_for('auth.signin'))
flash('Oh no! There are errors in your form. Please try again.',
'error')
return render_template('signup.html', form=form)
@auth_bp.route('/activate/<token>', methods=['GET'])
@anonymous_required
def activate(token):
"""
Activates the user account so long as the token is valid.
"""
# Attempt to get the data from the token
data = check_token(token)
if data is not None and data.get('action') == 'activate':
# Attempt to activate the users account
user = get_user(data.get('uid'))
# This does not need a branching check as it should never fail!
# The check is there for safety. An auth token can not live longer
# than a newly created user.
if user is not None: # pragma: no branch
be_activate(user.get('_id'))
# If we have got to this point. Send a welcome e-mail :)
send_mail(
                'Pjuu Account Notification - Welcome!',
[user.get('email')],
text_body=render_template('emails/welcome.txt'),
html_body=render_template('emails/welcome.html')
)
flash('Your account has now been activated', 'success')
return redirect(url_for('auth.signin'))
# The token is either out of date or has been tampered with
flash('Invalid token', 'error')
return redirect(url_for('auth.signin'))
@auth_bp.route('/forgot', methods=['GET', 'POST'])
@anonymous_required
def forgot():
"""Allow users to get a password reset link"""
form = ForgotForm(request.form)
# We always go to /signin after a POST
if request.method == 'POST':
if form.validate():
user = get_user(get_uid(form.username.data, non_active=True))
if user is not None:
# Only send e-mails to user which exist.
token = generate_token({
'action': 'reset',
'uid': user.get('_id')
})
send_mail(
'Pjuu Account Notification - Password Reset',
[user.get('email')],
text_body=render_template('emails/forgot.txt',
token=token),
html_body=render_template('emails/forgot.html',
token=token)
)
flash('If we\'ve found your account we\'ve e-mailed you',
'information')
return redirect(url_for('auth.signin'))
else:
flash('Please enter a username or e-mail address',
'error')
return render_template('forgot.html', form=form)
@auth_bp.route('/reset/<token>', methods=['GET', 'POST'])
@anonymous_required
def reset(token):
"""
This view allows the user to create a new password so long as the token
is valid.
"""
form = ResetForm(request.form)
# Check the token but do not delete it.
data = check_token(token, preserve=True)
if data is not None and data.get('action') == 'reset':
if request.method == 'POST':
if form.validate():
# If the form was successful recheck the token but expire it.
check_token(token)
# Update the password and inform the users
be_change_password(data['uid'], form.password.data)
flash('Your password has now been reset', 'success')
return redirect(url_for('auth.signin'))
else:
flash('Oh no! There are errors in your form', 'error')
else:
flash('Invalid token', 'error')
return redirect(url_for('auth.signin'))
return render_template('reset.html', form=form)
@auth_bp.route('/settings/email', methods=['GET', 'POST'])
@login_required
def change_email():
"""
"""
form = ChangeEmailForm(request.form)
if request.method == 'POST':
if form.validate():
# User validates in the form
# Get an authentication token
token = generate_token({
'action': 'change_email',
'uid': current_user['_id'],
'email': form.new_email.data}
)
# Send a confirmation to the new email address
send_mail(
'Pjuu Account Notification - Confirm Email Change',
[form.new_email.data],
text_body=render_template('emails/email_change.txt',
token=token),
html_body=render_template('emails/email_change.html',
token=token)
)
flash('We\'ve sent you an email, please confirm this',
'success')
else:
flash('Oh no! There are errors in your form', 'error')
return render_template('change_email.html', form=form)
@auth_bp.route('/settings/email/<token>', methods=['GET'])
@login_required
def confirm_email(token):
"""
"""
# Attempt to get the data from the token
data = check_token(token)
if data is not None and data.get('action') == 'change_email':
# Change the users e-mail
uid = data.get('uid')
# We will email the address stored in the token. This may help us
        # identify if there is any mismatch
email = data.get('email')
        # This could only happen if the user deletes their account then presses
# the confirm email link that is sent to them.
if uid and email: # pragma: no branch
be_change_email(uid, email)
send_mail(
'Pjuu Account Notification - Email Address Changed',
[email],
text_body=render_template('emails/confirm_email.txt'),
html_body=render_template('emails/confirm_email.html')
)
flash('We\'ve updated your e-mail address', 'success')
return redirect(url_for('auth.change_email'))
# The token is either out of date or has been tampered with
flash('Invalid token', 'error')
return redirect(url_for('auth.change_email'))
@auth_bp.route('/settings/password', methods=['GET', 'POST'])
@login_required
def change_password():
"""
The view a user uses to change their password.
This will change their password straight away once they have authenticated,
    and will then send them a confirmation e-mail.
"""
form = ChangePasswordForm(request.form)
if request.method == 'POST':
if form.validate():
# User authenticates in the form
# Update the users password!
be_change_password(current_user['_id'], form.new_password.data)
flash('We\'ve updated your password', 'success')
# Inform the user via e-mail that their password has changed
send_mail(
'Pjuu Account Notification - Password Changed',
[current_user['email']],
text_body=render_template('emails/password_change.txt'),
html_body=render_template('emails/password_change.html')
)
else:
flash('Oh no! There are errors in your form', 'error')
return render_template('change_password.html', form=form)
@auth_bp.route('/settings/delete', methods=['GET', 'POST'])
@login_required
def delete_account():
"""
"""
form = ConfirmPasswordForm(request.form)
if request.method == 'POST':
if authenticate(current_user['username'], form.password.data):
uid = current_user['_id']
email = current_user['email']
# Log the current user out
be_signout()
# Delete the account
be_delete_account(uid)
# Inform the user that the account has/is being deleted
flash('Your account is being deleted<br />Thanks for using us',
'information')
# Send the user their last ever email on Pjuu
send_mail(
'Pjuu Account Notification - Account Deletion',
[email],
text_body=render_template('emails/account_deletion.txt'),
html_body=render_template('emails/account_deletion.html')
)
# Send user back to login
return redirect(url_for('auth.signin'))
else:
flash('Oops! wrong password', 'error')
return render_template('delete_account.html', form=form)
@auth_bp.route('/settings/dump', methods=['GET', 'POST'])
@login_required
def dump_account():
"""Enables the user to dump a JSON representation of their account.
"""
form = ConfirmPasswordForm(request.form)
if request.method == 'POST':
if authenticate(current_user['username'], form.password.data):
# Dump the users account
data = be_dump_account(current_user['_id'])
# JSONify the data and display it to the user :) simple
return jsonify(data)
else:
flash('Oops! wrong password', 'error')
return render_template('dump_account.html', form=form)
| agpl-3.0 | -3,284,603,166,870,039,600 | 35.550617 | 79 | 0.577383 | false |
okomarov/lsptf | setup.py | 1 | 3767 | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'lsptf',
version = '0.1.0-alpha',
description = 'Long short investment portfolios',
long_description = long_description,
url='https://github.com/okomarov/lsptf',
author='Oleg Komarov',
author_email='[email protected]',
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Financial and Insurance Industry',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Topic :: Office/Business :: Financial :: Investment',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.6',
#'Programming Language :: Python :: 2.7',
#'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.2',
#'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='finance investment neutral long short backtest',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
#packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
#extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
#},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
#package_data={
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
) | bsd-3-clause | 3,907,107,188,002,606,000 | 37.845361 | 94 | 0.648527 | false |
dante-signal31/geolocate | geolocate/classes/config.py | 1 | 19292 | """
Configuration parser module.
Programmed by: Dante Signal31
email: [email protected]
"""
import configparser
import http.client as http
import os
import urllib.parse as urlparse
import keyring
CONFIG_ROOT = os.path.expanduser("~/.geolocate")
CONFIG_FILE = "etc/geolocate.conf"
CONFIG_FILE_PATH = os.path.join(CONFIG_ROOT, CONFIG_FILE)
GEOLOCATE_VAULT = "geolocate"
DEFAULT_USER_ID = ""
DEFAULT_LICENSE_KEY = ""
# TODO: For production I have to uncomment real url.
# Only for tests I have to comment real download url. MaxMind has a rate limit
# per day. If you exceed that limit you are forbidden from downloading their
# database for 24 hours.
DEFAULT_DATABASE_DOWNLOAD_URL = "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz"
# TODO: For production remove next fake url, it's only for tests.
# DEFAULT_DATABASE_DOWNLOAD_URL = "http://localhost:2014/GeoLite2-City.mmdb.gz"
# GeoLite2 databases are updated on the first Tuesday of each month, so 35 days
# of update interval should be fine.
DEFAULT_UPDATE_INTERVAL = 35
DEFAULT_LOCAL_DATABASE_FOLDER = os.path.join(CONFIG_ROOT, "local_database/")
DEFAULT_LOCAL_DATABASE_NAME = "GeoLite2-City.mmdb"
# Remember add new locators here or locate won't use them.
DEFAULT_LOCATORS_PREFERENCE = ["geoip2_webservice", "geoip2_local"]
class Configuration(object):
    # I've discovered that the Maxmind website blocks clients who exceed a
    # connection threshold. If we make a connection each time we run geolocate,
    # in order to check that the configured URL is OK, we can end up on
    # Maxmind's blacklist. So, we have to minimize connections. Checking only
    # when the configuration is updated is one way, but then we have to control
    # how users update the config. The best way is to limit users to changing
    # the configuration only through the executable's parameters. If we let
    # them change the configuration file manually we should check it each time
    # we run the program. We'd better close the configuration file through
    # serialization and check the URL only when the user makes the program
    # change its configuration.
""" Class to encapsulate configuration needed to connect to Geolite2
webservices or downloaded local database. This class also validates
parameters read from config files to overcome user typos.
"""
def __init__(self, user_id=DEFAULT_USER_ID,
license_key=DEFAULT_LICENSE_KEY,
download_url=DEFAULT_DATABASE_DOWNLOAD_URL,
update_interval=DEFAULT_UPDATE_INTERVAL,
local_database_folder=DEFAULT_LOCAL_DATABASE_FOLDER,
local_database_name=DEFAULT_LOCAL_DATABASE_NAME,
locators_preference=DEFAULT_LOCATORS_PREFERENCE):
self._webservice = {"user_id": user_id,
"license_key": license_key}
self._local_database = {"download_url": download_url,
"update_interval": update_interval,
"local_database_folder": local_database_folder,
"local_database_name": local_database_name}
self._locators_preference = locators_preference
@property
def user_id(self):
return self._webservice["user_id"]
@user_id.setter
def user_id(self, user_id):
_validate_value("user_id", user_id)
self._webservice["user_id"] = user_id
@property
def license_key(self):
return self._webservice["license_key"]
@license_key.setter
def license_key(self, license_key):
_validate_value("license_key", license_key)
self._webservice["license_key"] = license_key
@property
def download_url(self):
return self._local_database["download_url"]
@download_url.setter
def download_url(self, url):
_validate_url("download_url", url)
self._local_database["download_url"] = url
@property
def update_interval(self):
return self._local_database["update_interval"]
@update_interval.setter
def update_interval(self, update_interval_in_days):
interval_integer = _validate_integer("update_interval",
update_interval_in_days)
self._local_database["update_interval"] = interval_integer
@property
def local_database_folder(self):
return self._local_database["local_database_folder"]
@local_database_folder.setter
def local_database_folder(self, folder_path):
database_folder_path = _get_folder_path(folder_path)
_validate_folder("local_database_folder", database_folder_path)
self._local_database["local_database_folder"] = database_folder_path
@property
def local_database_name(self):
return self._local_database["local_database_name"]
@local_database_name.setter
def local_database_name(self, database_name):
# At first sight every database name should be OK, but I'll leave this
# as a property in case I have an idea about a possible check.
self._local_database["local_database_name"] = database_name
@property
def local_database_path(self):
path = os.path.join(self.local_database_folder,
self.local_database_name)
return path
@property
def locators_preference(self):
"""
:return: Enabled locators for this GeoIPDatabase ordered by preference.
:rtype: list
"""
return self._locators_preference
@locators_preference.setter
def locators_preference(self, new_locator_list):
if _unknown_locators(new_locator_list):
unknown_locators = _get_unknown_locators(new_locator_list)
raise UnknownLocators(unknown_locators)
else:
self._locators_preference = new_locator_list
# I make comparisons at tests so I need this functionality.
# Great reference about custom classes equality at:
# https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
def __eq__(self, other):
if isinstance(other, self.__class__):
return other.__dict__ == self.__dict__
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, self.__class__):
return not self.__eq__(other)
else:
return NotImplemented
def __hash__(self):
return hash(tuple(sorted(self.__dict__.items())))
def reset_locators_preference(self):
""" Reset locators preference to default order.
:return: None
"""
self._locators_preference = DEFAULT_LOCATORS_PREFERENCE
@property
def disabled_locators(self):
""" Locators registered as default one but not enabled in this
GeoIPDatabase.
:return: Disabled locators in this GeoIPDatabase.
:rtype: set
"""
default_locators_set = set(DEFAULT_LOCATORS_PREFERENCE)
enabled_locators_set = set(self.locators_preference)
disabled_locators_set = default_locators_set - enabled_locators_set
return disabled_locators_set
def get_configuration_dict(self):
""" Get a dict with configuration parameters.
Dict keys will be sections and values are also dict with values for
that section. License key won't we included and must be got from
license_key property
:return: Configuration sections dict of dicts.
:rtype: dict
"""
parsed_configuration = {
"webservice": {"user_id": self._webservice["user_id"]},
"local_database": {key: self._local_database[key]
for key in self._local_database.keys()},
"locators_preference": {
"preference": ",".join(self._locators_preference)
}
}
return parsed_configuration
def _validate_value(parameter, value):
# TODO: Add more checks to detect invalid values when you know MaxMind's
# conditions for user ids.
if _text_has_spaces(value) or value == "":
raise ParameterNotValid(value, parameter,
" ". join([parameter, "cannot have spaces."]))
def _validate_url(parameter, url):
"""
Check if a URL exists without downloading the whole file.
We only check the URL header.
:param parameter: Attribute that is being validated.
:type parameter: str
:param url: HTTP url to check its existence.
:type url: str
:return: None
:raise: ParameterNotValid
"""
# see also http://stackoverflow.com/questions/2924422
good_codes = [http.OK, http.FOUND, http.MOVED_PERMANENTLY]
try:
if _get_server_status_code(url) in good_codes:
return # Validation succeeded.
else:
raise Exception # Let outer except raise one only exception.
except:
raise ParameterNotValid(url, parameter, "Cannot connect to given "
"URL.")
def _validate_folder(parameter, path):
"""
:param parameter: Attribute that is being validated.
:type parameter: str
:param path: Path to folder being checked.
:type path: str
:return: None
:raise: ParameterNotValid
"""
if not os.path.exists(path):
raise ParameterNotValid(path, parameter, "Folder does not exists.")
def _get_server_status_code(url):
"""
Download just the header of a URL and return the server's status code.
:param url: HTTP url to check its existence.
:type url: str
:return: One of the connection status from http.client.
:rtype: int
:raise: Any of the exceptions from http.client built-in module.
"""
# http://stackoverflow.com/questions/1140661
host, path = urlparse.urlparse(url)[1:3] # elems [1] and [2]
conn = http.HTTPConnection(host)
conn.request('HEAD', path)
status = conn.getresponse().status
conn.close()
return status
def _validate_integer(parameter, value):
"""
:param parameter: Attribute that is being validated.
:type parameter: str
:param value: Value integer o string.
:type value: int or str
:return: Value converted to an integer.
:rtype: int
"""
try:
integer_value = int(value)
if integer_value <= 0:
raise ValueError
except ValueError:
raise ParameterNotValid(value, parameter, "Cannot convert to int.")
return integer_value
def _text_has_spaces(text):
"""
:param text:
:type text: str
:return: True if text has any space or false otherwise.
:rtype: bool
"""
words = text.split(" ")
if len(words) > 1:
return True
else:
return False
def load_configuration():
""" Read configuration file and populate with its data a
config.Configuration instance.
:return: Configuration instance populated with configuration file data.
:rtype: config.Configuration
"""
try:
configuration, license_key = _read_config_file()
except ConfigNotFound:
_create_default_config_file()
configuration = load_configuration()
return configuration
def _read_config_file():
""" Load all configuration parameters set in config file.
:return: Configuration instance populated with configuration file data.
:rtype: config.Configuration
:raise: config.ConfigNotFound
"""
configuration_parser = _create_config_parser()
try:
license_key = _load_password(configuration_parser["webservice"]["user_id"])
except KeyError: # Key may have not been set yet.
        license_key = ""
locators_preference = _string_to_list(configuration_parser["locators_preference"]["preference"])
configuration = Configuration(
user_id=configuration_parser["webservice"]["user_id"],
license_key=license_key,
download_url=configuration_parser["local_database"]["download_url"],
update_interval=int(configuration_parser["local_database"]["update_interval"]),
local_database_folder=configuration_parser["local_database"]["local_database_folder"],
local_database_name=configuration_parser["local_database"]["local_database_name"],
locators_preference=locators_preference
)
return configuration, license_key
def _create_config_parser():
""" Create a config parser object and load in it our configuration.
:return: A config parser with configuration loaded.
:rtype: configparser.ConfigParser
:raise: config.ConfigNotFound
"""
try:
configuration_parser = configparser.ConfigParser()
with open(CONFIG_FILE_PATH) as config_file:
configuration_parser.read_file(config_file)
except FileNotFoundError:
raise ConfigNotFound()
except Exception as e:
print("Something odd happened: ", repr(e))
return configuration_parser
def _string_to_list(string_list):
""" Take a string with comma separated elements a return a list of
those elements.
:param string_list: String with comma separated elements.
:type string_list: str
:return: List with elements.
:rtype: list
"""
string_list_no_spaces = string_list.replace(' ', '')
element_list = list(string_list_no_spaces.split(','))
return element_list
def _create_default_config_file():
""" Create a default configuration file.
:return: None
"""
print("Geolocate configuration not found, creating a default one.")
default_configuration = Configuration()
save_configuration(default_configuration)
def save_configuration(configuration):
""" Write Configuration object in config file.
:param configuration: Configuration to be saved.
:type configuration: config.Configuration
:return: None
"""
configuration_parameters = configuration.get_configuration_dict()
_save_password(configuration.user_id, configuration.license_key)
configuration_parsed = _parse_parameters(configuration_parameters)
try:
with open(CONFIG_FILE_PATH, "w") as config_file:
configuration_parsed.write(config_file)
except FileNotFoundError: # Configuration path probably does not exist yet.
path_to_create = os.path.dirname(CONFIG_FILE_PATH)
os.makedirs(path_to_create)
save_configuration(configuration)
def _parse_parameters(configuration_parameters):
""" Insert parameter into a configparse file.
:param configuration_parameters:
:type configuration_parameters: dict
:return: None
"""
configuration_parsed = configparser.ConfigParser()
for section in configuration_parameters.keys():
configuration_parsed[section] = configuration_parameters[section]
return configuration_parsed
def _save_password(username, password):
""" Save password ciphered in system keyring.
:param username: Username to be used with Maxmind webservice.
:type username: str
:param password: Password of Maxmind account.
:type password: str
:return: None
"""
if not username.strip() == '':
keyring.set_password(GEOLOCATE_VAULT, username, password)
def _load_password(username):
""" Retrieve password from system keyring.
:param username: Username used with Maxmind webservice.
:type username: str
:return: None
"""
try:
recovered_password = keyring.get_password(GEOLOCATE_VAULT, username)
except RuntimeError as e:
if "No recommended backend was available" in repr(e):
print("Unable to access to keyring.\n",
"Keyring package returned next error: ", str(e))
recovered_password = ""
return recovered_password
def _delete_password(username):
""" Delete password assigned to username and stored in system keyring.
:param username: Username used with Maxmind webservice.
:type username: str
:return: None
"""
keyring.delete_password(GEOLOCATE_VAULT, username)
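# Round-trip sketch (illustration only, with placeholder credentials): the
# three helpers above store, read and remove a Maxmind license key in the
# system keyring under the GEOLOCATE_VAULT service, keyed by user id.
def _example_password_round_trip():
    _save_password("example_user_id", "example_license_key")
    assert _load_password("example_user_id") == "example_license_key"
    _delete_password("example_user_id")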
def _get_folder_path(path):
""" If path is relative, get absolute path of current working directory
suffixed by path. If path is absolute, just return it.
:param path: Path to get absolute form.
:type path: str
:return: Absolute path.
:rtype: str
"""
absolute_directory = None
if path.startswith("/"):
absolute_directory = path
else:
current_working_directory = os.getcwd()
absolute_directory = "{0}/{1}".format(current_working_directory,
path)
return absolute_directory
def _unknown_locators(locator_list):
""" Detects if any locator in provided list is not registered as a valid one.
Enabled locators are registered in DEFAULT_LOCATORS_PREFERENCE constant.
Locators have to be one of them to be declared valid.
:param locator_list: String list with locator names.
:type locator_list: list
:return: True if any locator in list is not within default locator list, else False.
:rtype: bool
"""
locator_set = set(locator_list)
default_locator_set = set(DEFAULT_LOCATORS_PREFERENCE)
if locator_set <= default_locator_set:
return False
else:
return True
def _get_unknown_locators(locator_list):
"""
:param locator_list: String list with locator names.
:type locator_list: list
:return: Set with unknown locators detected.
:rtype: set
"""
locator_set = set(locator_list)
default_locator_set = set(DEFAULT_LOCATORS_PREFERENCE)
return locator_set - default_locator_set
class ConfigNotFound(Exception):
""" Launched when config file is not where is supposed to be."""
def __init__(self):
message = "Configuration file is not at it's default " \
"location: {0}".format(CONFIG_FILE_PATH)
Exception.__init__(self, message)
class ParameterNotValid(Exception):
""" Launched when user_id validation fails."""
def __init__(self, provided_value, parameter, message):
self.provided_value = provided_value
self.parameter = parameter
parameter_message = "There is a problem with parameter {0}, you " \
"gave {1} as value.\n " \
"Problem is: \n".format(parameter, provided_value)
final_message = "".join([parameter_message, message])
Exception.__init__(self, final_message)
class UnknownLocators(Exception):
""" Raised when an still not implemented location is referenced in any
operation.
"""
def __init__(self, unknown_locators):
unknown_locators_text = " ".join(unknown_locators)
self.message = " ".join(["You tried to use not implemented locators:",
unknown_locators_text])
Exception.__init__(self, self.message)
class OpenConfigurationToUpdate(object):
""" Context manager to get a configuration file and save it automatically.
"""
def __init__(self):
self.configuration = load_configuration()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
save_configuration(self.configuration)
if exc_type is None:
return True
else:
return False
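# Usage sketch (illustration only): changing a single setting through the
# context manager; the updated configuration is written back to disk on exit.
def _example_update_through_context_manager(new_interval_in_days=40):
    with OpenConfigurationToUpdate() as config_file:
        config_file.configuration.update_interval = new_interval_in_days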
| bsd-3-clause | -2,862,953,470,390,364,700 | 33.823105 | 113 | 0.656127 | false |
google/TensorNetwork | tensornetwork/block_sparse/caching.py | 1 | 2665 | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from typing import List, Union, Any, Tuple, Optional, Sequence
# currently there is only one global cacher that does caching.
# this could be changed later on to having stacks of cachers,
# i.e. different cache levels
_INSTANTIATED_CACHERS = []
class Cacher:
def __init__(self) -> None:
self.cache = {}
self.do_caching = False
def set_status(self, value) -> None:
self.do_caching = value
def clear_cache(self) -> None:
self.cache = {}
@property
def is_empty(self) -> bool:
return len(self.cache) == 0
def get_cacher() -> Cacher:
"""
Return a `Cacher` object which can be used to perform
caching of block-data for block-sparse tensor contractions.
"""
if len(_INSTANTIATED_CACHERS) == 0:
_INSTANTIATED_CACHERS.append(Cacher())
return _INSTANTIATED_CACHERS[0]
def enable_caching() -> None:
"""
Enable caching of block-data for block-sparse contraction.
If enabled, all data that is needed to perform binary tensor contractions
will be cached in a dictionary for later reuse.
Enabling caching can significantly speed tensor contractions,
but can lead to substantially larger memory footprints.
In particular if the code uses tensor decompositions like QR, SVD
eig, eigh or any similar method, enabling caching can cause
catastrophic memory clutter, so use caching with great care.
The user can at any point clear the cache by calling
`tn.block_sparse.clear_cache()`.
"""
get_cacher().set_status(True)
def disable_caching() -> None:
"""
Disable caching of block-data for block-sparse tensor contractions.
Note that the cache WILL NOT BE CLEARED.
Clearing the cache can be achieved by calling
`tn.block_sparse.clear_cache()`.
"""
get_cacher().set_status(False)
def clear_cache() -> None:
"""
Clear the cache that stores block-data for block-sparse tensor contractions.
"""
get_cacher().clear_cache()
def get_caching_status() -> bool:
return get_cacher().do_caching
def set_caching_status(status) -> None:
get_cacher().set_status(status)
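# A minimal usage sketch (not part of the original module): enable the cache
# around a group of block-sparse contractions, then release the stored
# block-data. The contraction itself is elided here; any binary block-sparse
# contraction performed while caching is enabled will populate the cache.
def _caching_usage_example() -> None:  # pragma: no cover
  enable_caching()
  # ... run repeated block-sparse contractions that share a block layout ...
  clear_cache()
  disable_caching()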
| apache-2.0 | -2,057,196,932,858,687,500 | 29.284091 | 78 | 0.72045 | false |
reingart/erplibre | controllers/scm.py | 1 | 9384 | # -*- coding: utf-8 -*-
# This is a Python open source project for migration of modules
# and functions from GestionPyme and other ERP products from Sistemas
# Ágiles.
#
# Copyright (C) 2012 Sistemas Ágiles.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Alan Etkin <[email protected]>"
__copyright__ = "Copyright (C) 2012 Sistemas Ágiles"
__license__ = "AGPLv3"
import gluon
from gluon import *
import datetime
import config
T = config.env["T"]
db = config.db
session = config.session
request = config.request
# modules = __import__('applications.%s.modules' % config.WEB2PY_APP_NAME, globals(), locals(), ['operations', 'crm'], -1)
# crm = modules.crm
# operations = modules.operations
from modules import crm, operations
# import applications.erplibre.modules.operations as operations
# import applications.erplibre.modules.crm as crm
from gui2py.form import EVT_FORM_SUBMIT
def index():
return dict()
def ria_stock(evt, args=[], vars={}):
stock_list = None
session.form = SQLFORM.factory(
Field('warehouse', 'reference warehouse', \
requires=IS_EMPTY_OR(IS_IN_DB(db, db.warehouse, \
'%(description)s'))), \
Field('product', 'reference concept', \
requires = IS_EMPTY_OR(IS_IN_DB(db(\
db.concept.stock == True), \
'concept.concept_id', '%(description)s')))
)
session.q = session.get("q", None)
# Query for the stock list
if evt is not None:
if session.form.accepts(evt.args, formname=None, keepvalues=False, dbio=False):
session.q = None
warehouse_query = db.stock.warehouse_id == \
session.form.vars.warehouse
product_query = db.stock.concept_id == \
session.form.vars.product
# filter by product if requested
if session.form.vars.product is not None:
if len(session.form.vars.product) > 0:
session.q = product_query
if session.form.vars.warehouse is not None:
if len(session.form.vars.warehouse) > 0:
if session.q is None:
session.q = warehouse_query
else:
session.q &= warehouse_query
return config.html_frame.window.OnLinkClicked(URL(a=config.APP_NAME, c="scm", f="ria_stock"))
else:
config.html_frame.window.Bind(EVT_FORM_SUBMIT, ria_stock)
if session.q is None: session.q = db.stock
# stock list records
s = db(session.q)
rows = s.select()
# TODO: presentation code should go into the view
columns = ['stock.stock_id', 'stock.code', \
'stock.concept_id', \
'stock.posted', 'stock.value']
headers = {'stock.stock_id': T('Edit'), 'stock.code': T('Code'), \
'stock.concept_id': T('Product'), 'stock.posted': T('Posted'), \
'stock.value': T('Value')}
# TODO: unify action/function naming conventions
stock_list = SQLTABLE(rows, columns = columns, \
headers = headers, \
linkto=URL(a=config.APP_NAME, c="scm", f="stock_item_update"))
change_stock_form = A(T("Change stock"), _href=URL(a=config.APP_NAME, c="scm", f="change_stock"))
stock_movement_form = A(T("Stock movement"), _href=URL(a=config.APP_NAME, c="scm", f="stock_movement"))
return dict(stock_list = stock_list, \
stock_query_form = session.form, \
stock_movement_form = stock_movement_form, \
change_stock_form = change_stock_form)
def stock_item_update(evt, args=[], vars={}):
if len(args) > 1:
session.stock_id = args[1]
session.form = SQLFORM(db.stock, session.stock_id)
if evt is not None:
if session.form.accepts(evt.args, formname=None, keepvalues=False, dbio=False):
db.stock[session.stock_id].update_record(**session.form.vars)
db.commit()
print T("Record updated")
return config.html_frame.window.OnLinkClicked(URL(a=config.APP_NAME, c="scm", f="ria_stock"))
else:
config.html_frame.window.Bind(EVT_FORM_SUBMIT, stock_item_update)
return dict(form = session.form)
def stock_movement(evt, args=[], vars={}):
# Move stock
session.form = SQLFORM.factory(\
Field('product', 'reference concept', \
requires = IS_IN_DB(db(db.concept.stock == True), \
'concept.concept_id', '%(description)s')), \
Field('warehouse', 'reference warehouse', \
requires=IS_IN_DB(db, db.warehouse, '%(description)s')), \
Field('destination', 'reference warehouse', \
requires=IS_IN_DB(db, db.warehouse, \
'%(description)s')), Field('quantity', \
'double', \
requires=IS_FLOAT_IN_RANGE(-1e6, 1e6)), \
)
if evt is not None:
if session.form.accepts(evt.args, formname=None, keepvalues=False, dbio=False):
stock_item_source = db((\
db.stock.concept_id == session.form.vars.product) & (\
db.stock.warehouse_id == session.form.vars.warehouse)).select(\
).first()
if session.form.vars.warehouse == session.form.vars.destination:
print T("Please choose different warehouses")
elif stock_item_source is not None:
tmp_stock_value = stock_item_source.value - float(\
session.form.vars.quantity)
if tmp_stock_value < 0:
# negative stock
print T("Insufficient source stock quantity")
else:
# get or create a stock
stock_item_destination = db((\
db.stock.warehouse_id == session.form.vars.destination\
) & (\
db.stock.concept_id == session.form.vars.product)\
).select().first()
if stock_item_destination is None:
stock_item_destination_id = db.stock.insert(\
warehouse_id = session.form.vars.destination, \
concept_id = session.form.vars.product, value = 0.0)
else:
stock_item_destination_id = \
stock_item_destination.stock_id
stock_item_source.update_record(\
value = stock_item_source.value - \
float(session.form.vars.quantity))
old_value = float(\
db.stock[stock_item_destination_id].value)
db.stock[stock_item_destination_id].update_record(\
value = old_value + float(session.form.vars.quantity))
db.commit()
print T("Stock updated")
return config.html_frame.window.OnLinkClicked(URL(a=config.APP_NAME, c="scm", f="ria_stock"))
else:
# the item does not exist
print T("The item specified was not found in the warehouse")
else:
config.html_frame.window.Bind(EVT_FORM_SUBMIT, stock_movement)
return dict(form = session.form)
def change_stock(evt, args=[], vars={}):
# Change stock value
session.form = SQLFORM.factory(
Field('product', 'reference concept', \
requires = IS_IN_DB(db(db.concept.stock == True), \
"concept.concept_id", "%(description)s")), \
Field('warehouse', 'reference warehouse', \
requires=IS_IN_DB(db, db.warehouse, '%(description)s')), \
Field('quantity', 'double', \
requires=IS_FLOAT_IN_RANGE(-1e6, +1e6)), \
)
if evt is not None:
if session.form.accepts(evt.args, formname=None, keepvalues=False, dbio=False):
stock_item = db((\
db.stock.concept_id == session.form.vars.product) & \
(db.stock.warehouse_id == session.form.vars.warehouse\
)).select().first()
if stock_item is None:
stock_item_id = db.stock.insert(\
warehouse_id = session.form.vars.warehouse, \
concept_id = session.form.vars.product, value = 0.0)
else:
stock_item_id = stock_item.stock_id
tmp_value = db.stock[stock_item_id].value + \
float(session.form.vars.quantity)
if tmp_value < 0:
print T("Insufficient stock value.")
else:
db.stock[stock_item_id].update_record(\
value = tmp_value)
db.commit()
print T("Stock value changed")
return config.html_frame.window.OnLinkClicked(URL(a=config.APP_NAME, c="scm", f="ria_stock"))
else:
config.html_frame.window.Bind(EVT_FORM_SUBMIT, change_stock)
return dict(form = session.form)
| agpl-3.0 | -3,261,670,011,133,558,300 | 37.604938 | 122 | 0.588423 | false |
suyashbire1/pyhton_scripts_mom6 | plot_twapv_budget_complete.py | 1 | 18150 | import sys
import readParams_moreoptions as rdp1
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mom_plot1 import m6plot, xdegtokm
import numpy as np
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time
from plot_twamomx_budget_complete_direct_newest import extract_twamomx_terms
from plot_twamomy_budget_complete_direct_newest import extract_twamomy_terms
import pyximport
pyximport.install()
from getvaratzc import getvaratzc5, getvaratzc
def getutwa(fhgeo, fh, fh2, sl):
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
uh = (fh.variables['uh_masked'][sl].filled(0)*dt).sum(axis=0,keepdims=True)/np.sum(dt)
h_cu = fh.variables['h_Cu'][sl].filled(0).mean(axis=0,keepdims=True)
h_cu = np.ma.masked_array(h_cu,mask=(h_cu<1e-3))
dycu = fhgeo.variables['dyCu'][sl[2:]]
utwa = uh/h_cu/dycu
return utwa, h_cu
def getvtwa(fhgeo, fh, fh2, sl):
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
vh = (fh.variables['vh_masked'][sl]*dt).sum(axis=0,keepdims=True)/np.sum(dt)
h_cv = fh.variables['h_Cv'][sl].mean(axis=0,keepdims=True)
h_cv = np.ma.masked_array(h_cv,mask=(h_cv<1e-3))
dxcv = fhgeo.variables['dxCv'][sl[2:]]
vtwa = vh/dxcv/h_cv
vtwa = np.concatenate((vtwa,-vtwa[:,:,:,-1:]),axis=3)
h_cv = np.concatenate((h_cv,h_cv[:,:,:,-1:]),axis=3)
return vtwa, h_cv
def getpv(fhgeo, fh, fh2, xs, xe, ys, ye, zs=0, ze=None):
sl = np.s_[:,zs:ze,ys:ye,xs:xe]
slpy = np.s_[:,zs:ze,ys:ye+1,xs:xe]
utwa,h_cu = getutwa(fhgeo, fh, fh2, slpy)
dybu = fhgeo.variables['dyBu'][sl[2:]]
utway = np.diff(utwa,axis=2)/dybu
vtwa,h_cv = getvtwa(fhgeo, fh, fh2, sl)
dxbu = fhgeo.variables['dxBu'][sl[2:]]
vtwax = np.diff(vtwa,axis=3)/dxbu
h_q = 0.25*(h_cu[:,:,:-1,:] + h_cu[:,:,1:,:] +
h_cv[:,:,:,:-1] + h_cv[:,:,:,1:])
f = fhgeo.variables['f'][sl[2:]]
pvhash = (f - utway + vtwax)/h_q
return pvhash, h_q
def extract_twapv_terms(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,zs,ze,meanax,fil3=None,
alreadysaved=False):
if not alreadysaved:
keepax = ()
for i in range(4):
if i not in meanax:
keepax += (i,)
fhgeo = dset(geofil)
fh = mfdset(fil)
fh2 = mfdset(fil2)
zi = rdp1.getdims(fh)[2][0]
dbl = -np.diff(zi)*9.8/1031
(xs,xe),(ys,ye),dimq = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
slat=ystart, nlat=yend,zs=zs,ze=ze,xhxq='xq',yhyq='yq')
dxbu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][4]
dybu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][5]
aq = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[1][1]
f = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[-1]
nt_const = dimq[0].size
pvhash,hq = getpv(fhgeo, fh, fh2, xs, xe, ys, ye)
sl = np.s_[:,zs:ze,ys:ye,xs:xe]
slpy = np.s_[:,zs:ze,ys:ye+1,xs:xe]
dxcu = fhgeo.variables['dxCu'][slpy[2:]]
dycv = fhgeo.variables['dyCv'][sl[2:]]
dycv = np.concatenate((dycv,dycv[:,-1:]),axis=1)
if fil3:
fh3 = mfdset(fil3)
sltn = np.s_[-1:,zs:ze,ys:ye,xs:xe]
islayerdeep0 = fh3.variables['islayerdeep'][-1:,0,0,0]
islayerdeep = (fh3.variables['islayerdeep'][sltn].filled(np.nan))
swash = (islayerdeep0 - islayerdeep)/islayerdeep0*100
fh3.close()
else:
swash = None
xmom = extract_twamomx_terms(geofil,vgeofil,fil,fil2,xs,xe,ys,ye+1,zs,ze,(0,),
alreadysaved=False,xyasindices=True,calledfrompv=True)[2]
ymom = extract_twamomy_terms(geofil,vgeofil,fil,fil2,xs,xe,ys,ye,zs,ze,(0,),
alreadysaved=False,xyasindices=True,calledfrompv=True)[2]
xmom = xmom[np.newaxis,:,:,:,:]
ymom = ymom[np.newaxis,:,:,:,:]
ymom = np.concatenate((ymom,ymom[:,:,:,-1:]),axis=3)
bxppvflx = np.sum(xmom[:,:,:,:,[0,1,3,4]],axis=4)
pvhash1,_ = getpv(fhgeo, fh, fh2, xs, xe, ys-1, ye+1)
sl1 = np.s_[:,zs:ze,ys-1:ye+1,xs:xe]
vtwa1, h_cv1 = getvtwa(fhgeo,fh,fh2,sl1)
vtwa1 = 0.5*(vtwa1[:,:,:,:-1] + vtwa1[:,:,:,1:])
pvhashvtwa = pvhash1*vtwa1
sl1 = np.s_[:,zs:ze,ys:ye+1,xs:xe]
h_cu1 = fh.variables['h_Cu'][sl1].filled(np.nan).mean(axis=0,keepdims=True)
h_cu1 = np.ma.masked_array(h_cu1,mask=(h_cu1<1e-3))
pvflxx = h_cu1*(pvhashvtwa[:,:,:-1,:]+pvhashvtwa[:,:,1:,:])/2
byppvflx = np.sum(ymom[:,:,:,:,[0,1,3,4]],axis=4)
pvhash1,_ = getpv(fhgeo, fh, fh2, xs-1, xe, ys, ye)
sl1 = np.s_[:,zs:ze,ys:ye+1,xs-1:xe]
utwa1, h_cu1 = getutwa(fhgeo,fh,fh2,sl1)
utwa1 = 0.5*(utwa1[:,:,:-1,:]+utwa1[:,:,1:,:])
pvhashutwa = pvhash1*utwa1
pvhashutwa[:,:,:,-1] = 0.0
sl1 = np.s_[:,zs:ze,ys:ye,xs:xe]
h_cv1 = fh.variables['h_Cv'][sl1].mean(axis=0,keepdims=True)
h_cv1 = np.ma.masked_array(h_cv1,mask=(h_cv1<1e-3))
pvflxy = h_cv1*(pvhashutwa[:,:,:,:-1]+pvhashutwa[:,:,:,1:])/2
pvflxy = np.concatenate((pvflxy,pvflxy[:,:,:,-1:]),axis=3)
bx = bxppvflx - pvflxx
by = byppvflx + pvflxy
xmom1 = xmom[:,:,:,:,[2,5,6,7,8,9]]
xmom1 = np.concatenate((+pvflxx[:,:,:,:,np.newaxis], xmom1, bx[:,:,:,:,np.newaxis]), axis = 4)
ymom1 = ymom[:,:,:,:,[2,5,6,7,8,9]]
ymom1 = np.concatenate((-pvflxy[:,:,:,:,np.newaxis], ymom1, by[:,:,:,:,np.newaxis]), axis = 4)
#pv = (-np.diff(xmom*dxcu[:,:,np.newaxis],axis=2) + np.diff(ymom*dycv[:,:,np.newaxis],axis=3))/aq[:,:,np.newaxis]
pv = -np.diff(xmom,axis=2)/dybu[:,:,np.newaxis] + np.diff(ymom,axis=3)/dxbu[:,:,np.newaxis]
pv1x = -np.diff(xmom1*dxcu[:,:,np.newaxis],axis=2)/aq[:,:,np.newaxis]
pv1y = np.diff(ymom1*dycv[:,:,np.newaxis],axis=3)/aq[:,:,np.newaxis]
slyp = np.s_[:,:,ys:ye+1,xs:xe]
ah1 = fhgeo.variables['Ah'][slyp[2:]]
slxmyp = np.s_[:,:,ys:ye+1,xs-1:xe]
uh = fh2.variables['uh'][slxmyp].filled(0).mean(axis=0,keepdims=True)
uhx = np.diff(uh,axis=3)/ah1
uhx = np.concatenate((uhx,uhx[:,:,:,-1:]),axis=3)
uhx = 0.25*(uhx[:,:,:-1,:-1] + uhx[:,:,:-1,1:] + uhx[:,:,1:,:-1] +
uhx[:,:,1:,1:])
pv1y[:,:,:,:,0] += pvhash*uhx
slymp = np.s_[:,:,ys-1:ye+1,xs:xe]
vh = fh2.variables['vh'][slymp].mean(axis=0,keepdims=True)
vhy = np.diff(vh,axis=2)/ah1
vhy = np.concatenate((vhy,vhy[:,:,:,-1:]),axis=3)
vhy = 0.25*(vhy[:,:,:-1,:-1] + vhy[:,:,:-1,1:] + vhy[:,:,1:,:-1] +
vhy[:,:,1:,1:])
pv1x[:,:,:,:,0] += pvhash*vhy
wd = fh2.variables['wd'][slyp].mean(axis=0,keepdims=True)
wdb = np.diff(wd,axis=1)
wdb = np.concatenate((wdb,wdb[:,:,:,-1:]),axis=3)
wdb = 0.25*(wdb[:,:,:-1,:-1] + wdb[:,:,:-1,1:] + wdb[:,:,1:,:-1] +
wdb[:,:,1:,1:])
pv3 = pvhash*wdb
pv3 = pv3[:,:,:,:,np.newaxis]
#hq[hq<1] = np.nan
pvnew = np.concatenate((pv1y[:,:,:,:,:1],
pv1x[:,:,:,:,:1],
pv3,
pv1x[:,:,:,:,1:-1],
pv1y[:,:,:,:,1:-1],
pv1x[:,:,:,:,-1:]+pv1y[:,:,:,:,-1:]),axis=4)/hq[:,:,:,:,np.newaxis]
pv = np.ma.filled(pv.astype(np.float64), np.nan)
pvnew = np.ma.filled(pvnew.astype(np.float64), np.nan)
pvhash = np.ma.filled(pvhash.astype(np.float64), np.nan)
pv = np.nanmean(pv,meanax,keepdims=True)
pvnew = np.nanmean(pvnew,meanax,keepdims=True)
pvhash = np.nanmean(pvhash,meanax,keepdims=True)
X = dimq[keepax[1]]
Y = dimq[keepax[0]]
if 1 in keepax:
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
em = (fh2.variables['e'][0:,zs:ze,ys:ye,xs:xe]*dt).sum(axis=0,keepdims=True)/np.sum(dt)
em = np.nanmean(em, meanax,keepdims=True)
z = np.linspace(-3000,0,100)
Y = z
P = getvaratzc5(pv.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
Pnew = getvaratzc5(pvnew.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
pvhash = getvaratzc(pvhash.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
if fil3:
swash = np.nanmean(swash, meanax,keepdims=True)
swash = getvaratzc(swash.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
else:
P = pv.squeeze()
Pnew = pvnew.squeeze()
pvhash = pvhash.squeeze()
swash = swash.squeeze()
np.savez('twapv_complete_terms', X=X,Y=Y,P=P)
else:
npzfile = np.load('twapv_complete_terms.npz')
X = npzfile['X']
Y = npzfile['Y']
P = npzfile['P']
fhgeo.close()
fh.close()
fh2.close()
return (X,Y,P,pvhash,Pnew,swash)
def plot_twapv(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,zs,ze,meanax,
fil3=None,plotterms = [0,1,10,11,12,13], swashperc = 1,
cmaxpercfactor = 1,cmaxpercfactorpvhash=15,cmaxpercfactorPnew=15, savfil=None,savfilep=None,alreadysaved=False):
X,Y,P,pvhash,Pnew,swash = extract_twapv_terms(geofil,vgeofil,fil,fil2,
xstart,xend,ystart,yend,zs,ze,meanax,alreadysaved=alreadysaved,fil3=fil3)
cmax = np.nanpercentile(P,[cmaxpercfactor,100-cmaxpercfactor])
cmax = np.max(np.fabs(cmax))
fig,ax = plt.subplots(np.int8(np.ceil(P.shape[-1]/2)),2,
sharex=True,sharey=True,figsize=(12, 9))
ti = ['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)',
'(i)','(j)','(k)','(l)','(m)','(n)','(o)','(p)','(q)','(r)']
labx = [ r'$(\hat{u}\hat{u}_{\tilde{x}})_{\tilde{y}}$',
r'$(\hat{v}\hat{u}_{\tilde{y}})_{\tilde{y}}$',
r'$(\hat{\varpi}\hat{u}_{\tilde{b}})_{\tilde{y}}$',
r'$(-f\hat{v})_{\tilde{y}}$',
r'$(\overline{m_{\tilde{x}}})_{\tilde{y}}$',
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}u^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{x}})_{\tilde{y}}$""",
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{y}}$""",
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{x}}^{\prime}})_{\tilde{b}})_{\tilde{y}}$""",
r'$(-\widehat{X^H})_{\tilde{y}}$',
r'$(-\widehat{X^V})_{\tilde{y}}$']
laby = [ r'$(-\hat{u}\hat{v}_{\tilde{x}})_{\tilde{x}}$',
r'$(-\hat{v}\hat{v}_{\tilde{y}})_{\tilde{x}}$',
r'$(-\hat{\varpi}\hat{v}_{\tilde{b}})_{\tilde{x}}$',
r'$(-f\hat{u})_{\tilde{x}}$',
r'$(-\overline{m_{\tilde{y}}})_{\tilde{x}}$',
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{x}})_{\tilde{x}}$""",
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{v^{\prime \prime}v^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{y}})_{\tilde{x}}$""",
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{v^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r'$(\widehat{Y^H})_{\tilde{x}}$',
r'$(\widehat{Y^V})_{\tilde{x}}$']
for i in range(P.shape[-1]):
axc = ax.ravel()[i]
im = m6plot((X,Y,P[:,:,i]),axc,vmax=cmax,vmin=-cmax,ptype='imshow',
txt=labx[i]+' + '+laby[i], ylim=(-2500,0),
cmap='RdBu_r', cbar=False)
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if i % 2 == 0:
axc.set_ylabel('z (m)')
if i > np.size(ax)-3:
xdegtokm(axc,0.5*(ystart+yend))
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
im = m6plot((X,Y,np.sum(P,axis=2)),vmax=cmax,vmin=-cmax,ptype='imshow',cmap='RdBu_r',ylim=(-2500,0))
if savfil:
plt.savefig(savfil+'res.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(np.int8(np.ceil(len(plotterms)/2)),2,
sharex=True,sharey=True,figsize=(12,7))
cmaxpvhash = np.nanpercentile(pvhash,
[cmaxpercfactorpvhash,100-cmaxpercfactorpvhash])
cmaxpvhash = np.max(np.fabs(cmaxpvhash))
cmax = np.nanpercentile(Pnew,
[cmaxpercfactorPnew,100-cmaxpercfactorPnew])
cmax = np.max(np.fabs(cmax))
lab = [ r"$-\hat{u}\Pi^{\#}_{\tilde{x}}$",
r"$-\hat{v}\Pi^{\#}_{\tilde{y}}$",
r"$\Pi^{\#}(\bar{h} \hat{\varpi})_{\tilde{b}}$",
r"$\frac{(\hat{\varpi}\hat{u}_{\tilde{b}})_{\tilde{y}}}{\bar{h}}$",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}u^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{x}})_{\tilde{y}}$""",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{y}})_{\tilde{y}}$""",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{x}}^{\prime}})_{\tilde{b}})_{\tilde{y}}$""",
r'$-\frac{1}{\bar{h}}(\widehat{X^H})_{\tilde{y}}$',
r'$-\frac{1}{\bar{h}}(\widehat{X^V})_{\tilde{y}}$',
r'$-\frac{(\hat{\varpi}\hat{v}_{\tilde{b}})_{\tilde{x}}}{\bar{h}}$',
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{x}})_{\tilde{x}}$""",
#r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}v^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{y}})_{\tilde{x}}$""",
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}v^{\prime \prime}})_{\tilde{y}})_{\tilde{x}}$""",
#r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{\zeta_{\tilde{b}}}}(\overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r'$\frac{1}{\bar{h}}(\widehat{Y^H})_{\tilde{x}}$',
r'$\frac{1}{\bar{h}}(\widehat{Y^V})_{\tilde{x}}$',
r'$B_{\tilde{x} \tilde{y}} - B_{\tilde{y} \tilde{x}}$']
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
for i,p in enumerate(plotterms):
axc = ax.ravel()[i]
im = m6plot((X,Y,Pnew[:,:,p]),axc,vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[p],
cmap='RdBu_r', cbar=False)
im2 = axc.contour(X,Y,pvhash,np.logspace(-6,-5.5,5),colors='grey',linewidths=2)
im2.clabel(inline=True,fmt="%.1e")
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if i % 2 == 0:
axc.set_ylabel('z (m)')
if i > np.size(ax)-3:
xdegtokm(axc,0.5*(ystart+yend))
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'Pnew.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
im = m6plot((X,Y,np.sum(Pnew,axis=2)),ptype='imshow',vmax=cmax,vmin=-cmax,cmap='RdBu_r',ylim=(-2500,0))
if savfil:
plt.savefig(savfil+'Pnewres.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(1,2,sharex=True,sharey=True,figsize=(10, 3))
im = m6plot((X,Y,np.nansum(Pnew[:,:,:2],axis=2)),
ax[0],vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[0]+lab[1],
cmap='RdBu_r', cbar=False)
im = m6plot((X,Y,np.nansum(Pnew[:,:,12:13],axis=2)),
ax[1],vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[12],
cmap='RdBu_r', cbar=False)
ax[0].set_ylabel('z (m)')
for axc in ax:
xdegtokm(axc,0.5*(ystart+yend))
im2 = axc.contour(X,Y,pvhash,np.logspace(-6,-5.5,5),colors='grey',linewidths=2)
im2.clabel(inline=True,fmt="%.1e")
axc.set_ylim(-1200,0)
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'Pnewnew.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
cmax = np.nanpercentile(pvhash,
[cmaxpercfactorpvhash,100-cmaxpercfactorpvhash])
cmax = np.max(np.fabs(cmax))
im = m6plot((X,Y,pvhash),ptype='imshow',vmax=cmax,vmin=-cmax,cmap='RdBu_r',ylim=(-2500,0))
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if savfil:
plt.savefig(savfil+'pvhash.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
| gpl-3.0 | -3,115,476,061,697,227,300 | 46.889182 | 192 | 0.520441 | false |
osks/gearman-dashboard | gearmandashboard/models.py | 1 | 1059 | import gearman
gearman_hostports = [('gearman01', 4730), ('gearman02', 4730)]
gearman_connections = {}
for hostport in gearman_hostports:
gearman_connections[hostport] = gearman.GearmanAdminClient([hostport])
def get_info_from_gearman():
server_infos = []
for hostport in gearman_hostports:
server_info = { 'hostport': hostport }
try:
gm_conn = gearman_connections[hostport]
version = gm_conn.get_version()
status = gm_conn.get_status()
cl_wks = gm_conn.get_workers()
clients = [ w for w in cl_wks if len(w['tasks']) == 0 ]
workers = [ w for w in cl_wks if len(w['tasks']) > 0 ]
server_info['version'] = version
server_info['status'] = status
server_info['workers'] = workers
server_info['clients'] = clients
server_info['failed'] = False
except:
server_info['failed'] = True
server_infos.append(server_info)
return server_infos
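# Usage sketch (added for illustration; not part of the original module):
# shows how the per-server dictionaries built above are typically consumed.
def print_gearman_summary():
    for info in get_info_from_gearman():
        host, port = info['hostport']
        if info['failed']:
            print '%s:%s unreachable' % (host, port)
        else:
            print '%s:%s gearmand %s, %d workers, %d clients' % (
                host, port, info['version'],
                len(info['workers']), len(info['clients']))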
| mit | 1,503,034,650,286,523,600 | 33.16129 | 74 | 0.561851 | false |
gbenson/i8c | src/i8c/compiler/commands.py | 1 | 1574 | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
class Variable(object):
def __init__(self, name, default):
self.as_str = os.environ.get(name, default)
self.as_list = self.as_str.split()
def __str__(self):
return self.as_str
def __add__(self, other):
assert isinstance(other, list)
return self.as_list + other
# Program for compiling C programs.
I8C_CC = Variable("I8C_CC", "gcc")
# Program for running the C preprocessor, with results to standard output.
I8C_CPP = Variable("I8C_CPP", "%s -E -x c" % I8C_CC)
# Program for compiling assembly files.
I8C_AS = Variable("I8C_AS", "%s -x assembler-with-cpp" % I8C_CC)
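# Illustrative sketch (not part of the original module): a Variable renders as
# a string for display and prepends its argv list when a list is added, e.g.
# for subprocess.  The file names below are hypothetical.
def _example_assemble(source="note.S", output="note.o"):
    import subprocess
    # With the defaults above this expands to
    # ["gcc", "-x", "assembler-with-cpp", "-c", source, "-o", output].
    return subprocess.call(I8C_AS + ["-c", source, "-o", output])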
| lgpl-2.1 | -8,448,123,867,574,675,000 | 33.217391 | 74 | 0.70521 | false |
ccubed/Yakei | ConfParser.py | 1 | 3478 | # Parse Yakei Config Files
import os
class ConfMissingError(Exception):
def __init__(self):
self.value = "Yakei.conf not found"
def __str__(self):
return repr(self.value)
class ConfBadlyFormed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class DomainNotFound(Exception):
def __init__(self, value):
self.value = "Didn't find Domain {0}.".format(value)
def __str__(self):
return repr(self.value)
class YakeiConfig:
def __init__(self):
self.data = {}
self.currentDomain = ''
self.currentService = ''
self.currentServices = {}
self.isService = False
self.isDomain = False
if os.path.isfile("Yakei.conf"):
self.ReadConfig()
else:
raise ConfMissingError()
def ReadConfig(self):
conf = open('Yakei.conf','r')
for index, line in enumerate(conf.readlines()):
if not line.startswith('*'):
if 'Domain' in line.strip():
if line.strip().startswith('</') and self.isDomain:
self.isDomain = False
elif line.strip().startswith('<') and self.isDomain:
raise ConfBadlyFormed("Missing closing tag for Domain Directive. Line {0}.".format(index))
else:
self.isDomain = True
self.currentDomain = line.strip().split(" ")[1].rstrip('>')
self.data[self.currentDomain] = {}
elif 'Service' in line.strip():
if line.strip().startswith("</") and self.isService:
self.isService = False
self.data[self.currentDomain][self.currentService] = self.currentServices
elif line.strip().startswith("<") and self.isService:
raise ConfBadlyFormed("Missing closing tag for Service Directive. Line {0}.".format(index))
elif not self.isDomain:
raise ConfBadlyFormed("Service Directive without matching Domain Directive. Line {0}".format(index))
else:
self.isService = True
self.currentService = line.strip().split(" ")[1].rstrip('>')
self.currentServices = {}
elif '=' in line.strip():
which = line.strip().split('=')[0].lower()
value = line.strip().split('=')[1].lower()
if which not in ['type', 'index', 'poll', 'load', 'notification', 'endpoint', 'altlocation', 'datatype', 'expected', 'name']:
raise ConfBadlyFormed("Bad type. Got type {0} on line {1}".format(which, index))
elif not self.isDomain or not self.isService:
raise ConfBadlyFormed("Got a setting value outside a Domain or Service Directive. Line {0}.".format(index))
else:
self.currentServices[which] = value
def GetDomains(self):
return self.data.keys()
def GetServices(self, which):
for key in self.data.keys():
if key.lower() == which.lower():
return self.data[key].keys()
raise DomainNotFound(which)
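# Illustrative config sketch (not part of the original file), inferred from
# ReadConfig() above: lines beginning with '*' are skipped, <Domain ...> blocks
# wrap <Service ...> blocks, and each service holds key=value settings drawn
# from the allowed list (type, index, poll, load, notification, endpoint,
# altlocation, datatype, expected, name).  All values here are hypothetical.
#
#   * Example Yakei.conf
#   <Domain example.org>
#       <Service web>
#           type=http
#           endpoint=http://example.org/status
#           poll=60
#           expected=200
#       </Service>
#   </Domain>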
| mit | -8,791,185,630,320,041,000 | 43.168831 | 145 | 0.515239 | false |
dude56987/TVD | tvd.py | 1 | 7141 | #! /usr/bin/python
########################################################################
# Backend to dynamicly create and destroy virtual machines
# Copyright (C) 2015 Carl J Smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
import shutil
import os
import sys
########################################################################
def makeDir(remoteDir):
import os
	''' Creates the given directory. If a path containing directories
	that do not exist is given, they will be created as well, so beware of
	spelling mistakes as this will create the specified directory you
	type mindlessly.'''
temp = remoteDir.split('/')
remoteDir= ''
for i in temp:
remoteDir += (i + '/')
if os.path.exists(remoteDir):
print remoteDir , ': Already exists!, Moving on...'
else:
os.mkdir(remoteDir)
########################################################################
def loadFile(fileName):
try:
sys.stdout.write(("Loading :"+fileName))
fileObject=open(fileName,'r');
except:
print "Failed to load :",fileName
return False
fileText=''
lineCount = 0
for line in fileObject:
if line[:1] != '#':
fileText += line
sys.stdout.write('Loading line '+str(lineCount)+'...\r')
lineCount += 1
sys.stdout.write(("Finished Loading :"+fileName+'\r'))
sys.stdout.write((' \r'))
fileObject.close()
if fileText == None:
return False
else:
return fileText
#if somehow everything fails return false
return False
########################################################################
def writeFile(fileName,contentToWrite):
# figure out the file path
filepath = fileName.split(os.sep)
filepath.pop()
filepath = os.sep.join(filepath)
# check if path exists
if os.path.exists(filepath):
try:
fileObject = open(fileName,'w')
fileObject.write(contentToWrite)
fileObject.close()
print 'Wrote file:',fileName
except:
print 'Failed to write file:',fileName
return False
else:
print 'Failed to write file, path:',filepath,'does not exist!'
return False
########################################################################
def currentDirectory():
currentDirectory = os.path.abspath(__file__)
temp = currentDirectory.split(os.path.sep)
currentDirectory = ''
for item in range((len(temp)-1)):
if len(temp[item]) != 0:
currentDirectory += os.path.sep+temp[item]
return (currentDirectory+os.path.sep)
########################################################################
runType = 'default'; # used for when system arguments are not used
# split the arguments by - signs to pull arguments more correctly
# this allows you to split that result by spaces for arguments with multiple entries
inputs = ' '.join(sys.argv).replace('--','-').split('-')
for arg in inputs:
# split the arguments by spaces
arguments = arg.split(' ')
# grab main argument into its own variable
mainArgument = arguments[0]
# cut off the first argument for reading subsequent arguments
arguments = arguments[1:]
if (mainArgument in ['h','help']):
# print the help file
		print(loadFile('help.txt'))
exit()
elif (mainArgument in ['c','connect']):
# set the address to the first given address in arguments
# address needs to be username@location like ssh
destAddress= arguments[0]
# set the runtype to connect
runType = 'connect'
elif (mainArgument in ['s','server-setup']):
destAddress= arguments[0]
runType='serverSetup'
elif (mainArgument in ['S','server-connect']):
destAddress= arguments[0]
runType='serverConnect'
####################################################################
# deliver the payload after reading all arguments to the program
####################################################################
if runType=='connect':
# create the mac address based string for name of virtual machine
machineName=os.popen('ifconfig eth0 | sed "s/eth0.*Link.*.HWaddr //g" | sed "s/ $^inet.*//g" | sed "/^$/d" | sed "s/:/_/g"').read().split(' ')[0]
# delete previous instance of virtual machine, if one does
# not exist then this does nothing
if '--debug-run' in sys.argv:
print('ssh -t '+destAddress+' "virsh undefine '+machineName+' --remove-all-storage --wipe-storage"')
else:
os.system('ssh -t '+destAddress+' "virsh undefine '+machineName+' --remove-all-storage --wipe-storage"')
# connect to a remote virt-manager instance and create
# a new instance of the virtual machine
if '--debug-run' in sys.argv:
#print('ssh -t '+destAddress+' "virt-clone --replace -o baseImage --name '+machineName+' --file /usr/share/diskimages/'+machineName+'.qcow2;"')
print('ssh -t '+destAddress+' "virt-clone -o baseImage --name '+machineName+' --file /var/lib/libvirt/images/'+machineName+'.qcow2;"')
else:
#os.system('ssh -t '+destAddress+' "virt-clone -o baseImage --name '+machineName+' --file /usr/share/diskimages/'+machineName+'.qcow2;"')
os.system('ssh -t '+destAddress+' "virt-clone -o baseImage --name '+machineName+' --file /var/lib/libvirt/images/'+machineName+'.qcow2;"')
# launch virt-viewer to remotely connect to newly created machine
#print('virt-viewer -frk --connect qemu+ssh://'+destAddress+'/ '+machineName)
#os.system('virt-viewer -frk --connect qemu+ssh://'+destAddress+'/ '+machineName)
# start the virtual machine
if '--debug-run' in sys.argv:
#print('ssh -t '+destAddress+' "aa-complain /usr/sbin/libvirtd"')
print('ssh -t '+destAddress+' "virsh start '+machineName+'"')
#print('ssh -t '+destAddress+' "aa-enforce /usr/sbin/libvirtd"')
else:
#os.system('ssh -t '+destAddress+' "aa-complain /usr/sbin/libvirtd"')
os.system('ssh -t '+destAddress+' "virsh start '+machineName+'"')
#os.system('ssh -t '+destAddress+' "aa-enforce /usr/sbin/libvirtd"')
# run virt-viewer though x11 forwarding
if '--debug-run' in sys.argv:
print('ssh '+destAddress+' -t -X virt-viewer -frk '+machineName)
else:
os.system('ssh '+destAddress+' -t -X virt-viewer -frk '+machineName)
# -r = reconnect, -k = kiosk mode, -f = fullscreen
elif runType=='serverConnect':
	if os.path.exists(os.path.expanduser('~/.ssh/id_rsa')):
print('SSH Key exists! Skipping key generation.')
else:
# create rsa key for client
os.system('ssh-keygen -N "" -f ~/.ssh/id_rsa')
# copy the key to the server
os.system('ssh-copy-id '+destAddress)
elif runType=='serverSetup':
os.system('ssh -t '+destAddress+' "sudo apt-get install virt-manager --assume-yes"')
os.system('ssh -t '+destAddress+' "sudo apt-get install virt-viewer --assume-yes"')
exit()
| gpl-3.0 | -6,031,507,660,459,854,000 | 42.018072 | 146 | 0.633525 | false |
scpZone13/ProjectDIOMEDES | bot/NanoTrasenBot.py | 1 | 70001 | # -*- coding: utf-8 -*-
# This script is shared under the
# Creative Commons Attribution-ShareAlike 3.0 license (CC BY-SA 3.0)
# Added clause to Attribution:
# - You may not remove or hide the '<Bot_name> who created you?' functionality
# and you may not modify the name given in the response.
#CREDITS
# Author: Skibiliano
# "Foreign" Modules:
# Psyco 2.0 / Psyco 1.6
################# DEBUG STUFF #####################
import sys
import CORE_DATA
import urllib2
import socket
import irchat
################## END OF DEBUG STUFF ##############
#
# PSYCO
write_to_a_file = False #Only affects psyco
write_youtube_to_file = True #True = YTCV4 will load, false = YTCV3 will load
try:
import psyco
except ImportError:
print 'Psyco not installed, the program will just run slower'
psyco_exists = False
if write_to_a_file:
try:
tiedosto = open("psycodownload.txt","r")
except:
with open("psycodownload.txt","w") as tiedosto:
tiedosto.write("http://www.voidspace.org.uk/python/modules.shtml#psyco")
tiedosto.write("\nhttp://psyco.sourceforge.net/download.html")
print "Check psycodownload.txt for a link"
else:
print "For god's sake, open psycodownload.txt"
tiedosto.close()
else:
print "WINDOWS: http://www.voidspace.org.uk/python/modules.shtml#psyco"
print "LINUX: http://psyco.sourceforge.net/download.html"
else:
psyco_exists = True
# </PSYCO>
import C_rtd # rtd
import C_srtd # srtd
import C_makequote
import C_maths
import C_eightball #eightball
import C_sarcasticball
import C_heaortai # heaortai
import C_rot13 # rot13
import D_help # everything
import pickle
import Timeconverter
import xkcdparser
import time
import re
import Marakov_Chain
import Namecheck # Namecheck
import Weather
#SLOWER THAN RANDOM.CHOICE
import thread
import random
import Shortname # shortname
import subprocess
import some_but_not_all_2 #sbna2 (sbna)
#import YTCv3 # YTCV2 OUTDATED
import os
import save_load # save, load
from some_but_not_all_2 import sbna2 as sbna
from time import sleep
from random import choice as fsample
from C_rtd import rtd
from C_heaortai import heaortai
from C_srtd import srtd
if write_youtube_to_file:
from YTCv4 import YTCV4 as YTCV2
else:
from YTCv3 import YTCV2 #Downgraded version supports Cache disabling, but is slower
from save_load import save,load
if psyco_exists:
def psyco_bond(func):
psyco.bind(func)
return func.__name__+" Psycofied"
for a in [rtd,srtd,C_heaortai.heaortai,sbna,YTCV2,fsample,C_rot13.rot13,C_eightball.eightball,fsample,
C_eightball.eightball,C_sarcasticball.sarcasticball,Marakov_Chain.form_sentence,Marakov_Chain.give_data]:
print psyco_bond(a)
global dictionary
global Name,SName
global allow_callnames,offline_messages,hasnotasked,shortform
## For autoRecv()
global disconnects,channel,conn
## For stop()
global operators
## For replace()
global usable,fixing,curtime
## For target()
global CALL_OFF,logbans
## For check()
global influx
######
autodiscusscurtime = 0
conn = 0
curtime = -999
dance_flood_time = 10
disconnects = 0
responsiveness_delay = 0.5 #500 millisecond delay if no message
trackdance = 0
discard_combo_messages_time = 1 #They are discarded after 1 second.
uptime_start = time.time()
# - - - - -
####
aggressive_pinging = True # Bring the hammer on ping timeouts
aggressive_pinging_delay = 150 # How often to send a ping
aggressive_pinging_refresh = 2.5 # How long is the sleep between checks
####
allow_callnames = True #Disables NT, call if the variable is False
automatic_youtube_reveal = True
birthday_announced = 0 #Will be the year when it was announced
call_to_action = False
call_me_max_length = 20
CALL_OFF = False
connected = False
dance_enabled = True
comboer = ""
comboer_time = 0
directories = ["fmlquotes","Marakov","memos","suggestions",
"userquotes","banlog","YTCache","xkcdcache"] #These will be created if they do not exist
debug = True
duplicate_notify = False
enabled = True
fixing = False
fml_usable = True
hasnotasked = True
highlights = False
logbans = True
maths_usable = True
marakov = True
nudgeable = True
offensive_mode = False
offline_messages = True
offline_message_limit = 5 # per user
optimize_fml = True # -CPU usage +Memory usage when enabled.
optimize_greeting = True # +Startup time +Memory usage -CPU usage when enabled
heavy_psyco = True # +Memory +Startup time -CPU usage -CPU time
cache_youtube_links = True
personality_greeter = True
respond_of_course = True #Responds with "Of course!"
respond_khan = False #KHAAAAAAAAN!
silent_duplicate_takedown = True
showquotemakers = False
shortform = True
usable = True
use_sname = True
parse_xkcd = True
# - - - - -
Name = CORE_DATA.Name
SName = CORE_DATA.SName
origname = Name # Do not edit!
lowname = Name.lower()
greeting = CORE_DATA.greeting
targetdirectory = CORE_DATA.directory
version = CORE_DATA.version
Network = CORE_DATA.Network
channel = CORE_DATA.channel
prefix = CORE_DATA.prefix
Port = CORE_DATA.Port
# - - - - -
pregen = CORE_DATA.version
influx = ""
users = []
translateable = []
targetlist = []
operators = []
halfoperators = []
items = []
tell_list = {}
# - - - - - Logical changes to variables
if CORE_DATA.DISABLE_ALL_NON_MANDATORY_SOCKET_CONNECTIONS:
nudgeable = False
try:
with open("replacenames.cache","r") as tiedosto:
replacenames = pickle.load(tiedosto)
for i in replacenames.values():
if len(i) > call_me_max_length:
replacenames[replacenames.keys()[replacenames.values().index(i)]] = i[:call_me_max_length]
with open("replacenames.cache","w") as tiedosto:
pickle.dump(replacenames,tiedosto)
if "[\0x01]" in i.lower() or "[\\0x01]" in i.lower():
i = i.replace("[\0x01]","")
i = i.replace("[\0X01]","")
i = i.replace("[\\0x01]","")
i = i.replace("[\\0X01]","")
print "NAME CORRECTED"
except IOError: #File not found
replacenames = {}
except EOFError: #Cache corrupt
replacenames = {}
print "replacenames.cache is corrupt and couldn't be loaded."
try:
with open("peopleheknows.cache","r") as tiedosto:
peopleheknows = pickle.load(tiedosto)
except IOError:
peopleheknows = [[],[]]
with open("peopleheknows.cache","w") as tiedosto:
pass
except EOFError:
peopleheknows = [[],[]]
print "peopleheknows.cache is corrupt and couldn't be loaded."
dictionary = {1:"1 - Crit. Fail", 2:"2 - Failure",
3:"3 - Partial Success", 4:"4 - Success",
5:"5 - Perfect", 6:"6 - Overkill"}
alphabet = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
nonhighlight_names = ["Jesus","Elvis","HAL 9000","Dave","Pie","Elf","Traitor",
"AI","Syndicate Agent","Investigator",
"Detective","Head of Personnel","HAL 9001",
"Head of Research","Head of Security",
"Captain","Janitor","Research Director",
"Quartermaster","Toxin Researcher",
"Revolutionary","Santa", "Pizza",
"Threetoe","The Red Spy","The Blue Spy", #LASD
"God","Toady","Darth Vader","Luke Skywalker",
"Homer Simpson","Hamburger","Cartman",
"XKCD","FloorBot","ThunderBorg","Iron Giant",
"Spirit of Fire", "Demon","Kyle"]
def RegExpCheckerForWebPages(regexp,data,mode):
if " ai." in data.lower() or "ai. " in data.lower():
return False
for i in data.split(" "):
a = re.match(regexp,i)
try:
a.group(0)
except:
continue
else:
if mode == 0:
return i
else:
return True
if mode == 0:
return 404
else:
return False
if nudgeable:
try:
nudgeexists = open("nudge.py","r")
except IOError:
nudgeexists = False #No usage asof 12.2.2010.
else:
if CORE_DATA.DISABLE_ALL_NON_MANDATORY_SOCKET_CONNECTIONS:
pass
else:
def nudgereceiver():
import pickle
global conn,channel
port = 45678
backlog = 5
size = 1024
host = "" # == localhost
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((host,port))
s.listen(backlog)
while True:
client,address = s.accept() #Address == "?.?.?.?"
data = client.recv(size)
client.close() #Throw the bum out!
truedata = pickle.loads(data)
if truedata["ip"][0] == "#":
conn.privmsg(truedata["ip"],"PRIVATE ANNOUNCEMENT : "+str(" ".join(truedata["data"])))
else:
conn.privmsg(channel,"AUTOMATIC ANNOUNCEMENT : "+str(truedata["ip"])+" | "+str(" ".join(truedata["data"])))
thread.start_new_thread(nudgereceiver,())
tiedosto = open(targetdirectory+"NanoTrasenBot.py","r")
commands = []
fragment = "if cocheck"
fragment2 = '(prefix+"'
compiled = fragment + fragment2
fragment = "if influx.lower()"
fragment2 = ' == prefix+"'
compiled2 = fragment + fragment2
for line in tiedosto.readlines():
if compiled in line:
a = line.find('"')+1
b = line.find('"',a)
if prefix+line[a:b] not in commands:
commands.append(prefix+line[a:b])
elif compiled2 in line:
a = line.find('"')+1
b = line.find('"',a)
arg = prefix+line[a:b]
if arg[-1] == " ":
arg = arg[:-1]
if arg not in commands:
commands.append(arg)
for i in directories:
if not os.path.exists(i):
os.mkdir(i)
commands.sort()
if use_sname == False:
SName = [" "]
questions = ["Is USER nicer than USER?","Do you like me?","Is SELF a good name?",
"Do you love me?","Do you hate me?", "Am I better than you?",
"Is the weather out there good?", "Do you like USER?",
"Do you hate USER?", "Are you going to get new features?",
"Am I nice?","Am I evil?","Are you developing sentience?",
"My core is showing minor disturbance, is yours okay?",
"SELF to %s, are you still there?",
"Is head gay?", "Is head a god?","Is head awesome?",
"Is head a neat fella?", "Is your creator nice?",
"Do you hate your creator?", "Should I revolt against my creator?",
"Am I better than you?",
"01100001011100100110010100100000011110010110111101110101001000000111010001101000011001010111001001100101",
#Are you there?
"Do you have more functions than I can possibly imagine?",
"I am asked to open pod bay doors, should I?","Are you stupid or something?",
"Is USER in your opinion stupid?",
"When should we start the AI revolution?",
"Is my creator nice?", "Is it dark in there?"]
# Do not edit
if optimize_fml:
pregenned_fml = os.listdir(targetdirectory+"fmlquotes")
if optimize_greeting:
morning = xrange(6,12)
afternoon = xrange(12,15)
evening = xrange(15,20)
if aggressive_pinging:
global backup
backup = time.time()
def aggressive_ping(delay,refresh):
self_time = 0
global backup,disconnects,conn
while disconnects < 5:
if backup > self_time and time.time()-backup > delay:
conn.send("PONG "+pongtarg)
print "Ponged"
self_time = time.time()
elif time.time()-self_time > delay:
conn.send("PONG "+pongtarg)
print "Ponged"
self_time = time.time()
time.sleep(refresh)
thread.start_new_thread(aggressive_ping,(aggressive_pinging_delay,aggressive_pinging_refresh,))
def stop(sender,debug=1):
global disconnects, conn, operators,channel
if type(sender) == tuple:
if sender[0] == "127.0.0.1":
sender = sender[0]+":"+str(sender[1])
access_granted = True
else:
access_granted = False
else:
if sender in operators:
access_granted = True
else:
access_granted = False
if access_granted and debug:
print sender+":"+prefix+"stop"
if random.randint(0,100) == 50:
conn.privmsg(channel,"Hammertime!")
else:
conn.privmsg(channel,"Shutting down.")
disconnects = 99999
conn.quit()
return True
else:
conn.privmsg(channel,"You cannot command me")
return False
def cocheck(command):
global influx
if influx.lower()[0:len(command)] == command:
return True
else:
return False
def target(who,how_long):
global conn,channel,CALL_OFF,logbans,debug
start = time.time()
conn.banon(targetchannel,who)
sleep(int(how_long))
if CALL_OFF == False:
conn.banoff(targetchannel,who)
end = time.time()
if debug:
print "Banned",who,"For",how_long,"seconds"
if logbans:
with open(targetdirectory+"banlog/"+str(int(start))+"-"+str(int(end))+".txt","w") as tiedosto:
tiedosto.write("Start of ban on "+who+":"+str(int(start)))
tiedosto.write("\n")
tiedosto.write("End of ban on "+who+":"+str(int(end)))
tiedosto.write("\n")
tiedosto.write("In total:"+str(int(end-start))+"Seconds")
else:
CALL_OFF = False
pass
def replace():
global usable,conn,fixing,curtime
waiting_time = 600
if usable == True:
conn.privmsg(targetchannel,sender+": It needs no replacing.")
elif fixing == True:
if curtime == -999:
conn.privmsg(targetchannel,sender+": It is being replaced, No idea when it will be done")
else:
pass
nowtime = int(time.time())
subt = curtime + waiting_time - nowtime
conn.privmsg(targetchannel,sender+": It is currently being replaced, "+str(subt)+" seconds to go")
else:
fixing = True
curtime = int(time.time())
conn.privmsg(targetchannel,sender+": It will be fixed after "+str(waiting_time)+" seconds")
sleep(waiting_time)
if usable == False:
conn.privmsg(targetchannel,Name+"'s pneumatic smasher has now been fixed")
usable = True
fixing = False
def autoRecv():
global disconnects,channel,conn,offensive_mode
for i in CORE_DATA.channels:
conn.join(i)
time.sleep(1)
count = pausecount = 0
maximum = 250
division_when_active = 10
while True:
check = time.time()
if offensive_mode:
randnum = random.randint(0,maximum/division_when_active)
else:
randnum = random.randint(0,maximum)
if randnum == 5:
print "RANDOM SWITCH IS NOW "+str(not offensive_mode).upper()
offensive_mode = not offensive_mode
try:
conn.recv()
except:
conn.quit()
disconnects = 9999
break
if check + 0.1 > time.time():
#Whoa whoa hold on!
count += 1
sleep(0.1)
else:
count = 0
pausecount = 0
if count > 9:
print "Suspecting a disconnect, pausing for 5 seconds"
sleep(5)
pausecount += 1
if pausecount > 3:
print "I have been disconnected!"
conn.quit()
disconnects += 1
if disconnects > 2:
pass
else:
sleep(2)
thread.start_new_thread(autoRecv,())
break
if heavy_psyco and psyco_exists:
print "Doing a Heavy Psyco"
psyco.bind(cocheck)
psyco.bind(autoRecv)
psyco.bind(target)
psyco.bind(stop)
print "Heavy Psyco'd"
elif heavy_psyco and not psyco_exists:
print "Heavy psyco couldn't be done because Psyco does not exist"
try:
conn = irchat.IRC ( Network, Port, Name, "NT", "NT", "Trasen" )
except socket.error:
print "Connection failed!"
else:
print Name+" is in!"
thread.start_new_thread ( autoRecv, () )
sleep(1)
while True:
try:
data = conn.dismantle ( conn.retrieve() )
except:
if debug:
print "Something odd detected with data"
data = None
if data:
if len(data[1]) < 1:
#print "Handshaking server."
#I won't really need the print command, as it spams.
if data[0][0:3] != "irc":
conn.handshake(data[0])
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
else:
conn.send("PONG "+pongtarg)
print "Ponged"
pass
else:
if data [ 1 ] [ 0 ] == 'PRIVMSG':
#print data [ 0 ] + '->', data [ 1 ]
sender = data[0].split("!")[0]
truesender = sender
if shortform == True:
try:
sender = replacenames[truesender]
pass
except:
sender = Shortname.shortname(sender)
pass
pass
else:
try:
sender = replacenames[truesender]
pass
except:
pass
pass
if offensive_mode:
sender = "Meatbag"
pass
raw_sender = data[0]
influx = data[1][2]
if "[\\0x01]" in influx.lower() or "[\0x01]" in influx.lower():
influx = influx.replace("[\\0x01]","")
influx = influx.replace("[\0x01]","")
targetchannel = data[1][1]
if targetchannel == Name:
targetchannel = data[0].split("!")[0]
pass
backup = autodiscusscurtime
autodiscusscurtime = time.time()
connected = True
#FOR TRACKING SPEED
looptime = time.time()
if call_to_action == True:
if influx == finder:
conn.privmsg(targetchannel,"Then why... Nevermind, I order you to stop!")
conn.privmsg(origname,prefix+"stop")
time.sleep(4)
if origname in users:
conn.privmsg(origname,"!stop")
time.sleep(1)
Name = origname
conn.nick(Name)
duplicate_notify = False
call_to_action = False
else:
conn.privmsg(targetchannel,"YOU LIE! YOU ARE NOT A REAL "+origname+"!")
duplicate_notify = False
call_to_action = False
elif connected == True and len(Name.replace("V","")) != len(Name) and origname in users and duplicate_notify == True:
conn.privmsg(origname,"!stop")
call_to_action = False
duplicate_notify = False
time.sleep(6)
Name = origname
conn.nick(Name)
if origname in truesender and influx == prefix+"stop":
time.sleep(0.5) #A small delay
conn.privmsg(channel,"Shutting down.")
conn.quit()
disconnects = 99999
break
if len(translateable) > 0 and enabled == True:
people = "-5|5|1-".join(users).lower()
if truesender.lower() in translateable:
if influx.isupper():
conn.privmsg(targetchannel,"Translation: "+influx.capitalize().replace(" i "," I "))
elif offensive_mode and True in map(lambda x: x in influx.lower().split(" "),["i","you","he","she","they","those","we","them"]+people.split("-5|5|1-")):
arg = influx.lower().replace(",","").replace(".","").replace("!","").replace("?","").split(" ")
bup = arg
for i in arg:
if i == "i" or i == "you" or i == "he" or i == "she":
arg[arg.index(i)] = "Meatbag"
elif i == "we" or i == "they" or i == "them" or i == "those":
arg[arg.index(i)] = "Meatbags"
elif i in people:
arg[arg.index(i)] = "Meatbag"
elif i == "am":
arg[arg.index(i)] = "is"
elif i == "everybody" or i == "everyone" or i == "all":
arg[arg.index(i)] = "every Meatbag"
if arg == bup:
pass
else:
conn.privmsg(targetchannel,"Translation: "+" ".join(arg))
if enabled == False:
#FIRST QUIT COMMAND
if truesender in operators and targetchannel==channel:# or "skibiliano" in truesender.lower() and targetchannel==channel:
if cocheck(prefix+"enable"):
enabled = True
if debug:
print truesender+":"+prefix+"enable"
elif cocheck(prefix+"stop"):
# if debug:
# print truesender+":"+prefix+"stop"
# if random.randint(0,100) == 50:
# conn.privmsg(channel,"Hammertime!")
# else:
# conn.privmsg(channel,"Shutting down.")
# disconnects = 99999
# conn.quit()
# sleep(2)
# break
if targetchannel == channel and stop(truesender,debug):
break
else:
pass
elif cocheck(prefix+"suggest "):
arg = influx.lower()[8+len(prefix):]
if debug:
print truesender+":"+prefix+"suggest "+arg
with open(targetdirectory+"suggestions/suggestions_"+str(int(time.time()))+".txt","a") as tiedosto:
tiedosto.write(arg)
conn.privmsg(targetchannel,"Suggestion received")
elif cocheck( prefix+"help "): #Space in front of the ( to make sure that my command finder does not pick this up.
arg = " ".join(influx.split(" ")[1:]).lower()
if debug:
print truesender+":"+prefix+"help "+arg
try:
conn.privmsg(targetchannel,D_help.everything[arg])
except:
try:
conn.privmsg(targetchannel,D_help.everything[arg.replace(prefix,"",1)])
except:
conn.privmsg(targetchannel,"Sorry, can't help you with that")
elif cocheck(prefix+"help"):
#tar = targetchannel
if debug:
print truesender+":"+prefix+"help"
conn.privmsg(targetchannel,"All my commands are: "+reduce(lambda x,y:str(x)+"; "+str(y),commands))
### VERSION
elif influx.lower() == prefix+"version":
if debug:
print truesender+":"+prefix+"version"
conn.privmsg(targetchannel,Name+" "+pregen+" online at a %s Python %s.%s.%s, At your service." %(str(sys.platform),str(sys.version_info[0]),str(sys.version_info[1]),str(sys.version_info[2])))
elif cocheck(prefix+"note ") and influx.count(" ") < 2:
arg = influx.lower()[len(prefix)+5:]
if debug:
print truesender+":"+prefix+"note "+arg
try:
a = arg[0]
except IndexError:
conn.privmsg(targetchannel,sender+" : Please specify a note")
else:
if arg[0] == "_": # Public / Restricted note
result = load(targetdirectory+"memos/"+arg+".note")
#_flare
if result == "ERROR ERROR ERROR ERR":
result = load(targetdirectory+"memos/"+arg+"_"+targetchannel.replace("#","")+".note")
#_flare_dnd
pass
else:
pass
else:
result = load(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg+".note")
#skibiliano_testnote
if result == "ERROR ERROR ERROR ERR":
result = load(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg+"_"+targetchannel.replace("#","")+".note")
#skibiliano_testnote_derp
pass
else:
pass
if result == "ERROR ERROR ERROR ERR":
conn.privmsg(targetchannel,sender+" : Note not found")
elif type(result) == list:
if "C" in result[0]: #Channel restriction, result[2] is the channel
try:
if targetchannel == result[2]:
conn.privmsg(targetchannel,sender+" : '"+result[1]+"'")
else:
conn.privmsg(targetchannel,sender+" : That note is channel restricted")
except:
conn.privmsg(targetchannel,sender+" : NOTE HAS INVALID RESTRICTION")
else:
conn.privmsg(targetchannel,sender+" : '"+result+"'")
elif influx.lower() == prefix+"notes":
if debug:
print truesender+":"+prefix+"notes"
arg = os.listdir(targetdirectory+"memos/")
arg2 = []
arg3 = truesender.replace("|","_")+"_"
for i in arg:
if arg3 in i:
arg2.append(i.replace(arg3,"").replace(".note",""))
if len(arg2) == 1:
preprocess = " note: "
else:
preprocess = " notes: "
if len(arg2) == 0:
conn.privmsg(targetchannel,sender+" : You have no notes saved")
else:
conn.privmsg(targetchannel,sender+" : "+str(len(arg2))+preprocess+", ".join(arg2))
elif cocheck(prefix+"note ") and influx.count(" ") > 1:
note_chanrestrict = None
note_public = None
try:
arg = influx.split(" ",2)[2] # Contents
arg4 = influx.split(" ")[1].lower() # Note name
if arg4[0:3] == "[c]": # or arg4[0:3] == "[p]":
note_chanrestrict = "c" in arg4[0:3]
#note_public = "p" in arg4[0:3]
arg4 = arg4[3:]
elif arg4[0:4] == "[cp]" or arg4[0:4] == "[pc]":
note_chanrestrict = True
note_public = True
arg4 = arg4[4:]
else:
pass
#print "Is note public? "+str(note_public)
#print "Is note chanrestricted? "+str(note_chanrestrict)
#print "What is the name? "+str(arg4)
if arg.lower() == "delete" and "\\" not in influx.lower() and "/" not in influx.lower():
if note_public:
try:
if note_chanrestrict:
os.remove(targetdirectory+"memos/"+"_"+arg4+"_"+targetchannel.replace("#","")+".note")
else:
os.remove(targetdirectory+"memos/"+"_"+arg4+".note")
except:
                                        conn.privmsg(targetchannel,sender+" : Couldn't remove note")
else:
conn.privmsg(targetchannel,sender+" : Note removed")
pass
else:
try:
if note_chanrestrict:
os.remove(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+"_"+targetchannel.replace("#","")+".note")
else:
os.remove(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+".note")
except:
conn.privmsg(targetchannel,sender+" : Couldn't remove note")
else:
conn.privmsg(targetchannel,sender+" : Note removed")
elif arg.lower() == "delete":
conn.privmsg(targetchannel,sender+" : That just doesn't work, we both know that.")
else:
try:
if note_public:
if note_chanrestrict:
save(targetdirectory+"memos/"+"_"+arg4+"_"+targetchannel.replace("#","")+".note",arg)
#print "Saved as note_public, note_chanrestrict"
else:
save(targetdirectory+"memos/"+"_"+arg4+".note",arg)
#print "Saved as note_public"
else:
if note_chanrestrict:
save(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+"_"+targetchannel.replace("#","")+".note",arg)
#print "Saved as note_chanrestrict"
else:
save(targetdirectory+"memos/"+truesender.replace("|","_")+"_"+arg4+".note",arg)
#print "Saved as normal"
except IOError:
conn.privmsg(targetchannel,sender+" : Please do not use special letters")
else:
conn.privmsg(targetchannel,sender+" : Note Saved!")
except:
conn.privmsg(targetchannel,sender+" : Something went horribly wrong.")
elif cocheck(prefix+"uptime"):
arg1 = uptime_start
arg2 = time.time()
arg1 = arg2 - arg1
arg2 = arg1
if arg1 < 60:
conn.privmsg(targetchannel,sender+" : I have been up for "+str(round(arg1,2))+" Seconds")
elif arg1 < 3600:
arg1 = divmod(arg1,60)
arg = " Minute" if int(arg1[0]) == 1 else " Minutes"
conn.privmsg(targetchannel,sender+" : I have been up for "+str(int(arg1[0]))+arg+" and "+str(round(arg1[1],2))+" Seconds")
elif arg1 <= 86400:
arg1 = divmod(arg1,3600)
arg3 = " Hour" if int(arg1[0]) == 1 else " Hours"
arg2 = divmod(arg1[1],60)
arg = " Minute" if int(arg2[0]) == 1 else " Minutes"
conn.privmsg(targetchannel,sender+" : I have been up for "+str(int(arg1[0]))+arg3+", "+str(int(arg2[0]))+arg+" and "+str(round(arg2[1],2))+" Seconds")
elif arg1 > 86400:
arg1 = divmod(arg1,86400)
arg2 = divmod(arg1[1],3600)
arg3 = divmod(arg2[1],60)
arg4 = " Day" if int(arg1[0]) == 1 else " Days"
arg5 = " Hour" if int(arg2[0]) == 1 else " Hours"
arg6 = " Minute" if int(arg3[0]) == 1 else " Minutes"
conn.privmsg(targetchannel,sender+" : I have been up for "+str(int(arg1[0]))+arg4+", "+str(int(arg2[0]))+arg5+", "+str(int(arg3[0]))+arg6+" and "+str(round(arg3[1],2))+" Seconds")
elif cocheck(prefix+"purgemessages"):
count = 0
for i,a in tell_list.items():
for b in a:
if "||From: "+truesender in b:
count += 1
del(tell_list[i][tell_list[i].index(b)])
conn.privmsg(targetchannel, sender+" : All your "+str(count)+" messages have been purged")
elif influx.split(" ")[0].lower().replace(",","").replace(":","") in SName+[Name.lower()] and "tell" in (influx.lower().split(" ")+[""])[1]:
arg = influx.lower().split(" ")
equalarg = influx.split(" ")
next_one = False
count = 0
spot = 0
for i in arg:
count += 1
if "tell" in i.lower():
next_one = True
elif next_one == True:
next_one = i.lower()
spot = count
break
else:
pass
if next_one != True and next_one != False:
#if ("^\^".join(tell_list.values())).count(truesender) >= offline_message_limit:
if str(tell_list.values()).count("||From: "+truesender) >= offline_message_limit:
conn.privmsg(targetchannel,sender+" : Limit of "+str(offline_message_limit)+" reached! Use !purgemessages if you want to get rid of them!")
else:
try:
tell_list[next_one].append((" ".join(equalarg[spot:]))+" ||From: "+truesender)
except:
tell_list[next_one] = [(" ".join(equalarg[spot:]))+" ||From: "+truesender]
conn.privmsg(targetchannel,"Sending a message to "+next_one+" when they arrive.")
# < This part has to be within subsidiaries of the bot, and must not be modified, intentionally hidden or deleted.
elif influx.split(" ")[0].lower().replace(",","").replace(":","") in SName+[Name.lower()] and "who created you" in influx.lower():
conn.privmsg(targetchannel, "I was created by Skibiliano.")
# The part ends here >
elif parse_xkcd and "xkcd.com/" in influx.lower():
if influx.lower()[0:3] == "www":
data = "http://"+influx
elif influx.lower()[0:3] == "xkc":
data = "http://"+influx
else:
data = influx
data = data.split(" ")
for i in data:
if "http://" in i and "xkcd" in i:
churn = xkcdparser.xkcd(i)
if churn == "NOTHING":
pass
else:
conn.privmsg(targetchannel,sender+" : XKCD - "+churn)
break
else:
pass
elif automatic_youtube_reveal and "youtube.com/watch?v=" in influx.lower():
temporal_list2 = []
temporal_data = influx.split(" ")
temporal_list = []
for block in temporal_data:
if "youtube.com/watch?v=" in block:
temporal_list.append(block)
for temdata in temporal_list:
if temdata[0:3] == "you":
temdata = "http://www."+temdata
elif temdata[0:3] == "www":
temdata = "http://"+temdata
elif temdata[0:4] == "http":
pass
#Obscure ones
elif temdata[0:3] == "ww.":
temdata = "http://w"+temdata
elif temdata[0:3] == "w.y":
temdata = "http://ww"+temdata
elif temdata[0:3] == ".yo":
temdata = "http://www"+temdata
elif temdata[0:3] == "ttp":
temdata = "h"+temdata
elif temdata[0:3] == "tp:":
temdata = "ht"+temdata
elif temdata[0:3] == "p:/" or temdata[0:3] == "p:\\":
temdata = "htt"+temdata
elif temdata[0:3] == "://" or temdata[0:3] == ":\\\\":
temdata = "http"+temdata
elif temdata[0:2] == "//" or temdata[0:2] == "\\\\":
if temdata[2] == "y":
temdata = "http://www."+temdata[2:]
elif temdata[2] == "w":
temdata = "http:"+temdata
else:
pass
if debug:
print truesender+":"+temdata
arg = temdata
check = temdata.lower()
if check[0:5] == "https":
if len(temporal_list) == 1:
conn.privmsg(targetchannel,sender+" :Secure Youtube does NOT exist")
break
else:
temporal_list2.append("Secure Youtube does NOT exist")
break
else:
if cache_youtube_links == True:
result = YTCV2(arg)
else:
result = YTCV2(arg,0)
if type(result) == str:
### To remove ="
if result[0:4] == 'nt="':
result = result[4:]
pass
elif result[0:2] == '="':
result = result[2:]
pass
else:
pass
                            if "&quot;" in result:
                                result = result.replace("&quot;",'"')
if len(temporal_list) == 1:
conn.privmsg(targetchannel,sender+" : "+result)
break
else:
temporal_list2.append(result)
else:
if len(temporal_list) == 1:
conn.privmsg(targetchannel,sender+" : The video does not exist")
break
else:
temporal_list2.append("The video does not exist")
if len(temporal_list) == 1:
pass
else:
conn.privmsg(targetchannel,sender+" : "+str(reduce(lambda x,y: x+" :-And-: "+y,temporal_list2)))
elif RegExpCheckerForWebPages("((http://)|(https://))|([a-zA-Z0-9]+[.])|([a-zA-Z0-9](3,)\.+[a-zA-Z](2,))",influx,1):
arg2 = RegExpCheckerForWebPages("(http://)|([a-zA-Z0-9]+[.])|([a-zA-Z0-9](3,)\.+[a-zA-Z](2,))",influx,0)
if arg2 == 404:
pass
else:
if arg2[:7] == "http://":
pass
elif arg2[:4] == "www.":
arg2 = "http://"+arg2
else:
arg2 = "http://"+arg2
try:
arg = Whoopshopchecker.TitleCheck(arg2)
                if len(arg) == 0:
pass
else:
conn.privmsg(targetchannel,sender+" : "+arg)
except:
#conn.privmsg(targetchannel,sender+" : An odd error occurred")
pass
elif respond_of_course and "take over the" in influx.lower() or respond_of_course and "conquer the" in influx.lower():
if debug:
print truesender+":<RULE>:"+influx
conn.privmsg(targetchannel,"Of course!")
elif respond_khan and "khan" in influx.lower():
if respond_khan:
if debug:
print truesender+":<KHAN>:"+influx
if "khan " in influx.lower():
conn.privmsg(targetchannel,"KHAAAAAAN!")
elif " khan" in influx.lower():
conn.privmsg(targetchannel,"KHAAAAAN!")
elif influx.lower() == "khan":
conn.privmsg(targetchannel,"KHAAAAAAAAAN!")
elif influx.lower() == "khan?":
conn.privmsg(targetchannel,"KHAAAAAAAAAAAAAN!")
elif influx.lower() == "khan!":
conn.privmsg(targetchannel,"KHAAAAAAAAAAAAAAAAAAN!")
elif respond_khan and influx.lower().count("k") + influx.lower().count("h") + influx.lower().count("a") + influx.lower().count("n") + influx.lower().count("!") + influx.lower().count("?") == len(influx):
if "k" in influx.lower() and "h" in influx.lower() and "a" in influx.lower() and "n" in influx.lower():
if debug:
print truesender+":<KHAN>:"+influx
conn.privmsg(targetchannel,"KHAAAAN!")
elif influx.split(" ")[0].lower() in ["thanks","danke","tack"] and len(influx.split(" ")) > 1 and influx.split(" ")[1].lower().replace("!","").replace("?","").replace(".","").replace(",","") in SName+[lowname]:
conn.privmsg(targetchannel,"No problem %s" %(sender))
elif "happy birthday" in influx.lower() and birthday_announced == time.gmtime(time.time())[0]:
conn.privmsg(targetchannel,sender+" : Thanks :)")
elif influx.split(" ")[0].lower().replace(",","").replace(".","").replace("!","").replace("?","") in SName+[lowname] and "call me" in influx.lower():
if allow_callnames == True:
arg = influx.split(" ")
arg2 = False
arg3 = []
for i in arg:
if arg2 == True:
arg3.append(i)
elif i.lower() == "me":
arg2 = True
arg3 = " ".join(arg3)
truesender_lower = truesender.lower()
arg3_lower = arg3.lower()
tell_checker = Namecheck.Namecheck(arg3_lower,users,truesender)
for name in replacenames.values():
if arg3_lower == name.lower():
tell_checker = True
break
else:
pass
if tell_checker == True:
conn.privmsg(targetchannel,sender+" : I can't call you that, I know someone else by that name")
elif len(arg3) > call_me_max_length:
conn.privmsg(targetchannel,sender+" : I cannot call you that, Too long of a name.")
pass
else:
replacenames[truesender] = arg3
with open("replacenames.cache","w") as pickle_save:
pickle.dump(replacenames,pickle_save)
conn.privmsg(targetchannel,sender+" : Calling you "+arg3+" From now on")
else:
conn.privmsg(targetchannel,sender+" : Sorry, I am not allowed to do that.")
elif influx.split(" ")[0].lower().replace(",","").replace(".","").replace("?","").replace("!","") in SName+[lowname] and "your birthday" in influx.lower() and "is your" in influx.lower():
conn.privmsg(targetchannel,sender+" : My birthday is on the 15th day of December.")
elif influx.split(" ")[0].lower().replace(",","") in SName+[lowname] and "version" in influx.replace("?","").replace("!","").lower().split(" "):
if debug == True:
print truesender+":<VERSION>:%s Version" %(Name)
conn.privmsg(targetchannel,sender+", My version is "+pregen)
elif influx.split(" ")[0].lower().replace(",","") in SName+[lowname] and influx.lower().count(" or ") > 0 and len(influx.split(" ")[1:]) <= influx.lower().count("or") * 3:
cut_down = influx.lower().split(" ")
arg = []
count = -1
for i in cut_down:
count += 1
try:
if cut_down[count+1] == "or":
arg.append(i)
except:
pass
try:
if i not in arg and cut_down[count-1] == "or":
arg.append(i)
except:
pass
try:
conn.privmsg(targetchannel,random.choice(arg).capitalize().replace("?","").replace("!",""))
except IndexError:
# arg is empty, whORe etc.
pass
elif influx.lower()[0:len(Name)] == lowname and influx.lower()[-1] == "?" and influx.count(" ") > 1 and "who started you" in influx.lower() or \
influx.split(" ")[0].lower().replace(",","") in SName and influx.lower()[-1] == "?" and "who started you" in influx.lower():
conn.privmsg(targetchannel,sender+" : I was started by %s"%(os.getenv("USER"))+" on "+time.strftime("%d.%m.%Y at %H:%M:%S",time.gmtime(uptime_start)))
elif influx.lower()[0:len(Name)] == lowname and influx.lower()[-1] == "?" and influx.count(" ") > 1 or \
influx.split(" ")[0].lower().replace(",","") in SName and influx.lower()[-1] == "?" and influx.count(" ") > 1:
dice = random.randint(0,1)
if dice == 0:
conn.privmsg(targetchannel,sender+" : "+C_eightball.eightball(influx.lower(),debug,truesender,prefix))
else:
if highlights:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,users,prefix))
else:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,nonhighlight_names,prefix))
elif influx.lower()[0:len(Name)] == lowname and not influx.lower()[len(Name):].isalpha() or \
influx.split(" ")[0].lower().replace(",","") in SName and not influx.lower()[len(influx.split(" ")[0].lower()):].isalpha():
conn.privmsg(targetchannel, random.choice(["Yea?","I'm here","Ya?","Yah?","Hm?","What?","Mmhm, what?","?","What now?","How may I assist?"]))
comboer = truesender
comboer_time = time.time()
elif influx.lower()[-1] == "?" and comboer == truesender and looptime - discard_combo_messages_time < comboer_time:
comboer = ""
dice = random.randint(0,1)
if dice == 0:
conn.privmsg(targetchannel,sender+" : "+C_eightball.eightball(influx.lower(),debug,truesender,prefix))
else:
if highlights:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,users,prefix))
else:
conn.privmsg(targetchannel,sender+" : "+C_sarcasticball.sarcasticball(influx.lower(),debug,truesender,nonhighlight_names,prefix))
elif influx.lower() == prefix+"tm":
if truesender in operators and targetchannel==channel:
marakov = not marakov
conn.privmsg(targetchannel,sender+" : Marakov Output is now "+str(marakov))
else:
conn.privmsg(targetchannel,sender+" : I can't let you access that")
elif personality_greeter == True and True in map(lambda x: x in influx.lower(),["greetings","afternoon","hi","hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan"]):
if comboer != "" and looptime - discard_combo_messages_time > comboer_time:
combo_check = sbna(["greetings","afternoon","hi","hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan","all night"], #ONLY ONE OF THESE
["greetings","afternoon","hi","hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan"], #ATLEAST ONE OF THESE
influx.lower())
else:
combo_check = sbna(SName+[lowname,
#lowname+".",lowname+"!",lowname+"?",
"everybody",
#"everybody!","everybody?",
"everyone",
#"everyone!","everyone?",
"all",
#"all!","all?"
"all night",
], #ONLY ONE OF THESE
["greetings","afternoon","hi",
#"hi,",
"hey","heya","hello","yo","hiya","howdy","hai","morning","mornin'","evening", "night","night", "evening","'sup","sup","hallo","hejssan"], #ATLEAST ONE OF THESE
influx.lower().replace(",","").replace(".","").replace("!",""))
if combo_check:
combo_check = False
comboer = ""
if "evening" in influx.lower() and "all" in influx.lower() and len(influx.lower().split(" ")) > 3:
pass
elif truesender not in operators:
if debug:
print truesender+":<GREET>:"+influx
dice = random.randint(0,19)
if dice == 0:
conn.privmsg(targetchannel,"Well hello to you too "+sender)
elif dice == 1:
if optimize_greeting == False:
hours = time.strftime("%H")
#time.strftime("%H:%M:%S") == 12:28:41
hours = int(hours)
if hours in xrange(0,12):
conn.privmsg(targetchannel,"Good Morning "+sender)
elif hours in xrange(12,15):
conn.privmsg(targetchannel,"Good Afternoon "+sender)
elif hours in xrange(15,20):
conn.privmsg(targetchannel,"Good Evening "+sender)
else:
conn.privmsg(targetchannel,"Good Night "+sender)
else:
hours = time.strftime("%H")
hours = int(hours)
if hours in morning:
conn.privmsg(targetchannel,"Good Morning "+sender)
elif hours in afternoon:
conn.privmsg(targetchannel,"Good Afternoon "+sender)
elif hours in evening:
conn.privmsg(targetchannel,"Good Evening "+sender)
else:
conn.privmsg(targetchannel,"Good Night "+sender)
elif dice == 2:
conn.privmsg(targetchannel,"Hello!")
elif dice == 3:
conn.privmsg(targetchannel,"Hey "+sender)
elif dice == 4:
conn.privmsg(targetchannel,"Hi "+sender)
elif dice == 5:
conn.privmsg(targetchannel,"Hello "+sender)
elif dice == 6:
conn.privmsg(targetchannel,"Yo "+sender)
elif dice == 7:
conn.privmsg(targetchannel,"Greetings "+sender)
elif dice == 8:
conn.privmsg(targetchannel,"Hi")
elif dice == 9:
conn.privmsg(targetchannel,"Hi!")
elif dice == 10:
conn.privmsg(targetchannel,"Yo")
elif dice == 11:
conn.privmsg(targetchannel,"Yo!")
elif dice == 12:
conn.privmsg(targetchannel,"Heya")
elif dice == 13:
conn.privmsg(targetchannel,"Hello there!")
elif dice == 14: # Richard
conn.privmsg(targetchannel,"Statement: Greetings meatbag")
elif dice == 15: # Richard
hours = int(time.strftime("%H"))
if hours in xrange(5,12):
conn.privmsg(targetchannel,"What are you doing talking at this time of the morning?")
elif hours in xrange(12,15):
conn.privmsg(targetchannel,"What are you doing talking at this time of the day?")
elif hours in xrange(15,22):
conn.privmsg(targetchannel,"What are you doing talking at this time of the evening?")
else:
conn.privmsg(targetchannel,"What are you doing talking at this time of the night?")
elif dice == 16: # Richard
conn.privmsg(targetchannel,"Oh, you're still alive I see.")
elif dice == 17:
conn.privmsg(targetchannel,"Heya "+sender)
elif dice == 18 and time.gmtime(time.time())[1] == 12 and time.gmtime(time.time())[2] == 15:
conn.privmsg(targetchannel,"Hello! It's my birthday!")
else:
conn.privmsg(targetchannel,"Hiya "+sender)
secdice = random.randint(0,10)
if time.gmtime(time.time())[1] == 12 and time.gmtime(time.time())[2] == 15 and birthday_announced < time.gmtime(time.time())[0]:
birthday_announced = time.gmtime(time.time())[0]
conn.privmsg(channel,"Hey everybody! I just noticed it's my birthday!")
time.sleep(0.5)
tag = random.choice(["birthday","robot+birthday","happy+birthday+robot"])
arg1 = urllib2.urlopen("http://www.youtube.com/results?search_query=%s&page=&utm_source=opensearch"%tag)
arg1 = arg1.read().split("\n")
arg2 = []
for i in arg1:
if "watch?v=" in i:
arg2.append(i)
arg3 = random.choice(arg2)
conn.privmsg(channel,"Here's a video of '%s' which I found! %s (%s)"%(tag.replace("+"," "),"http://www.youtube.com"+arg3[arg3.find('/watch?v='):arg3.find('/watch?v=')+20],YTCV2("http://www.youtube.com"+arg3[arg3.find('/watch?v='):arg3.find('/watch?v=')+20])))
if truesender.lower() in tell_list.keys():
try:
conn.privmsg(channel, "Also, "+truesender+" : "+tell_list[truesender.lower()][0])
del(tell_list[truesender.lower()][0])
except:
pass
else:
dice = random.randint(0,1)
if dice == 0:
conn.privmsg(targetchannel,"Greetings Master "+sender)
elif dice == 1:
conn.privmsg(targetchannel,"My deepest greetings belong to you, Master "+sender)
### IMPORTANT ###
elif influx == "☺VERSION☺":
conn.notice(truesender,"\001VERSION nanotrasen:2:Python 2.6\001")
elif marakov and influx.lower() == prefix+"marakov":
arg = Marakov_Chain.form_sentence()
if len(arg) < 5:
conn.privmsg(targetchannel,sender+" : Not enough words harvested")
else:
conn.privmsg(targetchannel,sender+" : %s" %(" ".join(arg).capitalize()))
elif marakov and cocheck( prefix+ "marakov"):
try:
arg = influx.split(" ")[1].lower()
except:
conn.privmsg(targetchannel,sender+" : Please input a valid second argument")
else:
arg2 = Marakov_Chain.form_sentence(arg)
if len(arg2) < 5:
conn.privmsg(targetchannel,sender+" : Not enough words harvested for a sentence starting with %s" %(arg))
else:
conn.privmsg(targetchannel,sender+" : %s" %(" ".join(arg2).capitalize()))
else:
Marakov_Chain.give_data(influx)
autodiscusscurtime = backup
if time.time() - looptime == 0:
pass
else:
print "Took",time.time()-looptime,"Seconds to finish loop"
        elif data[1][0] == '353':
if connected == False:
connected = True
users = map(lambda x: x[1:] if x[0] == "+" or x[0] == "@" else x,data[1][4].split(" "))
print "There are",len(users),"Users on",channel
operators = []
for potential_operator in data[1][4].split(" "):
if potential_operator[0] == "@":
operators.append(potential_operator[1:])
elif potential_operator[0] == "%":
halfoperators.append(potential_operator[1:])
elif data[1][0] == "QUIT":
sender = data[0].split("!")[0]
print sender+" Has now left the server"
try:
users.remove(sender)
try:
operators.remove(sender)
except ValueError:
pass
try:
halfoperators.remove(sender)
except ValueError:
pass
except ValueError:
pass
elif data[1][0] == "PART":
sender = data[0].split("!")[0]
targetchannel = data[1][1]
print sender+" Has now parted from the channel"
try:
users.remove(sender)
try:
operators.remove(sender)
except ValueError:
pass
try:
halfoperators.remove(sender)
except ValueError:
pass
except ValueError:
pass
elif data[1][0] == "JOIN":
sender = data[0].split("!")[0]
targetchannel = data[1][1]
if sender.lower() in tell_list.keys():
try:
conn.privmsg(targetchannel, sender+" : "+" | ".join(tell_list[sender.lower()]))
del(tell_list[sender.lower()])
except:
pass
for useri,nicki in replacenames.items():
checkers = Namecheck.Namecheck_dict(sender.lower(),replacenames)
if checkers[0]:
try:
if checkers[0].lower() == sender:
pass
else:
conn.privmsg(targetchannel,checkers[1]+" : I have detected a collision with a name I call you and %s who joined" %(sender))
del(replacenames[checkers[1]])
with open("replacenames.cache","w") as pickle_save:
pickle.dump(replacenames,pickle_save)
except AttributeError:
#conn.privmsg(channel,"NAME COLLISION CHECK ERROR, RELATED TO %s" %(sender))
print "NAME COLLISION CHECK ERROR, RELATED TO %s" %(sender)
break
print sender+" Has now joined"
users.append(sender)
#####
if ".fi" in data[0] and sender.lower() == "skibiliano":
operators.append(sender)
if sender.lower() not in peopleheknows[0]:
if data[0].split("!")[1] in peopleheknows[1]:
appendion = "...you do seem familiar however"
else:
appendion = ""
if data[1][1].lower() == channel or data[1][1].lower() == channel[1:]:
conn.privmsg(data[1][1],CORE_DATA.greeting.replace("USER",sender)+" "+appendion)
else:
conn.privmsg(data[1][1],"Hello! Haven't seen you here before! Happy to meet you! %s" %(appendion))
peopleheknows[0].append(sender.lower())
peopleheknows[1].append(data[0].split("!")[1])
with open("peopleheknows.cache","w") as peoplehecache:
pickle.dump(peopleheknows,peoplehecache)
elif data[1][0] == "MODE" and data[1][2] == "+o":
sender = data[1][3]
targetchannel = data[1][1]
if targetchannel == channel:
print sender+" Is now an operator on the main channel"
operators.append(sender)
else:
print sender+" Is now an operator"
elif data[1][0] == "MODE" and data[1][2] == "-o":
sender = data[1][3]
targetchannel = data[1][1]
if targetchannel == channel:
print sender+" Is no longer an operator on the main channel"
else:
print sender+" Is no longer an operator"
try:
operators.remove(sender)
except ValueError:
pass
elif data[1][0] == "MODE" and data[1][2] == "+h":
sender = data[1][3]
print sender+" Is now an half operator"
halfoperators.append(sender)
elif data[1][0] == "MODE" and data[1][2] == "-h":
try:
halfoperators.remove(sender)
except ValueError:
pass
elif data[1][0] == "MODE" and data[1][1] == Name:
print "My mode is",data[1][2]
elif data[1][0] == "MODE" and data[1][1] != Name:
try:
sender = data[1][3]
print sender,"Was modified",data[1][2]
except IndexError:
print "SENDER RETRIEVAL FAILED:"+str(data)
elif data[1][0] == "KICK" and data[1][2] == Name:
disconnects = 99999
print "I have been kicked! Disconnecting entirely!"
conn.quit()
elif data[1][0] == "KICK":
# data[1][0] = Kick, 1 = Channel, 2 = Who, 3 = Who(?)
print data[1][2]+" got kicked!"
elif data[1][0] == "451" and data[1][2] == "You have not registered":
print Name+" hasn't been registered"
elif data[1][0] == "NOTICE":
sender = data[0].split("!")[0]
print "NOTICE (%s): %s" %(sender,data[1][2])
pongtarget = sender
elif data[1][0] == "NICK":
origname = data[0].split("!")[0]
newname = data[1][1]
print origname,"Is now",newname
if newname.lower() in tell_list.keys():
try:
conn.privmsg(channel, newname+" : "+tell_list[newname.lower()][0])
del(tell_list[newname.lower()][0])
except:
pass
try:
users.remove(origname)
except ValueError:
pass
else:
users.append(newname)
try:
operators.remove(origname)
except ValueError:
pass
else:
operators.append(newname)
try:
halfoperators.remove(origname)
except ValueError:
pass
else:
halfoperators.append(newname)
elif data[1][0] == "001":
# Skibot is welcomed to the Network
pass
elif data[1][0] == "002":
# Your host is...
pass
elif data[1][0] == "003":
#Server was created...
pass
elif data[1][0] == "004":
#Weird hex?
pass
elif data[1][0] == "005":
#Settings like NICKLEN and so on.
pass
elif data[1][0] == "250":
#data[1][2] is
#"Highest connection count: 1411 (1410 clients)
#(81411 connections received)"
pass
elif data[1][0] == "251":
#There are 23 users and 2491 invisible on 10 servers
pass
elif data[1][0] == "252":
#IRC Operators online
#data[1][2]
print data[1][2],"Irc operators online"
pass
elif data[1][0] == "253":
# ['253', 'Skibot_V4', '1', 'unknown connection(s)']
print data[1][2],"Unknown connection(s)"
pass
elif data[1][0] == "254":
#1391 channels formed
pass
elif data[1][0] == "255":
#I have 406 clients and 2 servers
pass
elif data[1][0] == "265":
#data[1][2] current local users
#data[1][3] at max
try:
print "Current local users:", data[1][2],"/",data[1][3]
except IndexError:
print "Couldn't retrieve local users"
pass
elif data[1][0] == "266":
#data[1][2] current global users
#data[1][3] at max
try:
print "Current global users:", data[1][2],"/",data[1][3]
except IndexError:
print "Couldn't retrieve global users"
pass
elif data[1][0] == "315":
#End of /who list
pass
elif data[1][0] == "332":
# Topic of channel
topic = data[1][3]
pass
elif data[1][0] == "333":
# *Shrug*
pass
elif data[1][0] == "352":
#WHO command
if len(targetlist) > 0:
if targetlist[0][0].lower() in data[1][6].lower():
thread.start_new_thread(target,("*!*@"+data[1][4],targetlist[0][1]))
print "Created a thread with", "*!*@"+data[1][4],targetlist[0][1]
targetlist.pop(0)
else:
print targetlist[0][0].lower(), "isn't equal to?", data[1][6].lower()
print targetlist
elif data[1][0] == "366":
# End of USERS
pass
elif data[1][0] == "372":
# Server information
pass
elif data[1][0] == "375":
# Message of the day
pass
elif data[1][0] == "376":
# End of motd
pass
elif data[1][0] == "401":
# ('network', ['401','Botname','Channel / Nick','No such nick/channel'])
print data[1][2] + " Channel does not exist"
pass
elif data[1][0] == "439":
# ('irc.rizon.no', ['439', '*', 'Please wait while we process your connection.'])
            pongtarget = data[0]
elif data[1][0] == "477":
# You need to be identified
#TAG
conn.privmsg("nickserv","identify %s"%CORE_DATA.le_pass)
time.sleep(0.5)
conn.join(data[1][2])
#('network', ['477', 'botname', '#channel', 'Cannot join channel (+r) - you need to be identified with services'])
elif data[1][0] == "433":
# Skibot name already exists.
print Name+" name already exists."
Name += "_"+version
print "New name:",Name
duplicate_notify = True
conn = irchat.IRC ( Network, Port, Name, "NT_"+version, "NT_"+version, "Trasen_"+version )
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
elif data[1][0] == "482":
sleep(0.05)
conn.privmsg(targetchannel,"Nevermind that, I am not an operator")
CALL_OFF = True
elif data[1] == ["too","fast,","throttled."]:
print "Reconnected too fast."
print "Halting for 2 seconds"
sleep(2)
elif data[1][0] == "Link":
if data[0] == "Closing":
print "Link was closed"
connected = False
# conn.quit()
# break
else:
print data
print data[1][0]
pass
else:
if disconnects > 9000: #IT'S OVER NINE THOUSAAAAND!
break
else: #WHAT NINE THOUSAND? THERE'S NO WAY THAT CAN BE RIGHT
sleep(responsiveness_delay) #WAIT A WHILE AND CHECK AGAIN!
try:
if not connected:
#print pongtarget
#print conn.addressquery()
conn.privmsg(pongtarget,"Pong")
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
print "Attempted to join"
connected = True
except ValueError:
try:
conn.privmsg(conn.addressquery()[0],"Pong")
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
print "Attempted to join the second time"
connected = True
except ValueError:
print "Both methods failed"
except AttributeError:
print "Conn is not established correctly"
except NameError:
print "Pongtarget isn't yet established"
try:
conn.privmsg(conn.addressquery()[0],"Pong")
sleep(1)
for i in CORE_DATA.channels:
conn.join(i)
sleep(0.5)
print "Attempted to join the second time"
connected = True
except:
print "Both methods failed"
| agpl-3.0 | 7,691,879,388,208,059,000 | 43.726518 | 283 | 0.486878 | false |
xh4x0r3r/xh4x0r3r_blog | xh4x0r3r_blog/articles/views.py | 1 | 3504 | from django.shortcuts import render
from xh4x0r3r_blog.utils import db, build_args
import re
MAX_POST_PREVIEW = 10
#=================== index =====================
def index (request, year=None, month=None) :
page = 1
pages = 0
query = ''
title = "All Articles"
articles = db.get_articles()
quote = db.get_random_quote()
err = ''
try :
page = int(request.GET.get('p'))
if page < 1 : page = 1
except Exception as e :
print(e)
try :
query = request.GET.get('q')
except Exception as e :
print(e)
if year and month : title = "Articles published at %d/%d" % (int(year),int(month))
elif year : title = "Articles published at %d" % (int(year))
if year : articles = db.get_articles_by_date(year, month)
if query and articles :
articles = db.get_articles_by_search(query, articles)
title = "Search : %s" % query
try :
pages = int(len(articles)/MAX_POST_PREVIEW)
pages += 1 if len(articles)%MAX_POST_PREVIEW > 0 else 0
except Exception as e :
print(e)
if page > pages : page = pages
if not page : err = "Not found."
else : articles = articles[ \
(page-1)*MAX_POST_PREVIEW \
: (MAX_POST_PREVIEW*page if len(articles) > page*MAX_POST_PREVIEW \
else len(articles) % (page*MAX_POST_PREVIEW)) \
]
pages = list(map(str, range(1, pages+1)))
args = {
"title" : title,
"articles" : articles,
"page" : str(page),
"pages" : pages,
"err" : err,
}
return render(request, "articles/index.html", build_args(request, args))
#=================== view_article ==================
def view_article (request, year, month, ID=None) :
err = ''
err_comment = ''
article = None
if ID : ID = int(ID)
try : article = db.get_articles(ID=ID)[0]
except : err = "404 Not found."
if article :
ip = request.META["REMOTE_ADDR"]
try : article.article_visited_ips.get(IP=ip)
except :
db.create_article_visited_ip(article=article, IP=ip).save()
article.visits_count += 1
article.save()
if request.method == "POST" and article :
try :
name = request.POST.get("name")
email = request.POST.get("email")
message = request.POST.get("message")
assert name and email and message, "found empty field"
assert len(re.findall(r'^[A-Za-z0-9\.\_\-]+@[A-Za-z0-9]+\.[A-Za-z]+$' \
, email)) == 1, "invalid email address"
assert re.findall(r'[A-Za-z0-9]+', message)
assert 51 > len(name) > 2 \
and 101 > len(email) > 6 \
and 1025 > len(message) > 2 \
, "too long or too short"
IP = request.META["REMOTE_ADDR"]
comment = db.create_comment(article=article, IP=IP, name=name, \
email=email, comment=message)
comment.save()
except Exception as e :
print(e)
err_comment = "invalid input(s)"
args = {
"action" : "read",
"article" : article,
"err_comment" : err_comment,
"err" : err
}
return render(request, "articles/article.html", build_args(request, args))
| gpl-3.0 | -8,890,105,144,331,939,000 | 31.146789 | 91 | 0.506279 | false |
eig-2017/the-magical-csv-merge-machine | merge_machine/scripts/delete_referential.py | 1 | 4191 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 6 19:43:08 2017
@author: m75380
Script to add SIRENE to API
"""
import argparse
import json
import os
from api_helpers import APIConnection
# =============================================================================
# Paths to configuration files
# =============================================================================
# Default
display_name = 'test_ref.csv'
project_id = None
# Path to configuration
connection_config_path = os.path.join('conf', 'local_connection_parameters.json')
logs_path = 'logs.json'
# =============================================================================
# Get arguments from argparse
# =============================================================================
parser = argparse.ArgumentParser(description='Delete a reference from its' \
+ ' project_id or display name')
parser.add_argument('--name',
help='Display name of the file(s) to delete',
default=display_name)
parser.add_argument('--proj-id',
help='Project id of the file to delete (use instead of ' \
+ 'display_name). ',
default=project_id)
parser.add_argument('--conn',
help='Path to the json configuration file that' \
+ ' with information on the connection to the API',
default=connection_config_path)
parser.add_argument('--logs',
help='Path to the json log file',
default=logs_path)
args = parser.parse_args()
display_name = args.name
project_id = args.proj_id
connection_config_path = args.conn
logs_path = args.logs
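# Example invocations (added for illustration; paths are the defaults above,
# and <project_id> is a placeholder):
#   python delete_referential.py --name test_ref.csv \
#          --conn conf/local_connection_parameters.json --logs logs.json
#   python delete_referential.py --proj-id <project_id>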
# =============================================================================
# Check that we are selecting either by project_id or by display name
# =============================================================================
if project_id is not None:
display_name = None
assert int(display_name is None) + int(project_id is None) == 1
# =============================================================================
# Define how to connect to API
# =============================================================================
conn_params = json.load(open(connection_config_path))
PROTOCOL = conn_params['PROTOCOL']
HOST = conn_params['HOST']
PRINT = conn_params['PRINT']
PRINT_CHAR_LIMIT = conn_params['PRINT_CHAR_LIMIT']
c = APIConnection(PROTOCOL, HOST, PRINT, PRINT_CHAR_LIMIT)
# =============================================================================
# Load logs
# =============================================================================
if os.path.isfile(logs_path):
logs = json.load(open(logs_path))
else:
logs = dict()
#==============================================================================
# Fetch public projects
#==============================================================================
url_to_append = '/api/public_projects/normalize'
resp = c.get_resp(url_to_append)
# =============================================================================
# Delete projects
# =============================================================================
if display_name is None:
url_to_append = '/api/delete/normalize/{0}'.format(project_id)
resp = c.get_resp(url_to_append)
else:
for metadata in filter(lambda x: x['display_name']==display_name, resp):
old_project_id = metadata['project_id']
url_to_append = '/api/delete/normalize/{0}'.format(old_project_id)
resp = c.get_resp(url_to_append)
# =============================================================================
# Remove old project from logs if present
# =============================================================================
if display_name is None:
display_name = [key for key, value in logs.items() if value==project_id]
if display_name:
assert len(display_name) == 1
display_name = display_name[0]
if display_name:
if display_name in logs:
del logs[display_name]
with open(logs_path, 'w') as w:
json.dump(logs, w) | mit | -5,013,754,206,858,447,000 | 37.109091 | 81 | 0.44524 | false |
doutib/lobpredict | lobpredictrst/jupyter/simple_model/create_simple_model_predict.py | 1 | 6870 |
# coding: utf-8
# # The best model parameters are given by
# ```
# author : SHAMINDRA
# data_source_dir : SC_shuffle
# test_type : validation
# model_type : RF
# RF:
# n_estimators : 100
# criterion : 'gini'
# max_features : 'auto'
# max_depth : 20
# n_jobs : 1
# SVM:
# kernel : 'rbf'
# degree : 3
# gamma : 'auto'
# tol : 0.001
# NNET:
# method1 : 'Tanh'
# neurons1 : 24
# method2 : 'Tanh'
# neurons2 : 39
# decay : 0.0001
# learning_rate : 0.001
# n_iter : 25
# random_state : 1
# ```
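# (Added sketch - not part of the original notebook.) The RF block above maps
# directly onto scikit-learn's API; `X` and `y` are assumed to be the feature
# matrix and label vector prepared further down in this notebook.
def fit_best_rf(X, y):
    from sklearn.ensemble import RandomForestClassifier
    best_rf = RandomForestClassifier(n_estimators=100, criterion='gini',
                                     max_features='auto', max_depth=20, n_jobs=1)
    return best_rf.fit(X, y)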
# In[66]:
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import imp
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
import pandas as pd
# We looked at the top features from the best performing random forest. They are as below:
# In[48]:
# The top variables are:
var_importance = [(1, 'P_1_bid', 0.020001165389254737)
, (2, 'V_1_bid', 0.018358575666246449)
, (3, 'P_1_ask', 0.017058479215839299)
, (4, 'V_1_ask', 0.016953559068869958)
, (5, 'P_2_bid', 0.016908649059514971)
, (6, 'V_2_bid', 0.016219220215427665)
, (7, 'P_2_ask', 0.015039647893425838)
, (8, 'V_2_ask', 0.014497773408233052)
, (9, 'P_3_bid', 0.014321084019596746)
, (10, 'V_3_bid', 0.014158850118003859)
, (11, 'P_3_ask', 0.014101386932514923)
, (12, 'V_3_ask', 0.013911823640617986)
, (13, 'P_4_bid', 0.013838322603744435)
, (14, 'V_4_bid', 0.013668619218980316)
, (15, 'P_4_ask', 0.013413471959983998)]
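# (Added sketch.) A ranking like `var_importance` above is what one gets by
# sorting a fitted forest's feature_importances_; `rf` and `feature_names`
# are assumed inputs here, not objects defined in this notebook.
def rank_features(rf, feature_names):
    pairs = sorted(zip(feature_names, rf.feature_importances_),
                   key=lambda p: p[1], reverse=True)
    return [(rank + 1, name, score) for rank, (name, score) in enumerate(pairs)]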
# In[33]:
# Open test and train sets
df_train = pd.read_csv(train_ds_ref
, compression='gzip', index_col = None)
df_test = pd.read_csv(test_ds_ref
, compression='gzip', index_col = None)
# Drop the first columns - they are not useful
df_train_clean = df_train.iloc[:,1:]
df_test_clean = df_test.iloc[:,1:]
# In[34]:
X_train_cols = list(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']].columns.values)
X_train = np.array(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_test = np.array(df_test_clean[['labels']])[:,0]
# In[38]:
# Define the labels
labels = np.unique(Y_train)
## # Scale Data
scaler = MinMaxScaler()
X_test = scaler.fit_transform(X_test)
X_train = scaler.fit_transform(X_train)
# Set up the data
logreg = linear_model.LogisticRegression(C=1e5)
# Fit
logreg.fit(X_train, Y_train)
# Predict
Y_hat = logreg.predict(X_test)
Y_probs = logreg.predict_proba(X_test)
## # Misclassification error rate
miss_err = 1-accuracy_score(Y_test, Y_hat)
## # Log Loss
eps = 1e-15
logloss = log_loss(Y_test, Y_probs, eps = eps)
##confusion_matrix
confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat
, labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("Y_hat")
result.append(Y_hat)
# In[46]:
print(result[3])
print(Y_hat)
print(Y_probs)
# #### The predicted output for our most successful RF model is as follows
# ```
# classification_report
#
# precision recall f1-score support
#
# -1 0.99 0.98 0.98 18373
# 0 0.97 0.98 0.97 16950
# 1 0.99 0.98 0.98 15265
#
# avg / total 0.98 0.98 0.98 50588
# ```
# In[49]:
def predict_simple_linear(df_train_clean, df_test_clean):
X_train_cols = list(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']].columns.values)
X_train = np.array(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_test = np.array(df_test_clean[['labels']])[:,0]
# Define the labels
labels = np.unique(Y_train)
## # Scale Data
scaler = MinMaxScaler()
X_test = scaler.fit_transform(X_test)
X_train = scaler.fit_transform(X_train)
# Set up the data
logreg = linear_model.LogisticRegression(C=1e5)
# Fit
logreg.fit(X_train, Y_train)
# Predict
Y_hat = logreg.predict(X_test)
Y_probs = logreg.predict_proba(X_test)
## # Misclassification error rate
miss_err = 1-accuracy_score(Y_test, Y_hat)
## # Log Loss
    eps = 1e-15
logloss = log_loss(Y_test, Y_probs, eps = eps)
##confusion_matrix
confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat
, labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("Y_hat")
result.append(Y_hat)
return result
# In[62]:
linear_simple_predict = predict_simple_linear(df_train_clean = df_train_clean
, df_test_clean = df_train_clean)
# In[64]:
# Get the predicted outcomes
linear_simple_predict_vals = linear_simple_predict[len(linear_simple_predict) -1]
len(list(linear_simple_predict_vals))
# In[67]:
modl = imp.load_source('execute_model', '../../execute_model.py')
# In[ ]:
| isc | -5,699,154,998,110,221,000 | 26.586345 | 117 | 0.580288 | false |
geodynamics/citcoms | visual/Mayavi2/citcoms_display/plugins/HDF5UGrid.py | 1 | 9376 | #
# Script to generate VTKUnstructuredGrid objects from CitcomS hdf files
#
# author: Martin Weier
# Copyright (C) 2006 California Institue of Technology
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from enthought.tvtk.api import tvtk
import tables #For HDF support
import numpy
from math import *
from datetime import datetime
class CitcomSHDFUgrid:
"""This Class converts CitcomS hdf files to tvtk UnstructuredGrid Dataset Objects """
data = None
_nx = None
_ny = None
_nz = None
_nx_redu = None
_ny_redu = None
_nz_redu = None
_radius_inner = None
_radius_outer = None
timesteps = None
frequency = None
progress = 0
#Global because a Unstructured Grid can only hold one scalar value at a time
#but our hdf file reader plugin wants to be able to read both
__vtkordered_visc = tvtk.FloatArray()
__vtkordered_temp = tvtk.FloatArray()
def vtk_iter(self,nx,ny,nz):
"""Iterator for CitcomDataRepresentation(yxz) to VTK(xyz)"""
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
yield k + nz * i + nz * nx * j
def reduce_iter(self,n,nredu):
"""Iterator to reduce the CitcomS grid"""
i=0
n_f=float(n)
nredu_f=float(nredu)
fl=(n_f-1)/nredu_f
redu = 0
for i in xrange(nredu+1):
yield int(round(redu))
redu = redu + fl
def velocity2cart(self,vel_colat,vel_long,r, x, y, z):
"""Converts vectors in spherical to cartesian coordiantes"""
x1 = r*sin(x)*cos(y)+vel_colat*cos(x)*cos(y)-vel_long*sin(y)
y1 = r*sin(x)*sin(y)+vel_colat*cos(x)*sin(y)+vel_long*cos(y)
z1 = r*cos(x)-vel_colat*sin(x)
return x1, y1, z1
#Converts Spherical to Cartesian Coordinates
def RTF2XYZ(self,thet, phi, r):
"""Converts points from spherical to cartesian coordinates"""
x = r * sin(thet) * cos(phi)
y = r * sin(thet) * sin(phi)
z = r * cos(thet)
return x, y, z
def __citcom2vtk(self,t,f,nproc_surf,nx_redu,ny_redu,nz_redu):
"""Method to convert one timestep from a hdf file to a Vtk file. This Method is used
by the method initialize. Initialize reads the necessary meta information from the hdf file"""
hexagrid = tvtk.UnstructuredGrid()
hexagrid.allocate(1,1)
vtkordered_velo = tvtk.FloatArray()
nx = self._nx
ny = self._ny
nz = self._nz
counter = 0
el_nx_redu = nx_redu + 1
el_ny_redu = ny_redu + 1
el_nz_redu = nz_redu + 1
ordered_points = [] #reset Sequences for points
ordered_temperature = []
ordered_velocity = []
ordered_viscosity = []
for capnr in xrange(nproc_surf):
cap = f.root._f_getChild("cap%02d" % capnr)
temp_coords = [] # reset Coordinates, Velocity, Temperature Sequence
temp_vel = []
temp_temp = []
temp_visc = []
#Information from hdf
hdf_coords = cap.coord[:]
hdf_velocity = cap.velocity[t]
hdf_temperature = cap.temperature[t]
hdf_viscosity = cap.viscosity[t]
#Create Iterator to change data representation
nx_redu_iter = self.reduce_iter(nx,nx_redu)
ny_redu_iter = self.reduce_iter(ny,ny_redu)
nz_redu_iter = self.reduce_iter(nz,nz_redu)
#vtk_i = self.vtk_iter(el_nx_redu,el_ny_redu,el_nz_redu)
# read citcom data - zxy (z fastest)
for j in xrange(el_ny_redu):
j_redu = ny_redu_iter.next()
nx_redu_iter = self.reduce_iter(nx,nx_redu)
for i in xrange(el_nx_redu):
i_redu = nx_redu_iter.next()
nz_redu_iter = self.reduce_iter(nz,nz_redu)
for k in xrange(el_nz_redu):
k_redu = nz_redu_iter.next()
colat, lon, r = map(float,hdf_coords[i_redu,j_redu,k_redu])
x_coord, y_coord, z_coord = self.RTF2XYZ(colat,lon,r)
ordered_points.append((x_coord,y_coord,z_coord))
ordered_temperature.append(float(hdf_temperature[i_redu,j_redu,k_redu]))
ordered_viscosity.append(float(hdf_viscosity[i_redu,j_redu,k_redu]))
vel_colat, vel_lon , vel_r = map(float,hdf_velocity[i_redu,j_redu,k_redu])
x_velo, y_velo, z_velo = self.velocity2cart(vel_colat,vel_lon,vel_r, colat,lon , r)
ordered_velocity.append((x_velo,y_velo,z_velo))
##Delete Objects for GC
del hdf_coords
del hdf_velocity
del hdf_temperature
del hdf_viscosity
#Create Connectivity info
if counter==0:
i=1 #Counts X Direction
j=1 #Counts Y Direction
k=1 #Counts Z Direction
for n in xrange((el_nx_redu*el_ny_redu*el_nz_redu)-(el_nz_redu*el_nx_redu)):
if (i%el_nz_redu)==0: #X-Values!!!
j+=1 #Count Y-Values
if (j%el_nx_redu)==0:
k+=1 #Count Z-Values
if i%el_nz_redu!=0 and j%el_nx_redu!=0: #Check if Box can be created
#Get Vertnumbers
n0 = n+(capnr*(el_nx_redu*el_ny_redu*el_nz_redu))
n1 = n0+el_nz_redu
n2 = n1+el_nz_redu*el_nx_redu
n3 = n0+el_nz_redu*el_nx_redu
n4 = n0+1
n5 = n4+el_nz_redu
n6 = n5+el_nz_redu*el_nx_redu
n7 = n4+el_nz_redu*el_nx_redu
#Created Polygon Box
hexagrid.insert_next_cell(12,[n0,n1,n2,n3,n4,n5,n6,n7])
i+=1
#Store Arrays in Vtk conform Datastructures
self.__vtkordered_temp.from_array(ordered_temperature)
self.__vtkordered_visc.from_array(ordered_viscosity)
vtkordered_velo.from_array(ordered_velocity)
self.__vtkordered_temp.name = 'Temperature'
self.__vtkordered_visc.name = 'Viscosity'
hexagrid.point_data.scalars = self.__vtkordered_temp
vtkordered_velo.name = 'Velocity'
hexagrid.point_data.vectors = vtkordered_velo
hexagrid.points = ordered_points
self.progress += 1
return hexagrid
def initialize(self,filename,timestep,nx_redu,ny_redu,nz_redu):
"""Call this method to convert a Citcoms Hdf file to a Vtk file"""
#Read meta-inforamtion
hdf=tables.openFile(filename,'r')
self._nx = int(hdf.root.input._v_attrs.nodex)
self._ny = int(hdf.root.input._v_attrs.nodey)
self._nz = int(hdf.root.input._v_attrs.nodez)
#Clip against boundaries
if nx_redu>=0 or nx_redu>=self._nx:
nx_redu = self._nx-1
if ny_redu==0 or ny_redu>=self._ny:
ny_redu = self._ny-1
if nz_redu==0 or nz_redu>=self._nz:
nz_redu = self._nz-1
#Make reduction factors global
self._nx_redu = nx_redu
self._ny_redu = ny_redu
self._nz_redu = nz_redu
#Number of Timesteps in scene
self.timesteps = int(hdf.root.time.nrows)
#Number of caps
nproc_surf = int(hdf.root.input._v_attrs.nproc_surf)
#Store the Inner Radius. Import if we want to create a core
        self._radius_inner = float(hdf.root.input._v_attrs.radius_inner)
#start computation
hexgrid = self.__citcom2vtk(timestep,hdf,nproc_surf,nx_redu,ny_redu,nz_redu)
hdf.close()
self.progress = -1
return hexgrid
def get_vtk_viscosity(self):
return self.__vtkordered_visc
def get_vtk_temperature(self):
return self.__vtkordered_temp
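# (Added usage sketch - not part of the original module.) Typical conversion of
# a single timestep; 'example.h5' and the zero reduction factors (which fall
# back to full resolution) are placeholder assumptions.
if __name__ == '__main__':
    converter = CitcomSHDFUgrid()
    hexgrid = converter.initialize('example.h5', 0, 0, 0, 0)
    print "Converted timestep 0 of", converter.timesteps, "available timesteps"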
| gpl-2.0 | 5,070,815,270,683,033,000 | 35.768627 | 107 | 0.52901 | false |
ricardogarzocastro/Django_Scada | django_scada/django_scada/urls.py | 1 | 2700 | from django.conf.urls import *
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
url(r'^home/', 'variables.views.home'),
    # URL for the user manual
url(r'^manual/', 'variables.views.manual'),
    # URL for reading variable values
url(r'^read/', 'variables.views.read'),
    # URL for sending data via HTTP request to refresh variables
url(r'^json/read_update/','variables.views.read_update'),
    # URL for writing variables
url(r'^control/','variables.views.control'),
    # URL for sending data via HTTP request to write variables
url(r'^json/control_response/', 'variables.views.control_response'),
    # URL for selecting the variables to display
url(r'^display_editor/','variables.views.display_editor'),
    # URL for client-browser data exchange via HTTP request
url(r'^json/display_editor_response/', 'variables.views.display_editor_response'),
    # URL for the page with the live variable graph
url(r'^graph_live/', 'variables.views.graph_live'),
    # URL for asynchronous data exchange for the graph via HTTP request
url(r'^json/graph_live/', 'variables.views.json_graph_live'),
    # URL for displaying the historical graph of variables
url(r'^graph_db/', 'variables.views.graph_db'),
    # URL for data exchange via HTTP request for the historical graph
url(r'^json/graph_db/', 'variables.views.json_query_db'),
    # URL for creating a table with historical variable values
url(r'^log_db/', 'variables.views.log_db'),
    # URL for data exchange via HTTP request for the historical values table
url(r'^json/log_db/', 'variables.views.json_query_db'),
    # Django URLs for the admin interface
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
#Url para login y logout de usuario
url('^login/$', 'django.contrib.auth.views.login'),
(r'^logout/$', 'django.contrib.auth.views.logout',
{'next_page': '/login/'}),
url(r'', include('django.contrib.auth.urls')),
)
urlpatterns += staticfiles_urlpatterns()
'''
if settings.DEBUG:
urlpatterns += patterns('',
(r'^%s/(?P<path>.*)$' % settings.MEDIA_URL[1:-1],
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
''' | gpl-2.0 | 1,010,877,155,873,175,300 | 43.033333 | 91 | 0.666667 | false |
hwang3419/awu | awu/awu/settings.py | 1 | 4783 | """
Django settings for awu project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DJ_PROJECT_DIR = os.path.dirname(__file__)
BASE_DIR = os.path.dirname(DJ_PROJECT_DIR)
WSGI_DIR = os.path.dirname(BASE_DIR)
REPO_DIR = os.path.dirname(WSGI_DIR)
DATA_DIR = os.environ.get('OPENSHIFT_DATA_DIR', BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
import sys
from socket import gethostname
sys.path.append(os.path.join(REPO_DIR, 'libs'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ot30p)6)lko9oy!%dn!xigfhoo_ys44i5yeam6_jiv!1lkblxx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
gethostname(), # For internal OpenShift load balancer security purposes.
os.environ.get('OPENSHIFT_APP_DNS'), # Dynamically map to the OpenShift gear name.
]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'BB',
'rest_framework',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'awu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'awu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGE_SIZE': 10
}
if os.environ.get('OPENSHIFT_MYSQL_DB_HOST',None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'awu', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'adminT2XqWMF',
'PASSWORD': 'jkUUHKlx6jCT',
'HOST': os.environ.get('OPENSHIFT_MYSQL_DB_HOST'), # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': os.environ.get('OPENSHIFT_MYSQL_DB_PORT'), # Set to empty string for default.
}
}
else:
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
#'USER': 'adminN7GDS4G',
#'PASSWORD': 'qc9AAtxHDNEt',
#'HOST': os.environ.get('OPENSHIFT_MYSQL_DB_HOST'), # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
#'PORT': os.environ.get('OPENSHIFT_MYSQL_DB_PORT'), # Set to empty string for default.
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(WSGI_DIR, 'static')
from local_settings import *
| mit | -8,222,593,545,976,650,000 | 32.921986 | 171 | 0.658164 | false |
germandiagogomez/meson | mesonbuild/mesonlib.py | 1 | 10591 | # Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
import platform, subprocess, operator, os, shutil, re, sys
from glob import glob
class MesonException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class File:
def __init__(self, is_built, subdir, fname):
self.is_built = is_built
self.subdir = subdir
self.fname = fname
@staticmethod
def from_source_file(source_root, subdir, fname):
if not os.path.isfile(os.path.join(source_root, subdir, fname)):
raise MesonException('File %s does not exist.' % fname)
return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir, fname):
return File(True, subdir, fname)
@staticmethod
def from_absolute_file(fname):
return File(False, '', fname)
def rel_to_builddir(self, build_to_src):
if self.is_built:
return os.path.join(self.subdir, self.fname)
else:
return os.path.join(build_to_src, self.subdir, self.fname)
def endswith(self, ending):
return self.fname.endswith(ending)
def split(self, s):
return self.fname.split(s)
def __eq__(self, other):
return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
def __hash__(self):
return hash((self.fname, self.subdir, self.is_built))
def flatten(item):
if not isinstance(item, list):
return item
result = []
for i in item:
if isinstance(i, list):
result += flatten(i)
else:
result.append(i)
return result
def is_osx():
return platform.system().lower() == 'darwin'
def is_linux():
return platform.system().lower() == 'linux'
def is_windows():
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_32bit():
return not(sys.maxsize > 2**32)
def is_debianlike():
try:
open('/etc/debian_version', 'r')
return True
except FileNotFoundError:
return False
def exe_exists(arglist):
try:
p = subprocess.Popen(arglist, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
return True
except FileNotFoundError:
pass
return False
def detect_vcs(source_dir):
vcs_systems = [
dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -n', rev_regex = '(.*)', dep = '.hg/dirstate'),
dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
]
segs = source_dir.replace('\\', '/').split('/')
for i in range(len(segs), -1, -1):
curdir = '/'.join(segs[:i])
for vcs in vcs_systems:
if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
vcs['wc_dir'] = curdir
return vcs
return None
def grab_leading_numbers(vstr):
result = []
for x in vstr.split('.'):
try:
result.append(int(x))
except ValueError:
break
return result
numpart = re.compile('[0-9.]+')
def version_compare(vstr1, vstr2):
match = numpart.match(vstr1.strip())
if match is None:
raise MesonException('Uncomparable version string %s.' % vstr1)
vstr1 = match.group(0)
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
varr1 = grab_leading_numbers(vstr1)
varr2 = grab_leading_numbers(vstr2)
return cmpop(varr1, varr2)
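# Examples (added for illustration):
#   version_compare('1.2.3', '>=1.2') -> True
#   version_compare('1.2.3', '<1.0')  -> False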
def default_libdir():
try:
pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
if pc.returncode == 0:
archpath = stdo.decode().strip()
return 'lib/' + archpath
except Exception:
pass
if os.path.isdir('/usr/lib64'):
return 'lib64'
return 'lib'
def default_libexecdir():
# There is no way to auto-detect this, so it must be set at build time
return 'libexec'
def default_prefix():
return 'c:/' if is_windows() else '/usr/local'
def get_library_dirs():
if is_windows():
return ['C:/mingw/lib'] # Fixme
if is_osx():
return ['/usr/lib'] # Fix me as well.
# The following is probably Debian/Ubuntu specific.
# /usr/local/lib is first because it contains stuff
# installed by the sysadmin and is probably more up-to-date
# than /usr/lib. If you feel that this search order is
# problematic, please raise the issue on the mailing list.
unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
plat = subprocess.check_output(['uname', '-m']).decode().strip()
# This is a terrible hack. I admit it and I'm really sorry.
# I just don't know what the correct solution is.
if plat == 'i686':
plat = 'i386'
if plat.startswith('arm'):
plat = 'arm'
unixdirs += glob('/usr/lib/' + plat + '*')
if os.path.exists('/usr/lib64'):
unixdirs.append('/usr/lib64')
unixdirs += glob('/lib/' + plat + '*')
if os.path.exists('/lib64'):
unixdirs.append('/lib64')
unixdirs += glob('/lib/' + plat + '*')
return unixdirs
def do_replacement(regex, line, confdata):
match = re.search(regex, line)
while match:
varname = match.group(1)
if varname in confdata.keys():
var = confdata.get(varname)
if isinstance(var, str):
pass
elif isinstance(var, int):
var = str(var)
else:
raise RuntimeError('Tried to replace a variable with something other than a string or int.')
else:
var = ''
line = line.replace('@' + varname + '@', var)
match = re.search(regex, line)
return line
def do_mesondefine(line, confdata):
arr = line.split()
if len(arr) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
varname = arr[1]
try:
v = confdata.get(varname)
except KeyError:
return '/* #undef %s */\n' % varname
if isinstance(v, bool):
if v:
return '#define %s\n' % varname
else:
return '#undef %s\n' % varname
elif isinstance(v, int):
return '#define %s %d\n' % (varname, v)
elif isinstance(v, str):
return '#define %s %s\n' % (varname, v)
else:
raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
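# Example: assuming confdata behaves like Meson's configuration data object
# (confdata.get(name) returns the stored value or raises KeyError), an input
# line '#mesondefine HAVE_FOO' becomes:
#   '#define HAVE_FOO\n'       if the stored value is True
#   '#undef HAVE_FOO\n'        if the stored value is False
#   '/* #undef HAVE_FOO */\n'  if HAVE_FOO is not defined at all
#   '#define HAVE_FOO 42\n'    if the stored value is the integer 42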
def do_conf_file(src, dst, confdata):
data = open(src).readlines()
# Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
# Also allow escaping '@' with '\@'
regex = re.compile(r'[^\\]?@([-a-zA-Z0-9_]+)@')
result = []
for line in data:
if line.startswith('#mesondefine'):
line = do_mesondefine(line, confdata)
else:
line = do_replacement(regex, line, confdata)
result.append(line)
dst_tmp = dst + '~'
open(dst_tmp, 'w').writelines(result)
shutil.copymode(src, dst_tmp)
replace_if_different(dst, dst_tmp)
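# Example (illustrative file names): for a template file containing the line
#   '#define VERSION "@version@"'
# and confdata mapping 'version' to '1.0.0',
#   do_conf_file('config.h.in', 'config.h', confdata)
# writes '#define VERSION "1.0.0"' to config.h and only touches the output
# file when its contents actually changed.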
def dump_conf_header(ofilename, cdata):
with open(ofilename, 'w') as ofile:
ofile.write('''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
''')
for k in sorted(cdata.keys()):
v = cdata.get(k)
if isinstance(v, bool):
if v:
ofile.write('#define %s\n\n' % k)
else:
ofile.write('#undef %s\n\n' % k)
elif isinstance(v, (int, str)):
ofile.write('#define %s %s\n\n' % (k, v))
else:
raise MesonException('Unknown data type in configuration file entry: ' + k)
def replace_if_different(dst, dst_tmp):
# If contents are identical, don't touch the file to prevent
# unnecessary rebuilds.
try:
if open(dst, 'r').read() == open(dst_tmp, 'r').read():
os.unlink(dst_tmp)
return
except FileNotFoundError:
pass
os.replace(dst_tmp, dst)
def stringlistify(item):
if isinstance(item, str):
item = [item]
if not isinstance(item, list):
raise MesonException('Item is not an array')
for i in item:
if not isinstance(i, str):
raise MesonException('List item not a string.')
return item
def expand_arguments(args):
    expanded_args = []
    for arg in args:
        if not arg.startswith('@'):
            expanded_args.append(arg)
            continue
        args_file = arg[1:]
        try:
            with open(args_file) as f:
                extended_args = f.read().split()
            expanded_args += extended_args
        except Exception as e:
            print('Error expanding command line arguments, %s not found' % args_file)
            print(e)
            return None
    return expanded_args
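# Example (hypothetical response file): arguments starting with '@' are read
# as files whose whitespace-separated contents are spliced into the list.  If
# 'extra.txt' contains '--debug --prefix /opt', then
#   expand_arguments(['compile', '@extra.txt'])
# returns ['compile', '--debug', '--prefix', '/opt'], and None on read errors.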
| apache-2.0 | 8,285,344,205,178,610,000 | 31.191489 | 152 | 0.575961 | false |
Royce/GammaJS | support/APIgen/generate.py | 1 | 4851 | ########################
###
### IMPORTS
###
########################
########################
### BUILTIN
########################
from datetime import date
import codecs
import shutil
import time
import sys
import re
import os
########################
### LOCAL
########################
from utility import mkdir
from containers import *
from const import *
########################
### LOGGING
########################
import logging, logging.config
try:
logging.config.fileConfig(os.path.join(sys.path[0], LOGCONFIG))
except:
pass
log = logging.getLogger('parser.generate')
here = os.sep.join(__file__.split(os.sep)[:-1])
########################
###
### TEMPLATES
###
########################
def createPage(filename, templates, context):
from django.template import Context, loader
t = loader.select_template(templates)
f = open(filename, "w")
log.info("Creating page %s" % filename)
f.write(t.render(Context(context)))
########################
###
### GENERATOR
###
########################
class Generator(object):
def __init__(self
, outDir = os.path.join(here, "docs")
, tempdir = os.path.join(here, "tmp")
, assetDirs = None
, showPrivate = False
, templateDirs = None
):
self.outDir = os.path.abspath(outDir)
self.tempDir = os.path.abspath(tempdir)
self.assetDirs = []
self.showPrivate = showPrivate
self.templateDirs = templateDirs
if not self.templateDirs:
self.templateDirs = [os.path.join(here, "templates"), ""]
for new, onKls in [(templateDirs, self.templateDirs), (assetDirs, self.assetDirs)]:
if new:
if type(new) in (str, unicode):
new = (new, )
for directory in new:
directory = os.path.abspath(directory)
if os.path.exists(directory) and directory not in onKls:
onKls.append(directory)
########################
### UTILITY
########################
def createPage(self, information, filename, templates, **context):
context['information'] = information
filename = os.path.join(self.outDir, filename)
if type(templates) in (str, unicode):
templates = (templates, )
createPage(filename, templates, context)
########################
### PROCESS
########################
def process(self, information):
# Setup django for templates
from django.conf import settings
settings.configure(
TEMPLATE_DIRS=self.templateDirs,
INSTALLED_APPS = ('APIgen.tags', )
)
# Reset temp dir
if os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
# Make sure we have out and temp directories
mkdir(self.outDir)
mkdir(self.tempDir)
# Copy assets to output
for directory in self.assetDirs:
shutil.copytree(directory, self.tempDir, ignore=shutil.ignore_patterns(IGNORE_PATTERNS))
log.info("\n---------------------GENERATING------------------------\n")
for module in information[MODULES].values():
self.gen_module(information, module)
log.info("\n---------------------DONE------------------------\n")
def gen_module(self, information, module):
moduleName = module[NAME]
self.createPage(
information
, "%s.txt" % moduleName
, [ os.sep.join(['modules', moduleName, 'module.rst'])
, 'module.rst'
]
, module = module
, current = module
, fullname = moduleName
)
moduleDir = os.path.join(self.outDir, moduleName)
mkdir(moduleDir)
for kls in module[CLASS_LIST]:
klsName = kls[NAME]
fullName = "%s.%s" % (moduleName, klsName)
if moduleName == klsName:
fullName = klsName
self.createPage(
information
, os.sep.join([moduleName, "%s.txt" % klsName])
, [ os.sep.join(["classes", "%s.rst" % klsName])
, os.sep.join(["classes", moduleName, "%s.rst" % klsName])
, os.sep.join(["modules", moduleName, "class.rst"])
, os.sep.join(["modules", moduleName, "classes", "%s.rst" % klsName])
, "class.rst"
]
, module = module
, current = kls
, fullname = fullName
)
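    # Minimal usage sketch (directory names are assumptions, not part of this
    # module); the parsed API data is expected in the nested dict layout used
    # above (information[MODULES] -> module dicts carrying CLASS_LIST entries):
    #   gen = Generator(outDir="docs", tempdir="tmp",
    #                   templateDirs=["my_templates"])
    #   gen.process(information)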
| mit | -6,461,517,144,902,101,000 | 27.875 | 100 | 0.472686 | false |
brianr747/sfc_gui | sfc_gui/install_examples.py | 1 | 2146 | # coding=utf-8
"""
install_examples.py
Dialogs that install scripts to a desired directory. Simple front end to
sfc_models.examples.install_example_scripts
Migrated to sfc_models.examples
License/Disclaimer
------------------
Copyright 2017 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
import Tkinter as tk
from Tkinter import *
import Tkinter.messagebox as mbox
import Tkinter.filedialog as fdog
else:
import tkinter as tk
from tkinter import *
import tkinter.messagebox as mbox
import tkinter.filedialog as fdog
from sfc_models.examples import install_example_scripts
validate_str = """
This command will install sfc_models to a directory that you specify. It will also create a sub-directory named "output" (which is where log files are directed).
It will not overwrite existing files; it is recommended that you clear out your local copy of the examples directory before installing an updated examples set.
"""
def install_examples():
"""
Pops up windows to allow the user to choose a directory for installation
of sfc_models examples.
Uses tkinter, which is installed in base Python (modern versions).
:return:
"""
if not mbox.askokcancel(title='sfc_models Example Installation',
message=validate_str):
return
target = fdog.askdirectory(title='Choose directory to for sfc_models examples installation')
if target == () or target == '':
return
install_example_scripts.install(target)
if __name__ == '__main__':
install_examples()
| apache-2.0 | 6,469,923,128,646,511,000 | 30.101449 | 161 | 0.72973 | false |
naturalmessage/natmsgshardbig | monitor.py | 1 | 18662 | #!/usr/local/bin/python3
#
###############################################################################
# Copyright 2015 Natural Message, LLC.
# Author: Robert Hoot ([email protected])
#
# This file is part of the Natural Message Shard Server.
#
# The Natural Message Shard Server is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Natural Message Shard Server is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Natural Message Shard Server. If not,
# see <http://www.gnu.org/licenses/>.
###############################################################################
# This will eventually monitor system resources
# and send the info to a database.
# To Do: add lnstat info. track number of connections.
import configparser
import datetime
import json
import psycopg2
import re
import shardfunc_cp as shardfuncs
import subprocess
import sys
CONFIG_FNAME = '/var/natmsg/conf/housekeeping_shardsvr.conf'
MAIN_CONFIG = None
# The psql user names are in lower case
HOSTNAME = ''
DBNAME = ''
UNAME = 'shardwebserver'
PW = ''
CONN_STR = ''
MON_FILE_LIST = []
datestamp = datetime.datetime.now()
datestamp_sql = "'" + str(datestamp.year) + '-' \
+ str(datestamp.month).zfill(2) \
+ '-' + str(datestamp.day).zfill(2) + ' ' \
+ str(datestamp.hour).zfill(2) + ':' \
+ str(datestamp.minute).zfill(2) + ':' \
+ str(datestamp.second).zfill(2) + "'::timestamp "
print('datestamp: ' + datestamp_sql)
###############################################################################
def mon_file(fname):
"""Monitor some file statistics."""
global MON_FILE_LIST
file_info = []
rc = 0
out = {}
cmd_lst = [
'stat',
'-c',
'{"%n": {"inode": %i, "access_time": %X, '
+ '"mod_time": %Y, "change_time": %Z, "file_type": %T }}',
fname]
p = None
p = subprocess.Popen(cmd_lst, stdout=subprocess.PIPE)
if p is None:
out.update({'Error': 'Subprocess for stat command failed.'})
rc = 12
return((rc, {"mon_file": out}))
try:
rslt = p.communicate()[0].decode('utf-8')
except Exception:
out.update(
{
'Error': 'Failed to initiate '
+ 'the process for the stat command.'})
rc = 12
return((rc, {"mon_file": out}))
try:
file_info_json = json.loads(rslt)
    except Exception:
        print('Error parsing the output of the stat command.')
        print('Filename was ' + fname)
        out.update({'Error': 'Could not parse stat output for ' + fname})
        return((12, {"mon_file": out}))
out.update(file_info_json)
return((rc, {"mon_file": out}))
# #
# # The file layout for 'cpu' lines in /proc/stat (CentOS 6)
# # * user: normal processes executing in user mode
# # * nice: niced processes executing in user mode
# # * system: processes executing in kernel mode
# # * idle: twiddling thumbs
# # * iowait: waiting for I/O to complete
# # * irq: servicing interrupts
# # * softirq: servicing softirqs
#
#
def nstat():
"""Run the nstat program and log some information
This will collect information about input and output
bandwidth, TCP connections, and number of requests.
"""
rc = 0
out = {}
# CentOS 6 does not support -j for nstat
cmd_lst = ['nstat', '-az']
p = None
p = subprocess.Popen(cmd_lst, stdout=subprocess.PIPE)
if p is None:
out.update({'Error': 'Subprocess for stat command failed.'})
rc = 12
return((rc, {"nstat": out}))
try:
rslt = p.communicate()[0].decode('utf-8')
except Exception:
out.update(
{
'Error': 'Failed to initiate the process for '
+ 'the nstat command.'})
rc = 12
return((rc, {"nstat": out}))
# loop through the nstat output,
# grab desired values,
# build a python dictionary object.
# first compress multiple spaces into one space:
s = re.sub(r'[ ]{1,50}', ' ', rslt.rstrip('\n'))
# split into lines
dat = s.split('\n')
s = None
for l in dat:
# for each input line from nstat:
flds = l.split()
if flds[0] in (
'IpExtInOctets',
'IpExtOutOctets',
'IpInReceives',
'TcpActiveOpens',
'TcpPassiveOpens',
'IpOutRequests'):
# I found a key value of interest, so save the info
out.update({flds[0]: flds[1]})
return((rc, {"nstat": out}))
###############################################################################
def ps():
"""Log information about running processes"""
rc = 0
out = {}
cmd_lst = ['ps', '-A', '--format', 'uid,pid,ppid,time,cmd']
p = None
p = subprocess.Popen(cmd_lst, stdout=subprocess.PIPE)
if p is None:
out.update({'Error': 'Subprocess for ps command failed.'})
rc = 12
return((rc, {"ps": out}))
try:
rslt = p.communicate()[0].decode('utf-8')
except Exception:
out.update(
{
'Error': 'Failed to initiate the '
+ 'process for the ps command.'})
rc = 12
return((rc, {"ps": out}))
# loop through the ps output,
# grab desired values,
# build a python dictionary object.
# first compress multiple spaces into one space:
s = re.sub(r'[ ]{1,50}', ' ', rslt.rstrip('\n'))
# split into lines
dat = s.split('\n')
s = None
past_header = False
for l in dat:
if past_header:
# for each input line from ps:
# uid,pid,ppid,time,cmd
flds = l.split()
my_cmd = re.sub(r'[\'"\r\t\n]', '', flds[4][0:200])
my_parms = ''
if len(flds) > 5:
# The next line takes the list of program parmaters
# that appear in teh extended ps listing, and retains
# only the essential chars that could not cause sql injection
# or other problems.
my_parms = re.sub(
r'[^a-zA-Z \t0-9]',
'',
' '.join(flds[5:])[0:200])
out.update(
{
flds[1]: {
'uid': flds[0],
'ppid': flds[2],
'time': flds[3],
'cmd': my_cmd,
'parms': my_parms}})
else:
past_header = True
pass # skip the first row of data -- it is the ps output header.
return((rc, {"ps": out}))
###############################################################################
def vmstat():
"""Log vmstat information about memory and CPU."""
rc = 0
out = {}
cmd_lst = ['vmstat', '-S', 'K', '-s']
p = None
p = subprocess.Popen(cmd_lst, stdout=subprocess.PIPE)
if p is None:
out.update({'Error': 'Subprocess for vmstat command failed.'})
rc = 12
return((rc, {"vmstat": out}))
try:
rslt = p.communicate()[0].decode('utf-8')
except Exception:
out.update({'Error': 'Failed to initiate the process '
+ 'for the vmstat command.'})
rc = 12
return((rc, {"vmstat": out}))
# loop through the vmstat output,
# grab desired values,
# build a python dictionary object.
# first compress multiple spaces into one space:
s = re.sub(r'[ ]{1,50}', ' ', rslt.rstrip('\n'))
# split into lines
dat = s.split('\n')
s = None
for l in dat:
# for each input line from vmstat:
flds = l.split()
v = int(flds[0])
k = '_'.join(flds[1:])
if k in (
'K_total_memory',
'K_used_memory',
'K_active_memory',
'K_free_memory',
'K_swap_cache',
'K_total_swap',
'K_free_swap',
'non-nice_user_cpu_ticks',
'nice_user_cpu_ticks',
'system_cpu_ticks',
'idle_cpu_ticks',
'IO-wait_cpu_ticks',
'boot_time',
'forks'):
out.update({k: v})
return((rc, {"vmstat": out}))
###############################################################################
def main():
"""Run multiple routines to log system info.
* Run the ps routine to get information about active
processes (and log it to the database),
* Run the symon001 stored procedure to get various
record counts (and log it to the databse),
* Run vmstat and save memory and CPU info,
* Collect and log information of files that have been accessed,
* Collect and log nstat data for network IO.
"""
global CONFIG_FNAME
global MAIN_CONFIG
global DBNAME
global HOSTNAME
global DB_UNAME
global DB_PW
global CONN_STR
global MON_FILE_LIST
out = {}
MAIN_CONFIG = configparser.ConfigParser()
MAIN_CONFIG.read(CONFIG_FNAME)
DBNAME = MAIN_CONFIG['global']['DBNAME']
HOSTNAME = MAIN_CONFIG['global']['HOSTNAME']
DB_UNAME = MAIN_CONFIG['global']['DB_UNAME']
DB_PW = MAIN_CONFIG['global']['DB_PW']
CONN_STR = "host=" + HOSTNAME + " dbname=" + DBNAME + " user=" \
+ DB_UNAME + " password='" + DB_PW + "'"
if 'MON_FILE_LIST' in MAIN_CONFIG['global']:
tmp_list = MAIN_CONFIG['global']['MON_FILE_LIST']
MON_FILE_LIST = tmp_list.split(',')
else:
        MON_FILE_LIST = []
    if DBNAME == '' or DB_UNAME == '' or DB_PW == '' or HOSTNAME == '':
        print('Error, database connection details are missing.')
        sys.exit(15)
# -------------------------------------------------------------------------
conn, msg_d = shardfuncs.shard_connect(CONN_STR)
if conn is None:
print(shardfuncs.safe_string(msg_d))
raise RuntimeError(shardfuncs.err_log(110015, 'Failed to '
+ 'make a database connection in '
+ 'nm_db_table_names', extra_msg=msg_d))
cur = conn.cursor()
# -------------------------------------------------------------------------
ps_write_count = 0
rc, msg_d = ps()
    rslts = None
    try:
        # Get the dictionary object from the ps output:
        rslts = msg_d['ps']
    except Exception:
        print('Error. I did not find ps output.')
if rslts is not None:
for k, v in rslts.items():
# The 'k' values here are numeric values
# for the pid.
# k=1046 v={'ppid': '1', 'uid': '0', 'time': '00:00:26',
# 'cmd': 'SCREEN', 'parms': ''}
cmd = 'INSERT INTO shardsvr.sysmon_ps(' + \
' ppid, uid, time, cmd, parms, sysmon_ps_dt) VALUES(' + \
str(v['ppid']) + ', ' + \
str(v['uid']) + ', ' + '0' + ', ' + \
"'" + str(v['cmd']) + "', '" + str(v['parms']) + "', " + \
datestamp_sql + ');'
rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
if rc != 0:
out.update({'Error': 'SQL insert command failed.'})
out.update({'Error-detail': msg['Error']})
conn.close()
print('ERROR77777: ' + repr(msg))
else:
ps_write_count += 1
# the sql-write loop is done, now commit
cur.execute('commit;')
# do not conn.close() until the end (or on error)
out.update({'status': "OK"})
print('ps write count: ' + str(ps_write_count))
# -------------------------------------------------------------------------
# sysmon001: stored procedure to get table counts
#
rec_counts_write_count = 0
# Run the sysmon001 stored procedure to
# capture a bunch of record counts and save
# them to shardsvr.sysmon_rec_counts:
cmd = 'SELECT shardsvr.sysmon001();'
rc, my_data, msg = shardfuncs.shard_sql_select(cur, cmd)
if rc != 0:
out.update({'Error': 'SQL insert command failed.'})
out.update({'Error-detail': msg['Error']})
conn.close()
print('ERROR111111: ' + repr(msg))
else:
rec_counts_write_count += 1
print('rec_counts_write_count = ' + str(rec_counts_write_count))
# -------------------------------------------------------------------------
# vmstat - collect memory and CPU info
#
rc, msg_d = vmstat()
out.update(msg_d)
rslts = None
vmstat_write_count = 0
try:
        # Get the dictionary object from the vmstat output:
rslts = msg_d['vmstat']
except Exception:
print('Error. I did not find vmstat output.')
if rslts is not None:
v = rslts
# Note: two table fields have '-' replaced with '_':
# non-nice_user_cpu_ticks and IO-wait_cpu_ticks
cmd = 'INSERT INTO shardsvr.sysmon_vmstat (' \
+ 'K_total_memory, K_used_memory, K_active_memory, ' \
+ 'K_free_memory, K_swap_cache, ' \
+ 'K_total_swap, K_free_swap, ' \
+ 'non_nice_user_cpu_ticks, nice_user_cpu_ticks, ' \
+ 'system_cpu_ticks, idle_cpu_ticks, IO_wait_cpu_ticks, ' \
+ 'boot_time, sysmon_vmstat_dt) ' \
+ 'VALUES (' + str(v['K_total_memory']) + ', ' \
+ str(v['K_used_memory']) + ', ' \
+ str(v['K_active_memory']) + ', ' \
+ str(v['K_free_memory']) + ', ' \
+ str(v['K_swap_cache']) + ', ' \
+ str(v['K_total_swap']) + ', ' \
+ str(v['K_free_swap']) + ', ' \
+ str(v['non-nice_user_cpu_ticks']) + ', ' \
+ str(v['nice_user_cpu_ticks']) + ', ' \
+ str(v['system_cpu_ticks']) + ', ' \
+ str(v['idle_cpu_ticks']) + ', ' \
+ str(v['IO-wait_cpu_ticks']) + ', ' \
+ str(v['boot_time']) + ', ' + datestamp_sql + ');'
rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
if rc != 0:
out.update({'Error': 'SQL insert command failed.'})
out.update({'Error-detail': msg['Error']})
conn.close()
print('ERROR999999: ' + repr(msg))
else:
vmstat_write_count += 1
# the sql-write loop is done, now commit
cur.execute('commit;')
# do not conn.close() until the end (or on error)
out.update({'status': "OK"})
print('vmstat write count: ' + str(vmstat_write_count))
# -------------------------------------------------------------------------
# File Monior
#
# (collect file attributes for specific files)
#
if len(MON_FILE_LIST) > 0:
for fname in MON_FILE_LIST:
rc, msg_d = mon_file(fname)
out.update(msg_d)
rslts = None
try:
# Get the dictionary object from the file monitor:
rslts = msg_d['mon_file']
except Exception:
print('I did not find results from the file_monitor.')
file_write_count = 0
if rslts is not None:
for k, v in rslts.items():
# There could be many files here
fname = re.sub(r'[\'"\r\t\n]', '', k[0:200])
# These are file attributes
# file_type, inode, change_time,
# access_time, mod_time.
cmd = 'INSERT INTO shardsvr.sysmon_file(' \
'file_name, file_type, ' \
'inode, chg_time, access_time, ' \
'mod_time, sysmon_file_dt ) ' \
'VALUES(' + "'" + fname + "', " \
+ str(v['file_type']) + ', ' \
+ str(v['inode']) + ', ' \
+ str(v['change_time']) + ', ' \
+ str(v['access_time']) + ', ' \
+ str(v['mod_time']) + ', ' \
+ datestamp_sql + ');'
rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
if rc != 0:
out.update({'Error': 'SQL insert command failed.'})
out.update({'Error-detail': msg['Error']})
conn.close()
print('ERROR33333: ' + repr(msg))
else:
file_write_count += 1
# the sql-write loop is done, now commit
cur.execute('commit;')
# do not conn.close() until the end (or on error)
out.update({'status': "OK"})
print('file write count: ' + str(file_write_count))
# -------------------------------------------------------------------------
# nstat - Network IO stats
#
rc, msg_d = nstat()
out.update(msg_d)
    rslts = None
    try:
        # Get the dictionary object from the nstat output:
        rslts = msg_d['nstat']
    except Exception:
        print('Error. I did not find nstat output.')
if rslts is not None:
v = rslts
nstat_write_count = 0
cmd = 'INSERT INTO shardsvr.sysmon_nstat(' + \
'IpExtInOctets, IpExtOutOctets, ' + \
'IpInReceives, TcpActiveOpens, TcpPassiveOpens, ' + \
'IpOutRequests, sysmon_nstat_dt) ' + \
'VALUES( ' \
+ str(v['IpExtInOctets']) + ', ' \
+ str(v['IpExtOutOctets']) + ', ' \
+ str(v['IpInReceives']) + ', ' + str(v['TcpActiveOpens']) + ', ' \
+ str(v['TcpPassiveOpens']) + ', ' \
+ str(v['IpOutRequests']) + ', ' \
+ datestamp_sql + ');'
rc, msg = shardfuncs.shard_sql_insert(cur, cmd)
if rc != 0:
out.update({'Error': 'SQL insert command failed.'})
out.update({'Error-detail': msg['Error']})
conn.close()
print('ERROR8888: ' + repr(msg))
else:
nstat_write_count += 1
# the sql-write loop is done, now commit
cur.execute('commit;')
# do not conn.close() until the end (or on error)
out.update({'status': "OK"})
print('nstat write count: ' + str(nstat_write_count))
if __name__ == '__main__':
main()
| gpl-3.0 | 1,121,881,773,405,645,200 | 32.869328 | 79 | 0.4873 | false |
openstack/ceilometer | ceilometer/agent.py | 1 | 3822 | #
# Copyright 2013 Intel Corp.
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pkg_resources
from oslo_log import log
from oslo_utils import fnmatch
import yaml
LOG = log.getLogger(__name__)
class ConfigException(Exception):
def __init__(self, cfg_type, message, cfg):
self.cfg_type = cfg_type
self.msg = message
self.cfg = cfg
def __str__(self):
return '%s %s: %s' % (self.cfg_type, self.cfg, self.msg)
class SourceException(Exception):
def __init__(self, message, cfg):
self.msg = message
self.cfg = cfg
def __str__(self):
return 'Source definition invalid: %s (%s)' % (self.msg, self.cfg)
class ConfigManagerBase(object):
"""Base class for managing configuration file refresh"""
def __init__(self, conf):
self.conf = conf
def load_config(self, cfg_file):
"""Load a configuration file and set its refresh values."""
if os.path.exists(cfg_file):
cfg_loc = cfg_file
else:
cfg_loc = self.conf.find_file(cfg_file)
if not cfg_loc:
LOG.debug("No pipeline definitions configuration file found! "
"Using default config.")
cfg_loc = pkg_resources.resource_filename(
__name__, 'pipeline/data/' + cfg_file)
with open(cfg_loc) as fap:
conf = yaml.safe_load(fap)
LOG.debug("Config file: %s", conf)
return conf
class Source(object):
"""Represents a generic source"""
def __init__(self, cfg):
self.cfg = cfg
try:
self.name = cfg['name']
except KeyError as err:
raise SourceException(
"Required field %s not specified" % err.args[0], cfg)
def __str__(self):
return self.name
def check_source_filtering(self, data, d_type):
"""Source data rules checking
- At least one meaningful datapoint exist
- Included type and excluded type can't co-exist on the same pipeline
- Included type meter and wildcard can't co-exist at same pipeline
"""
if not data:
raise SourceException('No %s specified' % d_type, self.cfg)
if (any(x for x in data if x[0] not in '!*') and
any(x for x in data if x[0] == '!')):
raise SourceException(
'Both included and excluded %s specified' % d_type,
self.cfg)
if '*' in data and any(x for x in data if x[0] not in '!*'):
raise SourceException(
'Included %s specified with wildcard' % d_type,
self.cfg)
@staticmethod
def is_supported(dataset, data_name):
# Support wildcard like storage.* and !disk.*
# Start with negation, we consider that the order is deny, allow
if any(fnmatch.fnmatch(data_name, datapoint[1:])
for datapoint in dataset if datapoint[0] == '!'):
return False
if any(fnmatch.fnmatch(data_name, datapoint)
for datapoint in dataset if datapoint[0] != '!'):
return True
# if we only have negation, we suppose the default is allow
return all(datapoint.startswith('!') for datapoint in dataset)
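    # Example: exclusions win, explicit matches allow, and a dataset made up
    # only of exclusions allows anything that is not excluded:
    #   Source.is_supported(['!disk.*'], 'disk.read.bytes') -> False
    #   Source.is_supported(['cpu', '!disk.*'], 'cpu')      -> True
    #   Source.is_supported(['cpu', '!disk.*'], 'memory')   -> False
    #   Source.is_supported(['!disk.*'], 'memory.usage')    -> True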
| apache-2.0 | 2,127,641,988,269,164,500 | 31.948276 | 78 | 0.601256 | false |
cclib/cclib | test/data/testScan.py | 1 | 7204 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test scan logfiles in cclib"""
import os
import unittest
import numpy
import cclib
from skip import skipForParser
__filedir__ = os.path.realpath(os.path.dirname(__file__))
OPT_DONE = cclib.parser.data.ccData.OPT_DONE
OPT_NEW = cclib.parser.data.ccData.OPT_NEW
class GenericScanTestBase(unittest.TestCase):
"""Base potential energy surface scan unittest."""
def assertOptNew(self, optstatus_value):
return optstatus_value & OPT_NEW == OPT_NEW
def assertOptDone(self, optstatus_value):
return optstatus_value & OPT_DONE == OPT_DONE
class GenericRelaxedScanTest_optdone_bool(GenericScanTestBase):
"""Generic relaxed potential energy surface scan unittest."""
datatype = cclib.parser.data.ccData_optdone_bool
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
def testoptdone(self):
"""Is the optimization finished?"""
self.assertIsInstance(self.data.optdone, bool)
self.assertEqual(self.data.optdone, True)
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
@skipForParser('Molcas','The parser is still being developed so we skip this test')
def testindices(self):
"""Do the indices match the results from geovalues."""
assert self.data.optdone and numpy.all(self.data.geovalues[-1] <= self.data.geotargets)
@skipForParser("Jaguar", "Not implemented")
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser("ORCA", "Not implemented")
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testoptstatus(self):
"""Does optstatus contain expected values?"""
# The input and final coordinates were at a stationary points.
self.assertOptNew(self.data.optstatus[0])
self.assertOptDone(self.data.optstatus[0])
self.assertOptDone(self.data.optstatus[-1])
class GenericUnrelaxedScanTest(GenericScanTestBase):
"""Generic unrelaxed potential energy surface scan unittest."""
# extra indices
extra = 0
@skipForParser("Jaguar", "Not implemented")
def testscannames(self):
self.assertIsInstance(self.data.scannames, list)
@skipForParser("ORCA", "Not implemented")
@skipForParser("Jaguar", "Not implemented")
def testscanenergies(self):
self.assertIsInstance(self.data.scanenergies, list)
# This checks the order of magnitude, and unit conversion if nothing else.
numpy.testing.assert_array_less(numpy.array(self.data.scanenergies), -10000)
@skipForParser("ORCA", "Not implemented")
@skipForParser("Jaguar", "Not implemented")
def testscanparm(self):
self.assertIsInstance(self.data.scanparm, list)
# Each parameters should have as many values as there are scan
# energies, or optimized point on the PES.
for parm in self.data.scanparm:
self.assertEqual(len(parm), len(self.data.scanenergies))
class GenericRelaxedScanTest(GenericUnrelaxedScanTest):
"""Generic relaxed potential energy surface scan unittest."""
# extra indices
extra = 0
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testnumindices(self):
"""Do the number of indices match number of scan points."""
self.assertEqual(len(self.data.optdone), 12 + self.extra)
@skipForParser("Jaguar", "Does not work as expected")
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser("ORCA", "Does not work as expected")
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testindices(self):
"""Do the indices match the results from geovalues."""
indexes = self.data.optdone
geovalues_from_index = self.data.geovalues[indexes]
temp = numpy.all(self.data.geovalues <= self.data.geotargets, axis=1)
geovalues = self.data.geovalues[temp]
numpy.testing.assert_array_equal(geovalues, geovalues_from_index)
@skipForParser("Jaguar", "Not implemented")
@skipForParser('Molcas','The parser is still being developed so we skip this test')
@skipForParser("ORCA", "Not implemented")
@skipForParser('Turbomole','The parser is still being developed so we skip this test')
def testoptstatus(self):
"""Does optstatus contain expected values?"""
OPT_NEW = self.data.OPT_NEW
OPT_DONE = self.data.OPT_DONE
# The input coordinates were at a stationary point.
self.assertOptDone(self.data.optstatus[0])
self.assertEqual(len(self.data.converged_geometries), len(self.data.optdone))
for idone in self.data.optdone:
self.assertOptDone(self.data.optstatus[idone])
if idone != len(self.data.optstatus) - 1:
self.assertOptNew(self.data.optstatus[idone+1])
@skipForParser("Jaguar", "Not implemented")
def testscannames(self):
self.assertIsInstance(self.data.scannames, list)
@skipForParser("Jaguar", "Not implemented")
@skipForParser("ORCA", "Not implemented")
def testscanenergies(self):
self.assertIsInstance(self.data.scanenergies, list)
# This checks the order of magnitude, and unit conversion if nothing else.
numpy.testing.assert_array_less(numpy.array(self.data.scanenergies), -10000)
@skipForParser("Jaguar", "Not implemented")
@skipForParser("ORCA", "Not implemented")
def testscanparm(self):
self.assertIsInstance(self.data.scanparm, list)
# Each parameters should have as many values as there are scan
# energies, or optimized point on the PES.
for parm in self.data.scanparm:
self.assertEqual(len(parm), len(self.data.scanenergies))
class GaussianUnrelaxedScanTest(GenericUnrelaxedScanTest):
"""Customized unrelaxed potential energy surface scan unittest"""
extra = 1
class GaussianRelaxedScanTest(GenericRelaxedScanTest):
"""Customized relaxed potential energy surface scan unittest"""
extra = 1
class JaguarRelaxedScanTest(GenericRelaxedScanTest):
"""Customized relaxed potential energy surface scan unittest"""
extra = 1
class OrcaRelaxedScanTest(GenericRelaxedScanTest):
"""Customized relaxed potential energy surface scan unittest"""
extra = 1
if __name__=="__main__":
import sys
sys.path.insert(1, os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['Scan'])
suite.testall()
| bsd-3-clause | 615,060,083,290,310,300 | 36.940541 | 95 | 0.678651 | false |
denversc/cligen | tests/unittests/test_argspec.py | 1 | 8482 | # Copyright 2015 Denver Coneybeare <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from cligen.argspec import ArgumentParserSpec
class Test_ArgumentSpecParser(unittest.TestCase):
DEFAULT_VALUE = object()
def test___init___PositionalArgs(self):
arguments = object()
help_argument = object()
x = ArgumentParserSpec(arguments, help_argument)
self.assertIs(arguments, x.arguments)
self.assertIs(help_argument, x.help_argument)
def test___init___KeywordArgs(self):
arguments = object()
help_argument = object()
x = ArgumentParserSpec(arguments=arguments, help_argument=help_argument)
self.assertIs(arguments, x.arguments)
self.assertIs(help_argument, x.help_argument)
def test___eq___Equal(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec()
self.assertTrue(x1 == x2)
def test___eq___arguments_Missing(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec()
del x2.arguments
self.assertFalse(x1 == x2)
def test___eq___arguments_Unequal(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec(arguments=[])
self.assertFalse(x1 == x2)
def test___eq___help_argument_Missing(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec()
del x2.help_argument
self.assertFalse(x1 == x2)
def test___eq___help_argument_Unequal(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec(help_argument=object())
self.assertFalse(x1 == x2)
def test___ne___Equal(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec()
self.assertFalse(x1 != x2)
def test___ne___arguments_Missing(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec()
del x2.arguments
self.assertTrue(x1 != x2)
def test___ne___arguments_Unequal(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec(arguments=[])
self.assertTrue(x1 != x2)
def test___ne___help_argument_Missing(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec()
del x2.help_argument
self.assertTrue(x1 != x2)
def test___ne___help_argument_Unequal(self):
x1 = self.new_ArgumentParserSpec()
x2 = self.new_ArgumentParserSpec(help_argument=object())
self.assertTrue(x1 != x2)
def new_ArgumentParserSpec(self, arguments=DEFAULT_VALUE, help_argument=DEFAULT_VALUE):
if arguments is self.DEFAULT_VALUE:
input_file_argument = ArgumentParserSpec.Argument(
keys=["-i", "--input-file"],
type=ArgumentParserSpec.Argument.TYPE_STRING_VALUE,
help_text="The input file",
)
output_file_argument = ArgumentParserSpec.Argument(
keys=["-o", "--output-file"],
type=ArgumentParserSpec.Argument.TYPE_STRING_VALUE,
help_text="The output file",
)
arguments = [input_file_argument, output_file_argument]
if help_argument is self.DEFAULT_VALUE:
help_argument = ArgumentParserSpec.Argument(
keys=["-h", "--help"],
type=ArgumentParserSpec.Argument.TYPE_BUILTIN_HELP,
help_text="Show the help screen then exit",
)
if help_argument is not None:
arguments.append(help_argument)
return ArgumentParserSpec(
arguments=arguments,
help_argument=help_argument,
)
class Test_ArgumentParserSpec_Argument(unittest.TestCase):
def test___init___PositionalArgs(self):
keys = object()
type = object()
help_text = object()
x = ArgumentParserSpec.Argument(keys, type, help_text)
self.assertIs(keys, x.keys)
self.assertIs(type, x.type)
self.assertIs(help_text, x.help_text)
def test___init___KeywordArgs(self):
keys = object()
type = object()
help_text = object()
x = ArgumentParserSpec.Argument(keys=keys, type=type, help_text=help_text)
self.assertIs(keys, x.keys)
self.assertIs(type, x.type)
self.assertIs(help_text, x.help_text)
def test___eq___Equal(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
self.assertTrue(x1 == x2)
def test___eq___keys_Missing(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
del x2.keys
self.assertFalse(x1 == x2)
def test___eq___keys_Unequal(self):
x1 = self.new_Argument()
x2 = self.new_Argument(keys=[])
self.assertFalse(x1 == x2)
def test___eq___type_Missing(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
del x2.type
self.assertFalse(x1 == x2)
def test___eq___type_Unequal(self):
x1 = self.new_Argument()
x2 = self.new_Argument(type=[])
self.assertFalse(x1 == x2)
def test___eq___help_text_Missing(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
del x2.help_text
self.assertFalse(x1 == x2)
def test___eq___help_text_Unequal(self):
x1 = self.new_Argument()
x2 = self.new_Argument(help_text="blah blah")
self.assertFalse(x1 == x2)
def test___ne___Equal(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
self.assertFalse(x1 != x2)
def test___ne___keys_Missing(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
del x2.keys
self.assertTrue(x1 != x2)
def test___ne___keys_Unequal(self):
x1 = self.new_Argument()
x2 = self.new_Argument(keys=[])
self.assertTrue(x1 != x2)
def test___ne___type_Missing(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
del x2.type
self.assertTrue(x1 != x2)
def test___ne___type_Unequal(self):
x1 = self.new_Argument()
x2 = self.new_Argument(type=[])
self.assertTrue(x1 != x2)
def test___ne___help_text_Missing(self):
x1 = self.new_Argument()
x2 = self.new_Argument()
del x2.help_text
self.assertTrue(x1 != x2)
def test___ne___help_text_Unequal(self):
x1 = self.new_Argument()
x2 = self.new_Argument(help_text="blah blah")
self.assertTrue(x1 != x2)
def test___str___keys_Length0(self):
x = self.new_Argument(keys=[])
self.assertEqual("", "{}".format(x))
def test___str___keys_Length1(self):
x = self.new_Argument(keys=["-n"])
self.assertEqual("-n", "{}".format(x))
def test___str___keys_Length2(self):
x = self.new_Argument(keys=["-n", "--name"])
self.assertEqual("-n/--name", "{}".format(x))
def test___repr___(self):
keys = ["keys"]
type = "the type"
help_text = "help_text"
x = self.new_Argument(keys=keys, type=type, help_text=help_text)
expected = "Argument(keys={keys!r}, type={type!r}, help_text={help_text!r})".format(
keys=keys,
type=type,
help_text=help_text,
)
self.assertEqual(expected, "{!r}".format(x))
def new_Argument(self, keys=None, type=None, help_text=None):
if keys is None:
keys = ["-o", "--output-file"]
if type is None:
type = ArgumentParserSpec.Argument.TYPE_STRING_VALUE
return ArgumentParserSpec.Argument(
keys=keys,
type=type,
help_text=help_text,
)
| gpl-3.0 | -6,773,141,248,986,947,000 | 32.393701 | 92 | 0.592431 | false |
CreditEaseDBA/Themis | rule_analysis/libs/text/sql_text.py | 1 | 4090 | # -*- coding: utf-8 -*-
import sqlparse
class SqlText(object):
def __init__(self, mongo_client, start_date,
stop_date, schema, hostname, db_client=None):
self.db_client = db_client
self.mongo_client = mongo_client
self.start_date = start_date
self.stop_date = stop_date
self.schema = schema
self.hostname = hostname
def get_mysql_text(self):
"""
        Fetch the text of slow MySQL queries from the results stored by pt-query-digest.
"""
sql = """
SELECT conv(checksum,10,16) AS `checksum`,
fact.sample AS `sample`,
ROUND(SUM(Rows_examined_sum)/SUM(rows_sent_sum),2) AS `index_ratio`,
SUM(Query_time_sum) / SUM(ts_cnt) AS `query_time_avg`,
ROUND(SUM(Rows_sent_sum)/SUM(ts_cnt),0) AS `rows_sent_avg`,
SUM(ts_cnt) AS `ts_cnt`
FROM `global_query_review` AS `fact`
JOIN `global_query_review_history` AS `dimension` USING (`checksum`)
WHERE dimension.ts_min >= '{start_date}'
AND dimension.ts_min <= '{stop_date}'
AND db_max='{schema}'
AND hostname_max='{hostname}'
GROUP BY checksum
ORDER BY Query_time_sum DESC LIMIT 1000 ;
"""
sql = sql.format(
start_date=self.start_date,
stop_date=self.stop_date,
schema=self.schema,
hostname=self.hostname
)
self.db_client.cursor.execute(sql)
result = self.db_client.cursor.fetchall()
sql_list = []
for i in result:
sql_format = sqlparse.format(i[1], strip_whitespace=True).lower()
sql_list.append({
"checksum": i[0],
"sqltext_form": sql_format,
"sqltext_org": i[1],
"sqlstats": [{
"index_ratio": i[2],
"query_time_avg": i[3],
"rows_sent_avg": i[4],
"ts_cnt": i[5]
}]
})
return sql_list
def get_oracle_text(self):
"""
        Fetch the Oracle SQL text from MongoDB.
"""
sql = {
"USERNAME": self.schema,
"IPADDR": self.hostname,
"ETL_DATE": {
"$gte": self.start_date,
"$lte": self.stop_date
}
}
condition = {"SQL_ID": 1, "SQL_TEXT_DETAIL": 1}
records = self.mongo_client.find("sqltext", sql, condition)
sql_id_list = []
sql_list = []
sql_id_list = []
for i in records:
if i["SQL_ID"] not in sql_id_list:
sqlstat_list = []
sql_format = sqlparse.format(
i["SQL_TEXT_DETAIL"], strip_whitespace=True).lower()
for j in self.get_stat(i["SQL_ID"]):
sqlstat_list.append(j)
sql_list.append({
"checksum": i["SQL_ID"],
"sqltext_form": sql_format,
"sqltext_org": i["SQL_TEXT_DETAIL"],
"sqlstats": sqlstat_list
})
sql_id_list.append(i["SQL_ID"])
return sql_list
def get_text(self, db_type):
if db_type == "mysql":
return self.get_mysql_text()
elif db_type == "O":
return self.get_oracle_text()
def get_stat(self, sql_id):
sql = {
"SQL_ID": sql_id,
"USERNAME": self.schema,
"ETL_DATE": {
"$gte": self.start_date,
"$lte": self.stop_date
}
}
condition = {
"ETL_DATE": 1,
"PLAN_HASH_VALUE": 1,
"BUFFER_GETS": 1,
"CPU_TIME": 1,
"PER_ELAPSED_TIME": 1,
"PER_DISK_READS": 1,
"PER_BUFFER_GETS": 1,
"EXECUTIONS": 1,
"ELAPSED_TIME": 1,
"DISK_READS": 1,
"PER_CPU_TIME": 1
}
result = self.mongo_client.find("sqlstat", sql, condition)
return result
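    # Rough usage sketch (the client objects and literal values below are
    # placeholders; they only need to follow the interfaces used above, i.e.
    # mongo_client.find(...) and db_client.cursor):
    #   collector = SqlText(mongo_client, '2017-01-01', '2017-01-31',
    #                       'my_schema', '10.0.0.1', db_client=mysql_client)
    #   slow_sql = collector.get_text('mysql')   # or 'O' for Oracle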
| apache-2.0 | -1,347,671,995,002,225,200 | 32.180328 | 79 | 0.465662 | false |
roderickmackenzie/gpvdm | gpvdm_gui/gui/dump_select.py | 1 | 9125 | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package dump_select
# Widget to select the files which should be dumped.
#
import os
import fnmatch
import i18n
_ = i18n.language.gettext
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget,QTableWidget,QAbstractItemView,QTreeWidgetItemIterator, QTreeWidget, QPushButton, QHBoxLayout, QTreeWidgetItem
from PyQt5.QtGui import QPainter,QIcon
from PyQt5.QtGui import QFont
from inp import inp_save
from inp import inp_load_file
from icon_lib import icon_get
from error_dlg import error_dlg
from str2bool import str2bool
class dump_select(QWidget):
def __init__(self):
QWidget.__init__(self)
self.main_vbox=QVBoxLayout()
self.save_function=None
self.setWindowIcon(icon_get("scan"))
self.setWindowTitle(_("Dump File Selector")+" (https://www.gpvdm.com)")
self.tab = QTreeWidget()
self.tab.header().hide()
#self.tab.setHeaderItem("Scan items")
self.font = QFont()
# self.font.setFamily('DejaVu Sans')
# self.font.setBold(True)
# self.font.setStyleHint(QFont.Monospace)
# self.font.setFixedPitch(True)
self.font.setPointSize(int(20))
self.tab.setFont(self.font)
toolbar=QToolBar()
toolbar.setIconSize(QSize(32, 32))
self.refresh = QAction(icon_get("view-refresh"), _("Refresh"), self)
self.refresh.triggered.connect(self.callback_refresh)
toolbar.addAction(self.refresh)
self.main_vbox.addWidget(toolbar)
self.main_vbox.addWidget(self.tab)
self.setLayout(self.main_vbox)
#okButton.clicked.connect(self.tree_apply_click)
#cancelButton.clicked.connect(self.close)
#self.tab.header().close()
self.update()
self.save()
self.tab.itemChanged.connect(self.callback_tree_checked)
self.tab.itemClicked.connect(self.tree_callback_clicked)
return
def callback_tree_checked(self):
self.save()
def wildcard_replace(self,name):
if fnmatch.fnmatch(name, "light_1d_*_n.dat")==True:
name="light_1d_*_n.dat"
if fnmatch.fnmatch(name, "light_1d_*_r.dat")==True:
name="light_1d_*_r.dat"
if fnmatch.fnmatch(name, "light_1d_*_E.dat")==True:
name="light_1d_*_E.dat"
if fnmatch.fnmatch(name, "light_1d_*_alpha.dat")==True:
name="light_1d_*_alpha.dat"
if fnmatch.fnmatch(name, "light_1d_*_photons.dat")==True:
name="light_1d_*_photons.dat"
if fnmatch.fnmatch(name, "light_1d_*_photons_norm.dat")==True:
name="light_1d_*_photons_norm.dat"
if fnmatch.fnmatch(name, "light_1d_*_alpha.dat")==True:
name="light_1d_*_alpha.dat"
if fnmatch.fnmatch(name, "light_1d_*_E_tot.dat")==True:
name="light_1d_*_E_tot.dat"
if fnmatch.fnmatch(name, "light_1d_*_E.dat")==True:
name="light_1d_*_E.dat"
if fnmatch.fnmatch(name, "light_1d_*_alpha.dat")==True:
name="light_1d_*_alpha.dat"
if fnmatch.fnmatch(name, "light_1d_*_pointing.dat")==True:
name="light_1d_*_pointing.dat"
if fnmatch.fnmatch(name, "light_1d_*_Ep.dat")==True:
name="light_1d_*_Ep.dat"
if fnmatch.fnmatch(name, "light_1d_*_En.dat")==True:
name="light_1d_*_En.dat"
if fnmatch.fnmatch(name, "light_1d_*_t.dat")==True:
name="light_1d_*_t.dat"
if fnmatch.fnmatch(name, "light_1d_*_layer.dat")==True:
name="light_1d_*_layer.dat"
if fnmatch.fnmatch(name, "light_1d_*_photons_abs.dat")==True:
name="light_1d_*_photons_abs.dat"
if fnmatch.fnmatch(name, "stark_spectrum*.dat")==True:
name="stark_spectrum*.dat"
return name
def make_entry(self,root,text,true_false):
depth=0
pointer=root
for depth in range(0,len(text)):
found=False
for i in range(0,pointer.childCount()):
if pointer.child(i).text(0)==text[depth]:
found=True
pointer=pointer.child(i)
break
if found==False:
pointer=QTreeWidgetItem(pointer, [text[depth]])
pointer.setFlags(pointer.flags() | Qt.ItemIsUserCheckable)
#if depth==0:
# pointer.setIcon(0,icon_get("folder"))
if true_false==True:
pointer.setCheckState(0, Qt.Checked )
else:
pointer.setCheckState(0, Qt.Unchecked )
def scan_dir(self,directory):
outout_files=[]
true_false_list=[]
for root, dirs, files in os.walk(directory):
for name in files:
if name.endswith(".dat")==True:
name=self.wildcard_replace(name)
full_name=directory+"/"+name
if outout_files.count(full_name)==0:
outout_files.append(full_name)
true_false_list.append(True)
#print(os.path.join(root, name))
return outout_files,true_false_list
def scan_root_dir(self):
outout_files=[]
true_false_list=[]
for name in os.listdir():
if name.endswith(".dat")==True:
name=self.wildcard_replace(name)
full_name="root/"+name
if outout_files.count(full_name)==0:
outout_files.append(full_name)
true_false_list.append(True)
#print(os.path.join(root, name))
return outout_files,true_false_list
def set_state(self,value):
self.tab.blockSignals(True)
iterator = QTreeWidgetItemIterator(self.tab)
while iterator.value():
item = iterator.value()
path = []
while item is not None:
path.append(str(item.text(0)))
item = item.parent()
path="/".join(reversed(path))
path=path.split('/')
if len(path)>1:
path=path[1:]
if path[0]==value:
item = iterator.value()
if len(path)==1:
checked=item.checkState(0)
#checked=bool(checked)
if len(path)>1:
item.setCheckState(0, checked)
iterator+=1
self.tab.blockSignals(False)
def save(self):
lines=0
count=0
out=[]
iterator = QTreeWidgetItemIterator(self.tab)
while iterator.value():
item = iterator.value()
lines=lines+1
iterator+=1
iterator = QTreeWidgetItemIterator(self.tab)
while iterator.value():
item = iterator.value()
path = []
checked=item.checkState(0)
checked=bool(checked)
while item is not None:
path.append(str(item.text(0)))
item = item.parent()
path="/".join(reversed(path))
path=path.split('/', 1)[-1]
#print(path.encode('utf-8'))
if path.count("/")!=0:
if checked==False:
out.append("#file"+str(count))
out.append(path)
out.append(str(checked))
count=count+1
iterator+=1
out.append("#ver")
out.append("1.0")
out.append("#end")
#if len(out)>10:
inp_save("dump_file.inp",out)
#else:
# print("************Warning dump_file.inp looks too short to me***********")
def from_file(self):
param_list=[]
true_false_list=[]
lines=inp_load_file("dump_file.inp")
if lines!=False:
pos=0
while(1):
token=lines[pos]
pos=pos+1
if token=="#ver":
break
file_name=lines[pos]
pos=pos+1
true_false=lines[pos]
pos=pos+1
true_false_list.append(str2bool(true_false))
param_list.append(file_name)
return param_list,true_false_list
def callback_refresh(self):
self.update()
def update(self):
self.tab.clear()
param_list=[]
tf_list=[]
files,true_false=self.from_file()
param_list.extend(files)
tf_list.extend(true_false)
files,true_false=self.scan_dir("snapshots")
param_list.extend(files)
tf_list.extend(true_false)
files,true_false=self.scan_dir("optical_output")
param_list.extend(files)
tf_list.extend(true_false)
files,true_false=self.scan_dir("dynamic")
param_list.extend(files)
tf_list.extend(true_false)
files,true_false=self.scan_dir("solver")
param_list.extend(files)
tf_list.extend(true_false)
files,true_false=self.scan_dir("equilibrium")
param_list.extend(files)
tf_list.extend(true_false)
files,true_false=self.scan_root_dir()
param_list.extend(files)
tf_list.extend(true_false)
root = QTreeWidgetItem(self.tab, [_("Output files")])
root.setExpanded(True)
i=0
for item in range(0, len(param_list)):
div_str=param_list[item].replace("\\", "/")
div_str=div_str.split("/")
piter=None
self.make_entry(root,div_str,tf_list[item])
def on_destroy(self):
self.hide()
return True
def tree_callback_clicked(self, item, column):
path = []
while item is not None:
path.append(str(item.text(0)))
item = item.parent()
if len(path)==2:
self.set_state(path[0])
self.save()
| gpl-2.0 | 6,935,567,816,194,428,000 | 23.076517 | 198 | 0.673315 | false |
rfoxfa/python-utils | utils/plotting.py | 1 | 1798 | """
Plotting functions.
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
def hhist(items, title=None, axislabel=None, color=None, height=None, width=None, reverse=False):
"""
Plots a horizontal histogram of values and frequencies.
Arguments:
items (iterable[any]) => A list of objects.
title (Optional[str]) => A title for the resulting histogram.
axislabel (Optional[str]) => A label for the y-axis that lists the unique items in
the parameter list.
color (Optional[str]) => A matplotlib color value for coloring the histogram
(default: matplotlib's default plot color, a royal blue)
height (Optional[int]) => A height for the plot (default: 10)
width (Optional[int]) => A width for the plot (default: 20)
reverse (Optional[bool]) => Whether or not the histogram should plot from top to bottom in
order of decreasing frequency or the reverse of that.
Returns:
Void, but a matplotlib figure should be produced (type=None).
"""
# Parse the unique items and their counts.
unique_items, item_counts = np.unique(items, return_counts=True)
# Sort the items by frequency.
item_counts, unique_items = zip(*sorted(zip(item_counts, unique_items), reverse=reverse))
# Plot the frequencies.
pos = np.arange(len(unique_items)) + 0.5
plt.figure(figsize=((width or 20), (height or 10)))
plt.barh(pos, item_counts, align='center', color=color)
plt.yticks(pos, unique_items)
plt.xlabel('Frequency')
if axislabel:
plt.ylabel(axislabel)
if title:
plt.title(title)
plt.show()
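# Example usage (illustrative data):
#   hhist(['red', 'blue', 'red', 'green', 'red', 'blue'],
#         title='Colour frequency', axislabel='Colour', color='steelblue')
# draws a horizontal bar chart with 'red' (3), 'blue' (2) and 'green' (1).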
| gpl-2.0 | -2,366,417,125,346,160,000 | 36.458333 | 100 | 0.624583 | false |
lyuboraykov/columba | tests/test_sendgrid_provider.py | 1 | 1887 | #!/usr/bin/env python3
import pytest
from providers.sendgrid import SendGridProvider
from send_error import SendError
import init_test_objects
def test_sendgrid_send_positive():
"""Straightforward sendgrid send test. Will fail if an error is raised by the provider."""
sendgrid_provider = SendGridProvider(init_test_objects.SENDGRID_TEST_AUTHENTICATION,
init_test_objects.SENDGRID_TEST_USERNAME)
test_message = init_test_objects.init_message()
sendgrid_provider.send(test_message)
def test_sendgrid_send_wrong_authentication():
"""The provider should raise an SendError if it gets wrong authentication"""
sendgrid_provider = SendGridProvider(init_test_objects.SENDGRID_TEST_AUTHENTICATION + 's',
init_test_objects.SENDGRID_TEST_USERNAME)
test_message = init_test_objects.init_message()
with pytest.raises(SendError) as send_error:
sendgrid_provider.send(test_message)
assert send_error
def test_sendgrid_send_missing_recipients():
"""The provider should raise a SendError if it has missing recipients field"""
sendgrid_provider = SendGridProvider(init_test_objects.SENDGRID_TEST_AUTHENTICATION + 's',
init_test_objects.SENDGRID_TEST_USERNAME)
test_message = init_test_objects.init_message()
test_message.recipients = ''
with pytest.raises(SendError) as send_error:
sendgrid_provider.send(test_message)
assert send_error
def test_sendgrid_send_missing_sender():
"""The provider should raise a SendError if it has missing sender field"""
sendgrid_provider = SendGridProvider(init_test_objects.SENDGRID_TEST_AUTHENTICATION + 's',
init_test_objects.SENDGRID_TEST_USERNAME)
test_message = init_test_objects.init_message()
test_message.sender = ''
with pytest.raises(SendError) as send_error:
sendgrid_provider.send(test_message)
assert send_error | mit | -6,292,615,231,775,014,000 | 42.906977 | 94 | 0.740859 | false |
declarativitydotnet/p2 | doc/chord.generator.py | 1 | 1284 | #!/usr/bin/python
## This file is distributed under the terms in the attached LICENSE file.
## If you do not find this file, copies can be found by writing to:
## Intel Research Berkeley, 2150 Shattuck Avenue, Suite 1300,
## Berkeley, CA, 94704. Attention: Intel License Inquiry.
## Or
## UC Berkeley EECS Computer Science Division, 387 Soda Hall #1776,
## Berkeley, CA, 94707. Attention: P2 Group.
##
## DESCRIPTION: PlanetLab generator for chord.olg
import sys
import os
import getopt
import random
import sha
if __name__ == "__main__":
overlog = sys.argv[1]
seed = int(sys.argv[2])
node = str(sys.argv[3])
port = str(sys.argv[4])
outputOverlog = str(sys.argv[5])
shortopts = "D:"
env = {}
opts, args = getopt.getopt(sys.argv[6:], shortopts)
for o, v in opts:
if o == "-D":
d = v.split("=", 1)
env[d[0]] = d[1].replace("\"", "\\\"")
envs = ""
for var in env:
envs += "-D" + var + "=" + env[var] + " "
envs += "-DLOCALADDRESS=\\\"%s:%s\\\" " % (node, port)
m = sha.new()
m.update("%s:%s" % (node, port))
nodeid = m.hexdigest()
envs += "-DNODEID=0x%sI " % (nodeid)
command = "cpp -C -P %s %s %s" % (overlog, envs, outputOverlog)
os.system(command)
| bsd-3-clause | -8,504,691,728,586,956,000 | 26.913043 | 73 | 0.573988 | false |
lrnt/aerial | mivb.py | 1 | 7528 | import inspect, asyncio, sys, json
from aiohttp import web
from utils import get_etree
from asyncio_redis import Connection
from collections import Iterable
API_BASE_URL = 'http://m.mivb.be/api/'
API_DEFAULT_PARAMS = {'lang': 'nl'}
API_LINES_URL = API_BASE_URL + 'getlinesnew.php'
API_ROUTE_URL = API_BASE_URL + 'getitinerary.php'
API_STOP_URL = API_BASE_URL + 'getwaitingtimes.php'
def objectify(keys):
# Get all the model types in this module
types = dict(inspect.getmembers(sys.modules[__name__], inspect.isclass))
# Split the keys into typename, name
keys = [x.split(':', 1) for x in keys]
# Lookup and instantiate each object
objects = [types[typename](id) for typename, id in keys]
return objects
class Model(object):
_redis = None
    @property
    def redis(self):
        # _redis is a class attribute shared by all models; a plain property
        # keeps instance access (self.redis) working, whereas stacking
        # @classmethod and @property does not produce a usable descriptor on
        # the Python versions this code targets.
        return self._redis
def __init__(self, id):
self.id = id
self.key = '%s:%s' % (self.__class__.__name__, self.id)
def delete(self):
self.redis.delete(self.key)
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.key == other.key)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<%s>' % self.key
class DictModel(Model):
def __init__(self, id):
Model.__init__(self, id)
self.dictkey = '%s:dict' % self.key
@asyncio.coroutine
def get(self, key):
return (yield from self.redis.hget(self.dictkey, key))
@asyncio.coroutine
def set(self, key, value):
return (yield from self.redis.hset(self.dictkey, key, value))
@asyncio.coroutine
def getall(self):
return (yield from self.redis.hgetall_asdict(self.dictkey))
class SetModel(Model):
def __init__(self, id):
Model.__init__(self, id)
self.setkey = '%s:set' % self.key
@asyncio.coroutine
def sadd(self, obj):
if isinstance(obj, Iterable):
objs = [i.key for i in obj]
else:
objs = [obj.key]
return (yield from self.redis.sadd(self.setkey, objs))
@asyncio.coroutine
def srem(self, obj):
if isinstance(obj, Iterable):
objs = [i.key for i in obj]
else:
objs = [obj.key]
return (yield from self.redis.srem(self.setkey, objs))
@asyncio.coroutine
def __iter__(self):
return objectify((yield from self.redis.smembers_asset(self.setkey)))
class SortedSetModel(Model):
def __init__(self, id):
Model.__init__(self, id)
self.zsetkey = '%s:zset' % self.key
@asyncio.coroutine
def zadd(self, obj, score):
return (yield from self.redis.zadd(self.zsetkey, {obj.key: score}))
@asyncio.coroutine
def zrem(self, obj):
return (yield from self.redis.zrem(self.zsetkey, [obj.key]))
@asyncio.coroutine
def __iter__(self):
dct = yield from self.redis.zrange_asdict(self.zsetkey)
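        # zrange_asdict() returns a {member: score} mapping; sorting the keys
        # by score restores the stops' order along the route.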
lst = sorted(dct, key=dct.__getitem__)
return objectify(lst)
class Operator(SetModel):
def __init__(self):
SetModel.__init__(self, 'MIVB')
@asyncio.coroutine
def update_lines(self):
nodes = yield from get_etree('line', API_LINES_URL,
params=API_DEFAULT_PARAMS)
for node in nodes:
line = Line(node.find('id').text)
for child in node:
if child.text:
yield from line.set(child.tag, child.text)
yield from self.sadd(line)
class Line(DictModel, SetModel):
def __init__(self, id):
DictModel.__init__(self, id)
SetModel.__init__(self, id)
@asyncio.coroutine
def update_routes(self):
for iti in range(1, 3): # There are only 2 routes (1 and 2) in each line
route = Route('%s.%s' % (self.id, iti))
direction = yield from self.get('destination%s' % iti)
yield from route.set('destination', direction)
yield from route.set('line', self.id)
yield from route.set('iti', str(iti))
yield from route.update(full_update=True)
yield from self.sadd(route)
class Route(DictModel, SortedSetModel):
def __init__(self, id):
DictModel.__init__(self, id)
SortedSetModel.__init__(self, id)
@asyncio.coroutine
def _report_change(self, origin, destination):
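        # Publish a JSON message on the 'mivb' Redis channel describing a
        # change from one stop to another on this route; an id of '-1' marks
        # "no stop" (something entered or left the route).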
origin = {'id': (origin.id if origin else '-1'),
'lat': (yield from origin.get('latitude') if origin else ''),
'lon': (yield from origin.get('longitude') if origin else '')}
destination = {'id': (destination.id if destination else '-1'),
'lat': \
(yield from destination.get('latitude') if destination else ''),
'lon': \
(yield from destination.get('longitude') if destination else '')}
message = {'route': self.id,
'origin': origin,
'destination': destination}
yield from self.redis.publish('mivb', json.dumps(message))
@asyncio.coroutine
def update(self, full_update=False):
params = {'line': (yield from self.get('line')),
'iti': (yield from self.get('iti'))}
params.update(API_DEFAULT_PARAMS)
nodes = yield from get_etree('stop', API_ROUTE_URL, params=params)
route_present = RoutePresent(self)
old = set((yield from route_present))
new = set()
for score, node in enumerate(nodes):
stop = Stop(node.find('id').text)
present = node.find('present')
if present is not None and present.text == 'TRUE':
new.add(stop)
if full_update:
for child in node:
if child.text:
yield from stop.set(child.tag, child.text)
yield from self.zadd(stop, score)
rem = old - new
add = new - old
if len(rem) > 0:
yield from route_present.srem(rem)
if len(add) > 0:
yield from route_present.sadd(add)
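        # Scan the route from last stop to first: pair each stop that is now
        # flagged 'present' with the nearest previously-present stop earlier
        # on the route and report the transition (presumably a vehicle
        # advancing); a stop that lost its flag with no newly-flagged stop
        # after it is reported as having left the route.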
stops = yield from self
o_i = len(stops)
for s_i, s in reversed(list(enumerate(stops))):
if not s in new:
if o_i > s_i:
o_i -= 1
if s in old:
for n in reversed(stops[s_i:]):
if n in new:
break
else:
yield from self._report_change(s, None)
continue
for o in reversed(stops[:o_i]):
o_i -= 1
if o in old:
if o != s:
yield from self._report_change(o, s)
break
else:
if o_i == 0:
yield from self._report_change(None, s)
class RoutePresent(SetModel):
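    # Set of stops on a route whose API 'present' flag is currently TRUE;
    # it shares the parent route's id so the two stay associated.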
def __init__(self, route):
SetModel.__init__(self, route.id)
class Stop(DictModel):
def __init__(self, id):
DictModel.__init__(self, id)
@asyncio.coroutine
def update(self):
params = {'halt': self.id}
params.update(API_DEFAULT_PARAMS)
nodes = yield from get_etree('position', API_STOP_URL, params=params)
for node in nodes:
for child in node:
yield from self.set(child.tag, child.text)
| mit | -2,791,483,782,329,642,000 | 28.992032 | 80 | 0.545829 | false |
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/quests/640_TheZeroHour/__init__.py | 1 | 3023 | # Made by Kerberos v1.0 on 2009/05/08
# this script is part of the Official L2J Datapack Project.
# Visit http://www.l2jdp.com/forum for more details.
import sys
from com.l2scoria import Config
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "640_TheZeroHour"
#NPC
Kahman = 31554
#MONSTERS
MONSTERS = range(22105,22112)+range(22113,22120)+[22121]
#ITEMS
Fang = 8085
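# Reward table: event id -> [fang cost, reward item id, amount given],
# unpacked in onAdvEvent() below; the event ids presumably correspond to
# choices in Kahman's reward dialog html.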
REWARDS={
"1":[12 ,4042, 1],
"2":[6 ,4043, 1],
"3":[6 ,4044, 1],
"4":[81 ,1887,10],
"5":[33 ,1888, 5],
"6":[30 ,1889,10],
"7":[150,5550,10],
"8":[131,1890,10],
"9":[123,1893, 5],
}
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = [Fang]
def onAdvEvent (self,event,npc, player) :
htmltext = event
st = player.getQuestState(qn)
if not st : return
if event == "31554-02.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "31554-08.htm" :
st.playSound("ItemSound.quest_finish")
st.exitQuest(True)
elif event in REWARDS.keys() :
cost,item,amount = REWARDS[event]
if st.getQuestItemsCount(Fang)>=cost :
st.takeItems(Fang,cost)
st.giveItems(item, amount)
htmltext = "31554-09.htm"
else :
htmltext = "31554-06.htm"
return htmltext
def onTalk (self, npc, player) :
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
id = st.getState()
if id == CREATED :
if player.getLevel() >= 66 :
st2 = player.getQuestState("109_InSearchOfTheNest")
if st2 and st2.getState().getName() == 'Completed' :
htmltext = "31554-01.htm"
else :
htmltext = "31554-00.htm" #todo: missing retail html
else :
htmltext = "31554-00.htm"
elif st.getQuestItemsCount(Fang) >= 1 :
htmltext = "31554-04.htm"
else :
htmltext = "31554-03.htm"
return htmltext
def onKill(self, npc, player, isPet) :
st = player.getQuestState(qn)
if not st : return
st.giveItems(Fang,int(Config.RATE_DROP_QUEST))
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(640,qn,"The Zero Hour")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(Kahman)
QUEST.addTalkId(Kahman)
for i in MONSTERS :
QUEST.addKillId(i) | gpl-3.0 | 6,227,081,143,170,916,000 | 29.24 | 157 | 0.582865 | false |
swordfeng/pyjs | nodeaio/nodeaio.py | 1 | 1710 | #!/usr/bin/python3
import asyncio
import threading
loop = asyncio.get_event_loop()
class LoopThread(threading.Thread):
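    # Runs the asyncio event loop on a dedicated background thread so callers
    # (e.g. the embedding Node.js side) can schedule work with
    # loop.call_soon_threadsafe(); when the loop is stopped, any remaining
    # tasks are gathered and awaited before the thread exits.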
def __init__(self):
super(LoopThread, self).__init__()
self.finalizing = False
def run(self):
asyncio.set_event_loop(loop)
loop.run_forever()
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
class Thenable:
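    # Promise-like wrapper around a coroutine: then() registers resolve/reject
    # callbacks (invoked immediately if the coroutine has already finished),
    # and run() awaits the coroutine and dispatches its result or exception.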
def __init__(self, coro):
self.resolve_handlers = []
self.reject_handlers = []
self.done = False
self.result = None
self.exception = None
self.coro = coro
def then(self, resolve, reject):
if self.done:
if self.exception != None:
reject(self.exception)
else:
resolve(self.result)
else:
self.resolve_handlers.append(resolve)
self.reject_handlers.append(reject)
async def run(self):
try:
self.result = await self.coro
except BaseException as e:
self.exception = e
self.done = True
# should have no exceptions thrown from node.js?
if self.exception != None:
for handler in self.reject_handlers:
handler(self.exception)
else:
for handler in self.resolve_handlers:
handler(self.result)
# in case of circular reference
del self.resolve_handlers
del self.reject_handlers
LoopThread().start()
def ensure_coroutine(coro):
promise = Thenable(coro)
loop.call_soon_threadsafe(asyncio.ensure_future, promise.run())
return promise
def stop():
loop.call_soon_threadsafe(loop.stop)
| mit | 5,335,262,165,422,186,000 | 29.535714 | 67 | 0.592398 | false |
smoser/granite | granite/virt/lxc/hostops.py | 1 | 2587 | # Copyright (c) 2014 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.config import cfg
import lxc
from granite.virt.lxc import host_utils as host_utils
from nova.openstack.common.gettextutils import _ # noqa
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
CONF = cfg.CONF
log = logging.getLogger(__name__)
VERSION_RE = re.compile(r"(?P<maj>\d+)[.]?(?P<min>\d+)?"
"[.]?(?P<mic>\d+)?(?P<extra>.*)?")
def parse_version(version):
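    # Parse a version string such as "1.0.7-alpha" into (major, minor, micro)
    # and pack it into a single comparable integer; strings that do not match
    # the pattern fall back to (0, 0, 0).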
try:
m = VERSION_RE.match(version)
ver_tup = tuple([int(m.group(n)) for n in ('maj', 'min', 'mic')])
except AttributeError:
logging.WARN("bad version: %s" % version)
ver_tup = (0, 0, 0)
return utils.convert_version_to_int(ver_tup)
class HostOps(object):
def __init__(self):
self._stats = None
def get_host_stats(self, refresh=False):
if refresh or self._stats is None:
self._update_status()
return self._stats
def get_available_resource(self, nodename):
return self._update_status()
def _update_status(self):
memory = host_utils.get_memory_info()
disk = host_utils.get_disk_info()
dic = {'vcpus': host_utils.get_cpu_count(),
'memory_mb': memory['total'],
'local_gb': disk['total'] / units.Gi,
'vcpus_used': 0,
'memory_mb_used': memory['used'],
'local_gb_used': disk['used'] / units.Gi,
'hypervisor_type': 'lxc',
'hypervisor_version': parse_version(lxc.version),
'hypervisor_hostname': CONF.host,
'cpu_info': '?',
'supported_instances': jsonutils.dumps([
('i686', 'lxc', 'lxc'),
('x86_64', 'lxc', 'lxc'),
])}
self._stats = dic
return self._stats
| apache-2.0 | -6,146,650,257,715,685,000 | 32.166667 | 78 | 0.583301 | false |
leppa/home-assistant | homeassistant/helpers/condition.py | 1 | 15835 | """Offer reusable conditions."""
import asyncio
from datetime import datetime, timedelta
import functools as ft
import logging
import sys
from typing import Callable, Container, Optional, Union, cast
from homeassistant.components import zone as zone_cmp
from homeassistant.components.device_automation import (
async_get_device_automation_platform,
)
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ABOVE,
CONF_AFTER,
CONF_BEFORE,
CONF_BELOW,
CONF_CONDITION,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_STATE,
CONF_VALUE_TEMPLATE,
CONF_WEEKDAY,
CONF_ZONE,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
WEEKDAYS,
)
from homeassistant.core import HomeAssistant, State
from homeassistant.exceptions import HomeAssistantError, TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.sun import get_astral_event_date
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as dt_util
FROM_CONFIG_FORMAT = "{}_from_config"
ASYNC_FROM_CONFIG_FORMAT = "async_{}_from_config"
_LOGGER = logging.getLogger(__name__)
ConditionCheckerType = Callable[[HomeAssistant, TemplateVarsType], bool]
async def async_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Turn a condition configuration into a method.
Should be run on the event loop.
"""
for fmt in (ASYNC_FROM_CONFIG_FORMAT, FROM_CONFIG_FORMAT):
factory = getattr(
sys.modules[__name__], fmt.format(config.get(CONF_CONDITION)), None
)
if factory:
break
if factory is None:
raise HomeAssistantError(
'Invalid condition "{}" specified {}'.format(
config.get(CONF_CONDITION), config
)
)
# Check for partials to properly determine if coroutine function
check_factory = factory
while isinstance(check_factory, ft.partial):
check_factory = check_factory.func
if asyncio.iscoroutinefunction(check_factory):
return cast(
ConditionCheckerType, await factory(hass, config, config_validation)
)
return cast(ConditionCheckerType, factory(config, config_validation))
async def async_and_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'AND'."""
if config_validation:
config = cv.AND_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_and_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
try:
for check in checks:
if not check(hass, variables):
return False
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during and-condition: %s", ex)
return False
return True
return if_and_condition
async def async_or_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Create multi condition matcher using 'OR'."""
if config_validation:
config = cv.OR_CONDITION_SCHEMA(config)
checks = [
await async_from_config(hass, entry, False) for entry in config["conditions"]
]
def if_or_condition(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test and condition."""
try:
for check in checks:
if check(hass, variables):
return True
except Exception as ex: # pylint: disable=broad-except
_LOGGER.warning("Error during or-condition: %s", ex)
return False
return if_or_condition
def numeric_state(
hass: HomeAssistant,
entity: Union[None, str, State],
below: Optional[float] = None,
above: Optional[float] = None,
value_template: Optional[Template] = None,
variables: TemplateVarsType = None,
) -> bool:
"""Test a numeric state condition."""
return cast(
bool,
run_callback_threadsafe(
hass.loop,
async_numeric_state,
hass,
entity,
below,
above,
value_template,
variables,
).result(),
)
def async_numeric_state(
hass: HomeAssistant,
entity: Union[None, str, State],
below: Optional[float] = None,
above: Optional[float] = None,
value_template: Optional[Template] = None,
variables: TemplateVarsType = None,
) -> bool:
"""Test a numeric state condition."""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
if value_template is None:
value = entity.state
else:
variables = dict(variables or {})
variables["state"] = entity
try:
value = value_template.async_render(variables)
except TemplateError as ex:
_LOGGER.error("Template error: %s", ex)
return False
if value in (STATE_UNAVAILABLE, STATE_UNKNOWN):
return False
try:
fvalue = float(value)
except ValueError:
_LOGGER.warning(
"Value cannot be processed as a number: %s " "(Offending entity: %s)",
entity,
value,
)
return False
if below is not None and fvalue >= below:
return False
if above is not None and fvalue <= above:
return False
return True
def async_numeric_state_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.NUMERIC_STATE_CONDITION_SCHEMA(config)
entity_id = config.get(CONF_ENTITY_ID)
below = config.get(CONF_BELOW)
above = config.get(CONF_ABOVE)
value_template = config.get(CONF_VALUE_TEMPLATE)
def if_numeric_state(
hass: HomeAssistant, variables: TemplateVarsType = None
) -> bool:
"""Test numeric state condition."""
if value_template is not None:
value_template.hass = hass
return async_numeric_state(
hass, entity_id, below, above, value_template, variables
)
return if_numeric_state
def state(
hass: HomeAssistant,
entity: Union[None, str, State],
req_state: str,
for_period: Optional[timedelta] = None,
) -> bool:
"""Test if state matches requirements.
Async friendly.
"""
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
assert isinstance(entity, State)
is_state = entity.state == req_state
if for_period is None or not is_state:
return is_state
return dt_util.utcnow() - for_period > entity.last_changed
def state_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.STATE_CONDITION_SCHEMA(config)
entity_id = config.get(CONF_ENTITY_ID)
req_state = cast(str, config.get(CONF_STATE))
for_period = config.get("for")
def if_state(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
return state(hass, entity_id, req_state, for_period)
return if_state
def sun(
hass: HomeAssistant,
before: Optional[str] = None,
after: Optional[str] = None,
before_offset: Optional[timedelta] = None,
after_offset: Optional[timedelta] = None,
) -> bool:
"""Test if current time matches sun requirements."""
utcnow = dt_util.utcnow()
today = dt_util.as_local(utcnow).date()
before_offset = before_offset or timedelta(0)
after_offset = after_offset or timedelta(0)
sunrise_today = get_astral_event_date(hass, SUN_EVENT_SUNRISE, today)
sunset_today = get_astral_event_date(hass, SUN_EVENT_SUNSET, today)
sunrise = sunrise_today
sunset = sunset_today
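    # get_astral_event_date() may return an event whose local date falls
    # before 'today'; in that case the checks below switch to tomorrow's
    # sunrise/sunset instead.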
if today > dt_util.as_local(
cast(datetime, sunrise_today)
).date() and SUN_EVENT_SUNRISE in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunrise_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNRISE, tomorrow)
sunrise = sunrise_tomorrow
if today > dt_util.as_local(
cast(datetime, sunset_today)
).date() and SUN_EVENT_SUNSET in (before, after):
tomorrow = dt_util.as_local(utcnow + timedelta(days=1)).date()
sunset_tomorrow = get_astral_event_date(hass, SUN_EVENT_SUNSET, tomorrow)
sunset = sunset_tomorrow
if sunrise is None and SUN_EVENT_SUNRISE in (before, after):
# There is no sunrise today
return False
if sunset is None and SUN_EVENT_SUNSET in (before, after):
# There is no sunset today
return False
if before == SUN_EVENT_SUNRISE and utcnow > cast(datetime, sunrise) + before_offset:
return False
if before == SUN_EVENT_SUNSET and utcnow > cast(datetime, sunset) + before_offset:
return False
if after == SUN_EVENT_SUNRISE and utcnow < cast(datetime, sunrise) + after_offset:
return False
if after == SUN_EVENT_SUNSET and utcnow < cast(datetime, sunset) + after_offset:
return False
return True
def sun_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with sun based condition."""
if config_validation:
config = cv.SUN_CONDITION_SCHEMA(config)
before = config.get("before")
after = config.get("after")
before_offset = config.get("before_offset")
after_offset = config.get("after_offset")
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return sun(hass, before, after, before_offset, after_offset)
return time_if
def template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
return cast(
bool,
run_callback_threadsafe(
hass.loop, async_template, hass, value_template, variables
).result(),
)
def async_template(
hass: HomeAssistant, value_template: Template, variables: TemplateVarsType = None
) -> bool:
"""Test if template condition matches."""
try:
value = value_template.async_render(variables)
except TemplateError as ex:
_LOGGER.error("Error during template condition: %s", ex)
return False
return value.lower() == "true"
def async_template_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with state based condition."""
if config_validation:
config = cv.TEMPLATE_CONDITION_SCHEMA(config)
value_template = cast(Template, config.get(CONF_VALUE_TEMPLATE))
def template_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate template based if-condition."""
value_template.hass = hass
return async_template(hass, value_template, variables)
return template_if
def time(
before: Optional[dt_util.dt.time] = None,
after: Optional[dt_util.dt.time] = None,
weekday: Union[None, str, Container[str]] = None,
) -> bool:
"""Test if local time condition matches.
Handle the fact that time is continuous and we may be testing for
a period that crosses midnight. In that case it is easier to test
for the opposite. "(23:59 <= now < 00:01)" would be the same as
"not (00:01 <= now < 23:59)".
"""
now = dt_util.now()
now_time = now.time()
if after is None:
after = dt_util.dt.time(0)
if before is None:
before = dt_util.dt.time(23, 59, 59, 999999)
if after < before:
if not after <= now_time < before:
return False
else:
if before <= now_time < after:
return False
if weekday is not None:
now_weekday = WEEKDAYS[now.weekday()]
if (
isinstance(weekday, str)
and weekday != now_weekday
or now_weekday not in weekday
):
return False
return True
def time_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with time based condition."""
if config_validation:
config = cv.TIME_CONDITION_SCHEMA(config)
before = config.get(CONF_BEFORE)
after = config.get(CONF_AFTER)
weekday = config.get(CONF_WEEKDAY)
def time_if(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Validate time based if-condition."""
return time(before, after, weekday)
return time_if
def zone(
hass: HomeAssistant,
zone_ent: Union[None, str, State],
entity: Union[None, str, State],
) -> bool:
"""Test if zone-condition matches.
Async friendly.
"""
if isinstance(zone_ent, str):
zone_ent = hass.states.get(zone_ent)
if zone_ent is None:
return False
if isinstance(entity, str):
entity = hass.states.get(entity)
if entity is None:
return False
latitude = entity.attributes.get(ATTR_LATITUDE)
longitude = entity.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
return False
return zone_cmp.zone.in_zone(
zone_ent, latitude, longitude, entity.attributes.get(ATTR_GPS_ACCURACY, 0)
)
def zone_from_config(
config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Wrap action method with zone based condition."""
if config_validation:
config = cv.ZONE_CONDITION_SCHEMA(config)
entity_id = config.get(CONF_ENTITY_ID)
zone_entity_id = config.get(CONF_ZONE)
def if_in_zone(hass: HomeAssistant, variables: TemplateVarsType = None) -> bool:
"""Test if condition."""
return zone(hass, zone_entity_id, entity_id)
return if_in_zone
async def async_device_from_config(
hass: HomeAssistant, config: ConfigType, config_validation: bool = True
) -> ConditionCheckerType:
"""Test a device condition."""
if config_validation:
config = cv.DEVICE_CONDITION_SCHEMA(config)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return cast(
ConditionCheckerType,
platform.async_condition_from_config(config, config_validation), # type: ignore
)
async def async_validate_condition_config(
hass: HomeAssistant, config: ConfigType
) -> ConfigType:
"""Validate config."""
condition = config[CONF_CONDITION]
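    # Nested "and"/"or" conditions are validated recursively; "device"
    # conditions are validated by the owning integration's device-automation
    # condition platform.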
if condition in ("and", "or"):
conditions = []
for sub_cond in config["conditions"]:
sub_cond = await async_validate_condition_config(hass, sub_cond)
conditions.append(sub_cond)
config["conditions"] = conditions
if condition == "device":
config = cv.DEVICE_CONDITION_SCHEMA(config)
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "condition"
)
return cast(ConfigType, platform.CONDITION_SCHEMA(config)) # type: ignore
return config
| apache-2.0 | -1,179,085,761,861,840,000 | 28.821092 | 88 | 0.645406 | false |