Dataset schema (column name, dtype, and observed length/value ranges):

| column | dtype | range |
|---|---|---|
| body_hash | string | 64–64 chars |
| body | string | 23–109k chars |
| docstring | string | 1–57k chars |
| path | string | 4–198 chars |
| name | string | 1–115 chars |
| repository_name | string | 7–111 chars |
| repository_stars | float64 | 0–191k |
| lang | string (1 class) | value: `python` |
| body_without_docstring | string | 14–108k chars |
| unified | string | 45–133k chars |

Each row below gives `body_hash | path | name | repository_name | repository_stars | lang`, followed by the function body.
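A minimal sketch of streaming rows with this schema via the Hugging Face `datasets` library; the dataset identifier is a placeholder, not the real repository id:

```python
# Sketch: iterate rows matching the schema above. The dataset id is hypothetical.
from datasets import load_dataset

ds = load_dataset("example-org/python-docstrings", split="train", streaming=True)
for row in ds:
    assert len(row["body_hash"]) == 64          # fixed-length hash column
    assert row["lang"] == "python"              # single-class column
    print(row["repository_name"], row["path"], row["name"])
    break
```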
| c8c0950d2af6a640cd8d29a3cb71dabd55aa46240cb8d62442b1c8c41c946352 | tensorflow_impl/model_estimator.py | accumulate_grad_ms | zivaharoni/gradual-learning-rnn | 10 | python |

```python
def accumulate_grad_ms(session, model, verbose=True):
    """Run the given model over its data."""
    start_time = time.time()
    iters = 0
    state = session.run(model.initial_state)
    fetches = {'ms_update': model.dynamic_eval.accu_global_ms(),
               'final_state': model.final_state}
    for step in range(model.input.epoch_size):
        # re-feed the recurrent state carried over from the previous batch
        feed_dict = dict()
        for j, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[j].c
            feed_dict[h] = state[j].h
        feed_dict.update(model.input.get_batch(step * model.input.time_steps))
        vals = session.run(fetches, feed_dict)
        state = vals['final_state']
        iters += 1
        if verbose and step % (model.input.epoch_size // 10) == 10:
            print('%.3f speed: %.0f wps' %
                  (step * 1.0 / model.input.epoch_size,
                   iters * model.input.batch_size / (time.time() - start_time)))
    session.run(model.dynamic_eval.average_global_ms())
    return
```
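A hedged driver sketch for the loop above; `build_eval_model` and `config` are assumptions standing in for this repository's model-construction code, not confirmed APIs:

```python
import tensorflow as tf

model = build_eval_model(config)   # hypothetical factory; only the call below is from the source
with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    accumulate_grad_ms(session, model)   # accumulate mean-square gradient statistics
```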
| e852123941a61693b6ee6c96aeff22b8c466602e27f9571422abc9bcadb4e7ae | tensorflow_impl/model_estimator.py | __init__ | zivaharoni/gradual-learning-rnn | 10 | python |

```python
def __init__(self, config, is_training, inputs):
    """The constructor builds the tensorflow_impl graph."""
    self._input = inputs
    vocab_size = config.vocab_size
    self._gpu_devices = [i for i in range(len(get_gpu_devices(args.gpu_devices)))][0]
    self._cpu_device = args.cpu_device
    self._config = config
    self._debug_ops = list()
    self._stat_ops = list()
    if config.mos:
        self._mos_mask = None
        self._gen_mos_mask = None

    with tf.name_scope('model_variables'):
        with tf.name_scope('global_step'):
            self._global_step = tf.Variable(0, name='global_step', trainable=False)
        with tf.name_scope('epoch_counter'):
            self._epoch_count = tf.Variable(0, name='epoch', trainable=False)
            self._epoch_inc = tf.assign(self._epoch_count, tf.add(self._epoch_count, tf.constant(1)))
            self._epoch_reset = tf.assign(self._epoch_count, tf.constant(0))

    # the embedding lookup is pinned to the CPU
    with tf.variable_scope('embedding'), tf.device(self._cpu_device):
        if is_training:
            logger.info('adding embedding matrix with dims [{:d}, {:d}]'.format(vocab_size, config.embedding_size))
        embedding_map = tf.get_variable(
            name='embedding', dtype=tf.float32,
            initializer=tf.random_uniform(shape=[vocab_size, config.embedding_size],
                                          minval=-0.1, maxval=0.1, seed=seed, dtype=tf.float32))
        if is_training:
            logger.info('adding embedding bias with dims [{:d}]'.format(config.embedding_size))
        embedding = tf.nn.embedding_lookup(embedding_map, self._input.input_data)

        if is_training and (config.keep_prob_embed < 1 or config.drop_i < 1):
            logger.info('adding embedding mask with dims [{:d}, {:d}, {:d}]'.format(
                config.batch_size, config.time_steps, config.embedding_size))
            self._emb_mask = tf.placeholder(
                dtype=tf.float32,
                shape=[config.batch_size, config.time_steps, config.embedding_size],
                name='embedding_mask')
            if config.keep_prob_embed < 1:
                if config.drop_embed_var:
                    logger.info('using variational embedding dropout')
                    with tf.name_scope('out_mask_gen'):
                        # one mask per sequence, tiled over time
                        random_tensor = ops.convert_to_tensor(config.keep_prob_embed)
                        random_tensor += random_ops.random_uniform(
                            [config.batch_size, 1, config.embedding_size], seed=seed)
                        random_tensor = tf.tile(random_tensor, [1, config.time_steps, 1])
                        self._gen_emb_mask = math_ops.floor(random_tensor)
                else:
                    logger.info('using naive embedding dropout')
                    with tf.name_scope('out_mask_gen'):
                        random_tensor = ops.convert_to_tensor(config.keep_prob_embed)
                        random_tensor += random_ops.random_uniform(
                            [config.batch_size, config.time_steps, config.embedding_size], seed=seed)
                        self._gen_emb_mask = math_ops.floor(random_tensor)
            else:
                with tf.name_scope('out_mask_gen'):
                    self._gen_emb_mask = tf.ones([config.batch_size, config.time_steps, config.embedding_size])
            embedding_out = math_ops.div(embedding, config.drop_i * config.keep_prob_embed) * self._emb_mask
        else:
            embedding_out = embedding

    with tf.name_scope('inner_model'):
        loss, grads, cell, initial_state, final_state, softmax = \
            self.complete_model(embedding_out, embedding_map, is_training)
        self._softmax = softmax
        self._cell = cell
        self._initial_state = initial_state
        self._final_state = final_state
        self._loss = loss
        self._grads = grads

    if is_training:
        with tf.name_scope('learning_rate'):
            self._lr = tf.Variable(config.lr, trainable=False, dtype=tf.float32)
            self._new_lr = tf.placeholder(tf.float32, shape=[], name='new_learning_rate')
            self._lr_update = tf.assign(self._lr, self._new_lr)
        tvars = tf.trainable_variables()
        with tf.name_scope('optimizer'):
            self._optimizer = []
            if config.opt == 'sgd':
                logger.info('using SGD optimizer')
                self._optimizer = SGDOptimizer(self, grads, tvars)
            elif config.opt == 'asgd':
                logger.info('using ASGD optimizer')
                opt = SGDOptimizer(self, grads, tvars, use_opt=False)
                self._optimizer = ASGDOptimizer(self, opt.updates, tvars)
            elif config.opt == 'masgd':
                logger.info('using MASGD optimizer')
                opt = SGDOptimizer(self, grads, tvars, use_opt=False)
                self._optimizer = MASGDOptimizer(self, opt.updates, tvars)
            elif config.opt == 'rms':
                logger.info('using RMS optimizer')
                self._optimizer = RMSpropOptimizer(self, grads, tvars)
            elif config.opt == 'arms':
                logger.info('using ARMS optimizer')
                opt = RMSpropOptimizer(self, grads, tvars, use_opt=False)
                self._optimizer = ASGDOptimizer(self, opt.updates, tvars)
            elif config.opt == 'marms':
                logger.info('using MARMS optimizer')
                opt = RMSpropOptimizer(self, grads, tvars, use_opt=False)
                self._optimizer = MASGDOptimizer(self, opt.updates, tvars)
            else:
                raise ValueError(config.opt + ' is not a valid optimizer')
            self._train_op = self._optimizer.train_op

    if config.dynamic_eval is not None:
        tvars = tf.trainable_variables()
        self._dynamic_eval = DynamicEval(config, tvars, grads)
        self._train_op = self._dynamic_eval.update_op()
```
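Both dropout branches above build their masks with the `floor(keep_prob + U[0, 1))` trick rather than `tf.nn.dropout`; a minimal numpy illustration of why that yields a Bernoulli(keep_prob) mask (shapes are illustrative):

```python
import numpy as np

keep_prob = 0.7
u = np.random.rand(64, 35, 650)     # illustrative [batch, time, embedding] shape
mask = np.floor(keep_prob + u)      # entry is 1 iff u >= 1 - keep_prob, i.e. with prob keep_prob
print(mask.mean())                  # ~= 0.7 for a mask this large
```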
| 590a62be1b14ab6da4fc7f2c70d70c8f85904930069a245e9943e27c6d6bbefd | tensorflow_impl/model_estimator.py | complete_model | zivaharoni/gradual-learning-rnn | 10 | python |

```python
def complete_model(self, embedding_out, embedding_map, is_training):
    """Build the rest of the model for a single gpu.

    Args:
        embedding_out: the embedding representation to be processed

    Returns:
        loss: a list for the loss calculated for each layer.
        grads: a list for the grads calculated for each loss.
    """
    targets = self._input.targets
    config = self._config
    batch_size = config.batch_size
    time_steps = config.time_steps
    vocab_size = config.vocab_size

    lstm_output, cell, state, initial_state = self._build_rnn_graph(embedding_out, is_training)

    if config.embedding_size == config.units_num[-1] or config.mos:
        if is_training:
            logger.info('tied embedding')
        w_out = tf.transpose(embedding_map)
    else:
        if is_training:
            logger.info('untied embedding')
        w_out = tf.get_variable(name='w_embed_out', shape=[config.units_num[-1], vocab_size], dtype=tf.float32)
    b_out = tf.get_variable(name='b_out', dtype=tf.float32,
                            initializer=tf.zeros([config.vocab_size], dtype=tf.float32))

    with tf.name_scope('loss'):
        with tf.name_scope('data_loss'):
            if config.mos:
                if is_training:
                    logger.info('adding mos with %d contexts' % config.mos_context_num)
                with tf.name_scope('mos'):
                    # mixture weights (prior) over the contexts
                    prior = tf.get_variable(name='mos_pi',
                                            shape=[config.units_num[-1], config.mos_context_num],
                                            dtype=tf.float32)
                    prior = tf.matmul(lstm_output, prior)
                    pi = tf.nn.softmax(prior, name='mos_prior')
                    w_h = tf.get_variable(
                        name='mos_w_h',
                        shape=[config.units_num[-1], config.mos_context_num * config.embedding_size],
                        dtype=tf.float32)
                    b_h = tf.get_variable(
                        name='mos_b_h',
                        shape=[config.mos_context_num * config.embedding_size],
                        dtype=tf.float32)
                    h = tf.reshape(tf.tanh(tf.matmul(lstm_output, w_h) + b_h), [-1, config.embedding_size])
                    if is_training:
                        self._mos_mask = tf.placeholder(
                            dtype=tf.float32,
                            shape=[config.batch_size * config.time_steps * config.mos_context_num,
                                   config.embedding_size],
                            name='mos_mask')
                        if config.variational is not None:
                            with tf.name_scope('mos_mask_gen'):
                                random_tensor = ops.convert_to_tensor(config.mos_drop)
                                random_tensor += random_ops.random_uniform(
                                    [config.batch_size, 1, config.mos_context_num * config.embedding_size],
                                    seed=seed)
                                random_tensor = tf.tile(random_tensor, [1, config.time_steps, 1])
                                self._gen_mos_mask = tf.reshape(
                                    math_ops.floor(random_tensor),
                                    [config.batch_size * config.time_steps * config.mos_context_num,
                                     config.embedding_size])
                        else:
                            with tf.name_scope('mos_mask_gen'):
                                random_tensor = ops.convert_to_tensor(config.mos_drop)
                                random_tensor += random_ops.random_uniform(
                                    [config.batch_size * config.mos_context_num * config.time_steps,
                                     config.embedding_size],
                                    seed=seed)
                                self._gen_mos_mask = math_ops.floor(random_tensor)
                        h = math_ops.div(h, config.mos_drop) * self._mos_mask
                    a = tf.matmul(h, w_out) + b_out
                    a_mos = tf.reshape(tf.nn.softmax(a), [-1, config.mos_context_num, config.vocab_size])
                    pi = tf.reshape(pi, [-1, config.mos_context_num, 1])
                    weighted_softmax = tf.multiply(a_mos, pi)
                    softmax = tf.reduce_sum(weighted_softmax, axis=1)
                losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
                    [tf.log(softmax + 1e-08)],
                    [tf.reshape(targets, [-1])],
                    [tf.ones([batch_size * time_steps], dtype=tf.float32)])
                loss = tf.reduce_mean(losses)
            else:
                if is_training:
                    logger.info('adding softmax layer')
                logits = tf.matmul(lstm_output, w_out) + b_out
                softmax = 1
                losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
                    [logits],
                    [tf.reshape(targets, [-1])],
                    [tf.ones([batch_size * time_steps], dtype=tf.float32)])
                loss = tf.reduce_mean(losses)
            raw_loss = loss

        if config.AR and is_training:
            logger.info('using activation regularization')
            with tf.name_scope('AR'):
                loss += config.AR * tf.reduce_mean(tf.square(tf.reshape(lstm_output, [-1, 1])))
        if config.TAR and is_training:
            logger.info('using temporal activation regularization')
            with tf.name_scope('TAR'):
                outputs_reshaped = tf.reshape(lstm_output, [config.batch_size, config.time_steps, -1])
                diff = outputs_reshaped[:, :-1, :] - outputs_reshaped[:, 1:, :]
                loss += config.TAR * tf.reduce_mean(tf.square(tf.reshape(diff, [-1, 1])))
        if config.wdecay and is_training:
            logger.info('using L2 regularization')
            for tvar in tf.trainable_variables():
                loss += config.wdecay * tf.reduce_sum(tf.square(tf.reshape(tvar, [-1, 1])))

    with tf.name_scope('compute_grads'):
        grads = tf.gradients(loss, tf.trainable_variables())

    final_state = state
    return raw_loss, grads, cell, initial_state, final_state, softmax
```
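The MoS ("mixture of softmaxes") branch above forms one softmax per context and blends them with the learned prior `pi`; a shape-level numpy sketch of that mixture (all dimensions illustrative):

```python
import numpy as np

bt, k, v = 6, 3, 10                     # batch*time, contexts, vocab (illustrative)
logits = np.random.randn(bt, k, v)
a_mos = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)  # per-context softmax
pi = np.random.dirichlet(np.ones(k), size=bt)[:, :, None]       # mixture weights, (bt, k, 1)
softmax = (a_mos * pi).sum(axis=1)      # mixture of softmaxes, shape (bt, v)
assert np.allclose(softmax.sum(-1), 1.0)  # a convex mix of distributions is a distribution
```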
| b750d15f2c53a098ff0bec51ab0a5d9945e5f482f022e7513615a4396b9c8306 | seisgan/fwi/layers.py | compute_prior_loss | LukasMosser/stochastic_seismic_waveform_inversion | 20 | python |

```python
def compute_prior_loss(z, alpha=1.0):
    """Computes prior loss according to Creswell 2016.

    :param z: latent vector
    :param alpha: weight of prior loss
    :return: log probability of the gaussian latent variables
    """
    pdf = torch.distributions.Normal(0, 1)
    logProb = pdf.log_prob(z.view(1, -1)).sum(dim=1)
    prior_loss = -alpha * logProb
    return prior_loss
```
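A short usage sketch for the prior loss; the latent size is illustrative:

```python
import torch

z = torch.randn(1, 50, requires_grad=True)   # latent vector, size illustrative
loss = compute_prior_loss(z, alpha=1.0)      # = -log p(z) under N(0, I), shape (1,)
loss.sum().backward()                        # gradients flow back into z
print(loss.item())
```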
| be2bde26d2fe787d2cfcdb9e0f24b9139a12a24e9c7ce003abea2d765f84d1d0 | utils/pcd_utils.py | align_vector_to_another | maorp/NeuralGraph | 117 | python |

```python
def align_vector_to_another(a=np.array([0, 0, 1]), b=np.array([1, 0, 0])):
    """Aligns vector a to vector b with axis angle rotation."""
    if np.array_equal(a, b):
        return None, None
    axis_ = np.cross(a, b)
    axis_ = axis_ / np.linalg.norm(axis_)
    angle = np.arccos(np.dot(a, b))
    return axis_, angle
```
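Usage sketch: the returned axis-angle pair rotates `a` onto `b` (it can be fed to any Rodrigues-rotation helper):

```python
import numpy as np

axis, angle = align_vector_to_another(np.array([0.0, 0.0, 1.0]),
                                      np.array([1.0, 0.0, 0.0]))
print(axis, angle)   # [0. 1. 0.] 1.5707...: rotate z onto x about the +y axis
```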
| bbb5ba3f1bf0a93b8914bd55f165c60289c0d64e9de52632f17b3523d0ab505d | utils/pcd_utils.py | normalize | maorp/NeuralGraph | 117 | python |

```python
def normalize(a, axis=-1, order=2):
    """Normalizes a numpy array of points."""
    l2 = np.atleast_1d(np.linalg.norm(a, order, axis))
    l2[l2 == 0] = 1
    return a / np.expand_dims(l2, axis), l2
```
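Usage sketch showing the zero-vector handling:

```python
import numpy as np

pts = np.array([[3.0, 4.0], [0.0, 0.0]])
unit, norms = normalize(pts)
print(unit)    # [[0.6 0.8] [0.  0. ]] -- zero rows stay zero
print(norms)   # [5. 1.] -- zero norms are clamped to 1 before dividing
```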
| e8712aa87d8624e18411b81f3fbe5c0066a30fc4a1fd62fddba37477406ae990 | middleware/mii_cache_wrapper.py | get_or_sync | MiiRaGe/miilibrary | 0 | python |

```python
def get_or_sync(self, method_name, *args):
    """Try to get data from the cache, or call the api and store the result.

    :param string method_name: Name of the API method to sync the data
    :param tuple args: *args for the API method
    :return dict: JSON returned by the API or from the db
    """
    logger.info(u'get_or_sync, %s(%s)' % (method_name, args))
    unique_type = '%s.%s' % (self.type, method_name)
    result = JSONKeyValue.get(unique_type, self.key)
    if result != 0:
        return result
    logger.info(u'Querying the API')
    result = self.mapping[method_name](*args)
    JSONKeyValue.set(unique_type, self.key, result)
    return result
```
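A hedged sketch of the caching behaviour; the concrete wrapper class name is an assumption (only `get_or_sync`, `self.key`, `self.mapping`, and `JSONKeyValue` appear in the source):

```python
fetcher = TmdbWrapper()                          # hypothetical concrete wrapper
first = fetcher.get_movie_name('Heat', 1995)     # misses the cache, calls the API
second = fetcher.get_movie_name('Heat', 1995)    # served from the JSONKeyValue store
assert first == second
```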
| 9f55ab7ac22e36ca0656741398af03402836eb0d5fc1b43feca2cbeff8227d1a | middleware/mii_cache_wrapper.py | get_movie_name | MiiRaGe/miilibrary | 0 | python |

```python
def get_movie_name(self, name, year=None):
    """Return the result of a movie query by name/year to The Movie DB.

    :param string name: name of the movie
    :param integer year: year of the movie
    :return dict: JSON returned by the API with information about the movie
    """
    self.key = {'name': name, 'year': year}
    return self.get_or_sync('get_movie_name', name, year)
```
| e792a1e224eba109f4e6b42c0337298ca8bf8a44ab00c22e2f1e1d008ddfee7c | middleware/mii_cache_wrapper.py | get_movie_imdb_id | MiiRaGe/miilibrary | 0 | python |

```python
def get_movie_imdb_id(self, tmdb_id):
    """Return the result of a movie query by TMDB id to The Movie DB.

    :param string tmdb_id: The Movie Database ID (gotten from name/year query)
    :return dict: JSON returned by the API with the IMDB ID of the movie
    """
    self.key = {'id': tmdb_id}
    return self.get_or_sync('get_movie_imdb_id', tmdb_id)
```
| 58755d3fbe71a4b6fd0e78ff82b08364f6399faca877eb073018ad1107627c0c | middleware/mii_cache_wrapper.py | get_imdb_information | MiiRaGe/miilibrary | 0 | python |

```python
def get_imdb_information(self, imdb_id):
    """Get all the IMDB information from the opensubtitle api.

    :param string imdb_id: IMDB ID to get the information on
    :return dict: All the information about a movie from IMDB
    """
    self.key = {'id': imdb_id}
    return self.get_or_sync('get_imdb_information', imdb_id)
```
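The three lookups above chain naturally from name/year to TMDB id to IMDB details; a hedged sketch (the wrapper class name and the layout of the returned JSON are assumptions, not verified API shapes):

```python
tmdb = TmdbWrapper()                               # hypothetical class name
search = tmdb.get_movie_name('Heat', 1995)
tmdb_id = search['results'][0]['id']               # assumed TMDB result layout
imdb_info = tmdb.get_movie_imdb_id(tmdb_id)        # JSON carrying the IMDB id
```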
| 75c940d98789cfc2f2ef33b2e8dda4258099a1195fd83ac809483d9e99005195 | middleware/mii_cache_wrapper.py | get_movie_name | MiiRaGe/miilibrary | 0 | python |

```python
def get_movie_name(self, movie_hash, number=''):
    """Return the movie name (and other information) from a hash.

    :param string movie_hash: String representing the special hash used by open subtitle
    :param number: Type of API method to call (can be either '' or '2', differences are unknown)
    :return dict: Return the movie information as JSON
    """
    self.key = {'movie_hash': movie_hash}
    return self.get_or_sync('get_movie_names%s' % number, [movie_hash])
```
"\n Return the movie name (and other information) from a hash\n :param string movie_hash: String representing the special hash used by open subtitle\n :param number: Type of API method to call (can be either or '2' differences are unknown)\n :return dict: Return the movie information as JSON\n "
self.key = {'movie_hash': movie_hash}
return self.get_or_sync(('get_movie_names%s' % number), [movie_hash]) | def get_movie_name(self, movie_hash, number=):
"\n Return the movie name (and other information) from a hash\n :param string movie_hash: String representing the special hash used by open subtitle\n :param number: Type of API method to call (can be either or '2' differences are unknown)\n :return dict: Return the movie information as JSON\n "
self.key = {'movie_hash': movie_hash}
return self.get_or_sync(('get_movie_names%s' % number), [movie_hash])<|docstring|>Return the movie name (and other information) from a hash
:param string movie_hash: String representing the special hash used by open subtitle
:param number: Type of API method to call (can be either '' or '2' differences are unknown)
:return dict: Return the movie information as JSON<|endoftext|> |
69a0451eddda23f59034a429480fd4963ec2d16e3ac70003ca660964d18414b0 | def get_subtitles(self, movie_hash, file_size):
'\n Get the list of subtitles associated to a file hash\n :param string movie_hash: String representing the special hash used by open subtitle\n :param str file_size: Size of the movie\n :return dict: Return the JSON containing information about different available subtitles\n '
self.key = {'movie_hash': movie_hash, 'file_size': file_size}
return self.get_or_sync('get_subtitles', movie_hash, file_size, '') | Get the list of subtitles associated to a file hash
:param string movie_hash: String representing the special hash used by open subtitle
:param str file_size: Size of the movie
:return dict: Return the JSON containing information about different available subtitles | middleware/mii_cache_wrapper.py | get_subtitles | MiiRaGe/miilibrary | 0 | python | def get_subtitles(self, movie_hash, file_size):
2458934458a85b75b1112b5417dc61e2d0831dbd3b126aebb66892bb3ecde31e | def poly_fit(traj, traj_len, threshold):
'\n Input:\n - traj: Numpy array of shape (2, traj_len)\n - traj_len: Len of trajectory\n - threshold: Minimum error to be considered for non linear traj\n Output:\n - int: 1 -> Non Linear 0-> Linear\n '
t = np.linspace(0, (traj_len - 1), traj_len)
res_x = np.polyfit(t, traj[(0, (- traj_len):)], 2, full=True)[1]
res_y = np.polyfit(t, traj[(1, (- traj_len):)], 2, full=True)[1]
if ((res_x + res_y) >= threshold):
return 1.0
else:
return 0.0 | Input:
- traj: Numpy array of shape (2, traj_len)
- traj_len: Len of trajectory
- threshold: Minimum error to be considered for non linear traj
Output:
- int: 1 -> Non Linear 0-> Linear | utils.py | poly_fit | sidharthsinha/social-nce-stgcnn | 11 | python | def poly_fit(traj, traj_len, threshold):
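Note that `poly_fit` fits a degree-2 polynomial, so a trajectory is flagged non-linear only when even a quadratic cannot fit it within `threshold`:

```python
import numpy as np

t = np.linspace(0, 11, 12)
straight = np.stack([t, 2 * t])       # (2, 12), perfectly linear
wiggly = np.stack([t, np.sin(t)])     # poorly approximated by a quadratic

print(poly_fit(straight, 12, 0.002))  # 0.0 -> linear (a quadratic fits exactly)
print(poly_fit(wiggly, 12, 0.002))    # 1.0 -> non-linear
```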
| 6e15ad26d081a3c519541cdbbe2bd751bb7cf9bc3d9d25d89f85769e3596e464 | utils.py | interpolate_traj | sidharthsinha/social-nce-stgcnn | 11 | python |

```python
def interpolate_traj(traj, num_interp=4):
    """Add linearly interpolated points of a trajectory."""
    sz = traj.shape
    dense = np.zeros((sz[0], (sz[1] - 1) * (num_interp + 1) + 1, 2))
    dense[:, :1, :] = traj[:, :1]
    for i in range(num_interp + 1):
        ratio = (i + 1) / (num_interp + 1)
        # blend consecutive waypoints; ratio == 1 reproduces the next waypoint
        dense[:, i + 1::num_interp + 1, :] = traj[:, 0:-1] * (1 - ratio) + traj[:, 1:] * ratio
    return dense
```
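Usage sketch: with `num_interp=4` every gap gains four extra points, so a 12-step trajectory densifies to (12 - 1) * 5 + 1 = 56 points with the originals preserved every fifth sample:

```python
import numpy as np

traj = np.random.rand(3, 12, 2)              # 3 agents, 12 timesteps, (x, y)
dense = interpolate_traj(traj, num_interp=4)
print(dense.shape)                           # (3, 56, 2)
assert np.allclose(dense[:, ::5], traj)      # originals sit at every 5th sample
```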
| 5ab936af6e6aeaf7805b135f6b162b12f8ef6120b9a68ed15349abadebfd0991 | utils.py | compute_col | sidharthsinha/social-nce-stgcnn | 11 | python |

```python
def compute_col(predicted_traj, predicted_trajs_all, thres=0.2):
    """
    Input:
    predicted_traj: predicted trajectory of the primary agent, [12, 2]
    predicted_trajs_all: predicted trajectory of all agents in the scene, [num_person, 12, 2]
    """
    num_interp = 4
    assert predicted_trajs_all.shape[0] > 1
    dense_all = interpolate_traj(predicted_trajs_all, num_interp)
    dense_ego = interpolate_traj(predicted_traj[None, :], num_interp)
    distances = np.linalg.norm(dense_all - dense_ego, axis=-1)
    mask = distances[:, 0] > 0  # exclude the primary agent itself
    return distances[mask].min(axis=0) < thres
```
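Usage sketch: a neighbour running parallel at 0.1 m trips the default 0.2 m threshold, while one at 5 m does not (the returned array has one flag per interpolated step):

```python
import numpy as np

t = np.linspace(0, 1, 12)
ego = np.stack([t, np.zeros(12)], axis=-1)   # (12, 2), moving along x
near = ego + np.array([0.0, 0.1])            # parallel neighbour 0.1 m away
far = ego + np.array([0.0, 5.0])             # parallel neighbour 5 m away

col = compute_col(ego, np.stack([ego, near, far]), thres=0.2)
print(col.any())                             # True: the 0.1 m neighbour collides
```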
a654625a8a4f5871cc7ba5c737b280ab634632df3b5847fb46dcd147a4cfc339 | def __init__(self, data_dir, obs_len=8, pred_len=8, skip=1, threshold=0.002, min_ped=1, delim='\t', norm_lap_matr=True):
'\n Args:\n - data_dir: Directory containing dataset files in the format\n <frame_id> <ped_id> <x> <y>\n - obs_len: Number of time-steps in input trajectories\n - pred_len: Number of time-steps in output trajectories\n - skip: Number of frames to skip while making the dataset\n - threshold: Minimum error to be considered for non linear traj\n when using a linear predictor\n - min_ped: Minimum number of pedestrians that should be in a seqeunce\n - delim: Delimiter in the dataset files\n '
super(TrajectoryDataset, self).__init__()
self.max_peds_in_frame = 0
self.data_dir = data_dir
self.obs_len = obs_len
self.pred_len = pred_len
self.skip = skip
self.seq_len = (self.obs_len + self.pred_len)
self.delim = delim
self.norm_lap_matr = norm_lap_matr
all_files = os.listdir(self.data_dir)
all_files = [os.path.join(self.data_dir, _path) for _path in all_files]
num_peds_in_seq = []
seq_list = []
seq_list_rel = []
loss_mask_list = []
non_linear_ped = []
for path in all_files:
if ('graph_data.dat' in path):
continue
data = read_file(path, delim)
frames = np.unique(data[(:, 0)]).tolist()
frame_data = []
for frame in frames:
frame_data.append(data[((frame == data[(:, 0)]), :)])
num_sequences = int(math.ceil((((len(frames) - self.seq_len) + 1) / skip)))
for person_idx in range(0, ((num_sequences * self.skip) + 1), skip):
curr_seq_data = np.concatenate(frame_data[person_idx:(person_idx + self.seq_len)], axis=0)
peds_in_curr_seq = np.unique(curr_seq_data[(:, 1)])
self.max_peds_in_frame = max(self.max_peds_in_frame, len(peds_in_curr_seq))
curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_loss_mask = np.zeros((len(peds_in_curr_seq), self.seq_len))
num_peds_considered = 0
_non_linear_ped = []
for (_, ped_id) in enumerate(peds_in_curr_seq):
curr_ped_seq = curr_seq_data[((curr_seq_data[(:, 1)] == ped_id), :)]
curr_ped_seq = np.around(curr_ped_seq, decimals=4)
pad_front = (frames.index(curr_ped_seq[(0, 0)]) - person_idx)
pad_end = ((frames.index(curr_ped_seq[((- 1), 0)]) - person_idx) + 1)
if ((pad_end - pad_front) != self.seq_len):
continue
curr_ped_seq = np.transpose(curr_ped_seq[(:, 2:)])
curr_ped_seq = curr_ped_seq
rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)
rel_curr_ped_seq[(:, 1:)] = (curr_ped_seq[(:, 1:)] - curr_ped_seq[(:, :(- 1))])
_idx = num_peds_considered
curr_seq[(_idx, :, pad_front:pad_end)] = curr_ped_seq
curr_seq_rel[(_idx, :, pad_front:pad_end)] = rel_curr_ped_seq
_non_linear_ped.append(poly_fit(curr_ped_seq, pred_len, threshold))
curr_loss_mask[(_idx, pad_front:pad_end)] = 1
num_peds_considered += 1
if (num_peds_considered > min_ped):
non_linear_ped += _non_linear_ped
num_peds_in_seq.append(num_peds_considered)
loss_mask_list.append(curr_loss_mask[:num_peds_considered])
seq_list.append(curr_seq[:num_peds_considered])
seq_list_rel.append(curr_seq_rel[:num_peds_considered])
self.num_seq = len(seq_list)
seq_list = np.concatenate(seq_list, axis=0)
seq_list_rel = np.concatenate(seq_list_rel, axis=0)
loss_mask_list = np.concatenate(loss_mask_list, axis=0)
non_linear_ped = np.asarray(non_linear_ped)
self.obs_traj = torch.from_numpy(seq_list[(:, :, :self.obs_len)]).type(torch.float)
self.pred_traj = torch.from_numpy(seq_list[(:, :, self.obs_len:)]).type(torch.float)
self.obs_traj_rel = torch.from_numpy(seq_list_rel[(:, :, :self.obs_len)]).type(torch.float)
self.pred_traj_rel = torch.from_numpy(seq_list_rel[(:, :, self.obs_len:)]).type(torch.float)
self.loss_mask = torch.from_numpy(loss_mask_list).type(torch.float)
self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)
cum_start_idx = ([0] + np.cumsum(num_peds_in_seq).tolist())
self.seq_start_end = [(start, end) for (start, end) in zip(cum_start_idx, cum_start_idx[1:])]
graph_data_path = os.path.join(self.data_dir, 'graph_data.dat')
if (not os.path.exists(graph_data_path)):
self.v_obs = []
self.A_obs = []
self.v_pred = []
self.A_pred = []
print('Processing Data .....')
pbar = tqdm(total=len(self.seq_start_end))
for ss in range(len(self.seq_start_end)):
pbar.update(1)
(start, end) = self.seq_start_end[ss]
(v_, a_) = seq_to_graph(self.obs_traj[(start:end, :)], self.obs_traj_rel[(start:end, :)], self.norm_lap_matr)
self.v_obs.append(v_.clone())
self.A_obs.append(a_.clone())
(v_, a_) = seq_to_graph(self.pred_traj[(start:end, :)], self.pred_traj_rel[(start:end, :)], self.norm_lap_matr)
self.v_pred.append(v_.clone())
self.A_pred.append(a_.clone())
pbar.close()
graph_data = {'v_obs': self.v_obs, 'A_obs': self.A_obs, 'v_pred': self.v_pred, 'A_pred': self.A_pred}
torch.save(graph_data, graph_data_path)
else:
graph_data = torch.load(graph_data_path)
(self.v_obs, self.A_obs, self.v_pred, self.A_pred) = (graph_data['v_obs'], graph_data['A_obs'], graph_data['v_pred'], graph_data['A_pred'])
print('Loaded pre-processed graph data at {:s}.'.format(graph_data_path))
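    # Ground-truth safety masks: a pedestrian is "safe" when its future
    # trajectory stays collision-free against every other pedestrian in the scene.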
self.safe_traj_masks = []
for batch_idx in range(len(self.seq_start_end)):
(start, end) = self.seq_start_end[batch_idx]
        pred_traj_gt = self.pred_traj[start:end, :]
num_person = pred_traj_gt.size(0)
safety_gt = torch.zeros(num_person).bool()
        label_traj_all = pred_traj_gt.permute(0, 2, 1).cpu().numpy()
        for person_idx in range(num_person):
            label_traj_primary = label_traj_all[person_idx]
            cur_traj_col_free = np.logical_not(compute_col(label_traj_primary, label_traj_all).max())
            safety_gt[person_idx] = bool(cur_traj_col_free)
self.safe_traj_masks.append(safety_gt) | Args:
- data_dir: Directory containing dataset files in the format
<frame_id> <ped_id> <x> <y>
- obs_len: Number of time-steps in input trajectories
- pred_len: Number of time-steps in output trajectories
- skip: Number of frames to skip while making the dataset
- threshold: Minimum error to be considered for non-linear traj
when using a linear predictor
- min_ped: Minimum number of pedestrians that should be in a sequence
- delim: Delimiter in the dataset files | utils.py | __init__ | sidharthsinha/social-nce-stgcnn | 11 | python | def __init__(self, data_dir, obs_len=8, pred_len=8, skip=1, threshold=0.002, min_ped=1, delim='\t', norm_lap_matr=True):
    '\n Args:\n - data_dir: Directory containing dataset files in the format\n <frame_id> <ped_id> <x> <y>\n - obs_len: Number of time-steps in input trajectories\n - pred_len: Number of time-steps in output trajectories\n - skip: Number of frames to skip while making the dataset\n - threshold: Minimum error to be considered for non-linear traj\n when using a linear predictor\n - min_ped: Minimum number of pedestrians that should be in a sequence\n - delim: Delimiter in the dataset files\n '
super(TrajectoryDataset, self).__init__()
self.max_peds_in_frame = 0
self.data_dir = data_dir
self.obs_len = obs_len
self.pred_len = pred_len
self.skip = skip
self.seq_len = (self.obs_len + self.pred_len)
self.delim = delim
self.norm_lap_matr = norm_lap_matr
all_files = os.listdir(self.data_dir)
all_files = [os.path.join(self.data_dir, _path) for _path in all_files]
num_peds_in_seq = []
seq_list = []
seq_list_rel = []
loss_mask_list = []
non_linear_ped = []
for path in all_files:
if ('graph_data.dat' in path):
continue
data = read_file(path, delim)
        frames = np.unique(data[:, 0]).tolist()
        frame_data = []
        for frame in frames:
            frame_data.append(data[frame == data[:, 0], :])
        num_sequences = int(math.ceil((len(frames) - self.seq_len + 1) / skip))
        for frame_idx in range(0, num_sequences * self.skip + 1, skip):
            curr_seq_data = np.concatenate(frame_data[frame_idx:frame_idx + self.seq_len], axis=0)
            peds_in_curr_seq = np.unique(curr_seq_data[:, 1])
self.max_peds_in_frame = max(self.max_peds_in_frame, len(peds_in_curr_seq))
curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_loss_mask = np.zeros((len(peds_in_curr_seq), self.seq_len))
num_peds_considered = 0
_non_linear_ped = []
for (_, ped_id) in enumerate(peds_in_curr_seq):
                curr_ped_seq = curr_seq_data[curr_seq_data[:, 1] == ped_id, :]
                curr_ped_seq = np.around(curr_ped_seq, decimals=4)
                pad_front = frames.index(curr_ped_seq[0, 0]) - frame_idx
                pad_end = frames.index(curr_ped_seq[-1, 0]) - frame_idx + 1
                if (pad_end - pad_front) != self.seq_len:
                    continue
                # Keep only the (x, y) columns and transpose to shape (2, seq_len).
                curr_ped_seq = np.transpose(curr_ped_seq[:, 2:])
                # Relative displacements: first step is zero, then frame-to-frame deltas.
                rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)
                rel_curr_ped_seq[:, 1:] = curr_ped_seq[:, 1:] - curr_ped_seq[:, :-1]
                _idx = num_peds_considered
                curr_seq[_idx, :, pad_front:pad_end] = curr_ped_seq
                curr_seq_rel[_idx, :, pad_front:pad_end] = rel_curr_ped_seq
                _non_linear_ped.append(poly_fit(curr_ped_seq, pred_len, threshold))
                curr_loss_mask[_idx, pad_front:pad_end] = 1
num_peds_considered += 1
if (num_peds_considered > min_ped):
non_linear_ped += _non_linear_ped
num_peds_in_seq.append(num_peds_considered)
loss_mask_list.append(curr_loss_mask[:num_peds_considered])
seq_list.append(curr_seq[:num_peds_considered])
seq_list_rel.append(curr_seq_rel[:num_peds_considered])
self.num_seq = len(seq_list)
seq_list = np.concatenate(seq_list, axis=0)
seq_list_rel = np.concatenate(seq_list_rel, axis=0)
loss_mask_list = np.concatenate(loss_mask_list, axis=0)
non_linear_ped = np.asarray(non_linear_ped)
    self.obs_traj = torch.from_numpy(seq_list[:, :, :self.obs_len]).type(torch.float)
    self.pred_traj = torch.from_numpy(seq_list[:, :, self.obs_len:]).type(torch.float)
    self.obs_traj_rel = torch.from_numpy(seq_list_rel[:, :, :self.obs_len]).type(torch.float)
    self.pred_traj_rel = torch.from_numpy(seq_list_rel[:, :, self.obs_len:]).type(torch.float)
self.loss_mask = torch.from_numpy(loss_mask_list).type(torch.float)
self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)
cum_start_idx = ([0] + np.cumsum(num_peds_in_seq).tolist())
self.seq_start_end = [(start, end) for (start, end) in zip(cum_start_idx, cum_start_idx[1:])]
graph_data_path = os.path.join(self.data_dir, 'graph_data.dat')
if (not os.path.exists(graph_data_path)):
self.v_obs = []
self.A_obs = []
self.v_pred = []
self.A_pred = []
print('Processing Data .....')
pbar = tqdm(total=len(self.seq_start_end))
for ss in range(len(self.seq_start_end)):
pbar.update(1)
(start, end) = self.seq_start_end[ss]
            (v_, a_) = seq_to_graph(self.obs_traj[start:end, :], self.obs_traj_rel[start:end, :], self.norm_lap_matr)
            self.v_obs.append(v_.clone())
            self.A_obs.append(a_.clone())
            (v_, a_) = seq_to_graph(self.pred_traj[start:end, :], self.pred_traj_rel[start:end, :], self.norm_lap_matr)
self.v_pred.append(v_.clone())
self.A_pred.append(a_.clone())
pbar.close()
graph_data = {'v_obs': self.v_obs, 'A_obs': self.A_obs, 'v_pred': self.v_pred, 'A_pred': self.A_pred}
torch.save(graph_data, graph_data_path)
else:
graph_data = torch.load(graph_data_path)
(self.v_obs, self.A_obs, self.v_pred, self.A_pred) = (graph_data['v_obs'], graph_data['A_obs'], graph_data['v_pred'], graph_data['A_pred'])
print('Loaded pre-processed graph data at {:s}.'.format(graph_data_path))
self.safe_traj_masks = []
for batch_idx in range(len(self.seq_start_end)):
(start, end) = self.seq_start_end[batch_idx]
        pred_traj_gt = self.pred_traj[start:end, :]
num_person = pred_traj_gt.size(0)
safety_gt = torch.zeros(num_person).bool()
        label_traj_all = pred_traj_gt.permute(0, 2, 1).cpu().numpy()
        for person_idx in range(num_person):
            label_traj_primary = label_traj_all[person_idx]
            cur_traj_col_free = np.logical_not(compute_col(label_traj_primary, label_traj_all).max())
            safety_gt[person_idx] = bool(cur_traj_col_free)
self.safe_traj_masks.append(safety_gt) | def __init__(self, data_dir, obs_len=8, pred_len=8, skip=1, threshold=0.002, min_ped=1, delim='\t', norm_lap_matr=True):
    '\n Args:\n - data_dir: Directory containing dataset files in the format\n <frame_id> <ped_id> <x> <y>\n - obs_len: Number of time-steps in input trajectories\n - pred_len: Number of time-steps in output trajectories\n - skip: Number of frames to skip while making the dataset\n - threshold: Minimum error to be considered for non-linear traj\n when using a linear predictor\n - min_ped: Minimum number of pedestrians that should be in a sequence\n - delim: Delimiter in the dataset files\n '
super(TrajectoryDataset, self).__init__()
self.max_peds_in_frame = 0
self.data_dir = data_dir
self.obs_len = obs_len
self.pred_len = pred_len
self.skip = skip
self.seq_len = (self.obs_len + self.pred_len)
self.delim = delim
self.norm_lap_matr = norm_lap_matr
all_files = os.listdir(self.data_dir)
all_files = [os.path.join(self.data_dir, _path) for _path in all_files]
num_peds_in_seq = []
seq_list = []
seq_list_rel = []
loss_mask_list = []
non_linear_ped = []
for path in all_files:
if ('graph_data.dat' in path):
continue
data = read_file(path, delim)
        frames = np.unique(data[:, 0]).tolist()
        frame_data = []
        for frame in frames:
            frame_data.append(data[frame == data[:, 0], :])
        num_sequences = int(math.ceil((len(frames) - self.seq_len + 1) / skip))
        for frame_idx in range(0, num_sequences * self.skip + 1, skip):
            curr_seq_data = np.concatenate(frame_data[frame_idx:frame_idx + self.seq_len], axis=0)
            peds_in_curr_seq = np.unique(curr_seq_data[:, 1])
self.max_peds_in_frame = max(self.max_peds_in_frame, len(peds_in_curr_seq))
curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_loss_mask = np.zeros((len(peds_in_curr_seq), self.seq_len))
num_peds_considered = 0
_non_linear_ped = []
for (_, ped_id) in enumerate(peds_in_curr_seq):
                curr_ped_seq = curr_seq_data[curr_seq_data[:, 1] == ped_id, :]
                curr_ped_seq = np.around(curr_ped_seq, decimals=4)
                pad_front = frames.index(curr_ped_seq[0, 0]) - frame_idx
                pad_end = frames.index(curr_ped_seq[-1, 0]) - frame_idx + 1
                if (pad_end - pad_front) != self.seq_len:
                    continue
                # Keep only the (x, y) columns and transpose to shape (2, seq_len).
                curr_ped_seq = np.transpose(curr_ped_seq[:, 2:])
                # Relative displacements: first step is zero, then frame-to-frame deltas.
                rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)
                rel_curr_ped_seq[:, 1:] = curr_ped_seq[:, 1:] - curr_ped_seq[:, :-1]
                _idx = num_peds_considered
                curr_seq[_idx, :, pad_front:pad_end] = curr_ped_seq
                curr_seq_rel[_idx, :, pad_front:pad_end] = rel_curr_ped_seq
                _non_linear_ped.append(poly_fit(curr_ped_seq, pred_len, threshold))
                curr_loss_mask[_idx, pad_front:pad_end] = 1
num_peds_considered += 1
if (num_peds_considered > min_ped):
non_linear_ped += _non_linear_ped
num_peds_in_seq.append(num_peds_considered)
loss_mask_list.append(curr_loss_mask[:num_peds_considered])
seq_list.append(curr_seq[:num_peds_considered])
seq_list_rel.append(curr_seq_rel[:num_peds_considered])
self.num_seq = len(seq_list)
seq_list = np.concatenate(seq_list, axis=0)
seq_list_rel = np.concatenate(seq_list_rel, axis=0)
loss_mask_list = np.concatenate(loss_mask_list, axis=0)
non_linear_ped = np.asarray(non_linear_ped)
    self.obs_traj = torch.from_numpy(seq_list[:, :, :self.obs_len]).type(torch.float)
    self.pred_traj = torch.from_numpy(seq_list[:, :, self.obs_len:]).type(torch.float)
    self.obs_traj_rel = torch.from_numpy(seq_list_rel[:, :, :self.obs_len]).type(torch.float)
    self.pred_traj_rel = torch.from_numpy(seq_list_rel[:, :, self.obs_len:]).type(torch.float)
self.loss_mask = torch.from_numpy(loss_mask_list).type(torch.float)
self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)
cum_start_idx = ([0] + np.cumsum(num_peds_in_seq).tolist())
self.seq_start_end = [(start, end) for (start, end) in zip(cum_start_idx, cum_start_idx[1:])]
graph_data_path = os.path.join(self.data_dir, 'graph_data.dat')
if (not os.path.exists(graph_data_path)):
self.v_obs = []
self.A_obs = []
self.v_pred = []
self.A_pred = []
print('Processing Data .....')
pbar = tqdm(total=len(self.seq_start_end))
for ss in range(len(self.seq_start_end)):
pbar.update(1)
(start, end) = self.seq_start_end[ss]
            (v_, a_) = seq_to_graph(self.obs_traj[start:end, :], self.obs_traj_rel[start:end, :], self.norm_lap_matr)
            self.v_obs.append(v_.clone())
            self.A_obs.append(a_.clone())
            (v_, a_) = seq_to_graph(self.pred_traj[start:end, :], self.pred_traj_rel[start:end, :], self.norm_lap_matr)
self.v_pred.append(v_.clone())
self.A_pred.append(a_.clone())
pbar.close()
graph_data = {'v_obs': self.v_obs, 'A_obs': self.A_obs, 'v_pred': self.v_pred, 'A_pred': self.A_pred}
torch.save(graph_data, graph_data_path)
else:
graph_data = torch.load(graph_data_path)
(self.v_obs, self.A_obs, self.v_pred, self.A_pred) = (graph_data['v_obs'], graph_data['A_obs'], graph_data['v_pred'], graph_data['A_pred'])
print('Loaded pre-processed graph data at {:s}.'.format(graph_data_path))
self.safe_traj_masks = []
for batch_idx in range(len(self.seq_start_end)):
(start, end) = self.seq_start_end[batch_idx]
        pred_traj_gt = self.pred_traj[start:end, :]
num_person = pred_traj_gt.size(0)
safety_gt = torch.zeros(num_person).bool()
        label_traj_all = pred_traj_gt.permute(0, 2, 1).cpu().numpy()
        for person_idx in range(num_person):
            label_traj_primary = label_traj_all[person_idx]
            cur_traj_col_free = np.logical_not(compute_col(label_traj_primary, label_traj_all).max())
            safety_gt[person_idx] = bool(cur_traj_col_free)
self.safe_traj_masks.append(safety_gt)<|docstring|>Args:
- data_dir: Directory containing dataset files in the format
<frame_id> <ped_id> <x> <y>
- obs_len: Number of time-steps in input trajectories
- pred_len: Number of time-steps in output trajectories
- skip: Number of frames to skip while making the dataset
- threshold: Minimum error to be considered for non-linear traj
when using a linear predictor
- min_ped: Minimum number of pedestrians that should be in a sequence
- delim: Delimiter in the dataset files<|endoftext|> |
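A minimal usage sketch for the TrajectoryDataset record above; the data directory is hypothetical, and the module-level helpers it relies on (read_file, seq_to_graph, poly_fit, compute_col) are assumed importable from the same utils.py:

# Hedged example, not part of the source repository.
dset = TrajectoryDataset(data_dir='./datasets/eth/train', obs_len=8, pred_len=12,
                         skip=1, delim='\t', norm_lap_matr=True)
start, end = dset.seq_start_end[0]               # pedestrian rows of the first scene
print(dset.obs_traj[start:end].shape)            # (num_peds, 2, obs_len)
print(len(dset.v_obs), len(dset.A_obs))          # per-scene graph nodes / adjacency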
225bd748e84680841def5c78be593fe6eff0edb9964ee39af0ee4c899cca326b | def get_resolved(doc, clusters):
' Return a list of utterrances text where the coref are resolved to the most representative mention'
resolved = list((tok.text_with_ws for tok in doc))
sent_subjs = [t for t in doc if (t.dep_ == 'nsubj')]
for cluster in clusters:
for coref in cluster:
if ((coref != cluster.main) and any([(tok in sent_subjs) for tok in coref])):
resolved[coref.start] = (cluster.main.text + doc[(coref.end - 1)].whitespace_)
for i in range((coref.start + 1), coref.end):
resolved[i] = ''
return ''.join(resolved) | Return a list of utterrances text where the coref are resolved to the most representative mention | nlp_helpers/features.py | get_resolved | 4398TempleSpring2020/cscapstoneproject-infinitetrivia | 1 | python | def get_resolved(doc, clusters):
' '
resolved = list((tok.text_with_ws for tok in doc))
sent_subjs = [t for t in doc if (t.dep_ == 'nsubj')]
for cluster in clusters:
for coref in cluster:
if ((coref != cluster.main) and any([(tok in sent_subjs) for tok in coref])):
resolved[coref.start] = (cluster.main.text + doc[(coref.end - 1)].whitespace_)
for i in range((coref.start + 1), coref.end):
resolved[i] =
return .join(resolved) | def get_resolved(doc, clusters):
' '
resolved = list((tok.text_with_ws for tok in doc))
sent_subjs = [t for t in doc if (t.dep_ == 'nsubj')]
for cluster in clusters:
for coref in cluster:
if ((coref != cluster.main) and any([(tok in sent_subjs) for tok in coref])):
resolved[coref.start] = (cluster.main.text + doc[(coref.end - 1)].whitespace_)
for i in range((coref.start + 1), coref.end):
resolved[i] =
return .join(resolved)<|docstring|>Return a list of utterrances text where the coref are resolved to the most representative mention<|endoftext|> |
ffa475e3fc7cf281984087ebb90d9e53239ae6a4202fb30267efe3eb2a357e83 | def _ping(self) -> bool:
'Test if the device is listening.'
assert (self.socket is not None)
resp = None
try:
self.socket.sendall(b'PINGPING')
resp = self.socket.recv(8)
except Exception:
pass
return (resp == b'PONGPONG') | Test if the device is listening. | hwilib/devices/trezorlib/transport/udp.py | _ping | cjackie/HWI | 285 | python | def _ping(self) -> bool:
assert (self.socket is not None)
resp = None
try:
self.socket.sendall(b'PINGPING')
resp = self.socket.recv(8)
except Exception:
pass
return (resp == b'PONGPONG') | def _ping(self) -> bool:
assert (self.socket is not None)
resp = None
try:
self.socket.sendall(b'PINGPING')
resp = self.socket.recv(8)
except Exception:
pass
return (resp == b'PONGPONG')<|docstring|>Test if the device is listening.<|endoftext|> |
038c28f817768be8530ea2b8db3b781237cae61eff82ae1b5ba960cce0c06f30 | def initialize_schema(self):
'Create every necessary objects (like tables or indices) in the\n backend.\n\n This is excuted with the ``cliquet migrate`` command.\n '
raise NotImplementedError | Create every necessary objects (like tables or indices) in the
backend.
This is excuted with the ``cliquet migrate`` command. | cliquet/permission/__init__.py | initialize_schema | ravitejavalluri/cliquet | 89 | python | def initialize_schema(self):
'Create every necessary objects (like tables or indices) in the\n backend.\n\n This is excuted with the ``cliquet migrate`` command.\n '
raise NotImplementedError | def initialize_schema(self):
'Create every necessary objects (like tables or indices) in the\n backend.\n\n This is excuted with the ``cliquet migrate`` command.\n '
raise NotImplementedError<|docstring|>Create every necessary objects (like tables or indices) in the
backend.
This is excuted with the ``cliquet migrate`` command.<|endoftext|> |
5028373aee8cccfe01c78eafd84566fd7e574c31650d7656dc33ff79bb428b08 | def flush(self):
'Delete all data stored in the permission backend.'
raise NotImplementedError | Delete all data stored in the permission backend. | cliquet/permission/__init__.py | flush | ravitejavalluri/cliquet | 89 | python | def flush(self):
raise NotImplementedError | def flush(self):
raise NotImplementedError<|docstring|>Delete all data stored in the permission backend.<|endoftext|> |
d6d8afc691dcbb6b39be2130df09610ec44b5fd832a2dd57b524cac5095ebd13 | def add_user_principal(self, user_id, principal):
'Add an additional principal to a user.\n\n :param str user_id: The user_id to add the principal to.\n :param str principal: The principal to add.\n '
raise NotImplementedError | Add an additional principal to a user.
:param str user_id: The user_id to add the principal to.
:param str principal: The principal to add. | cliquet/permission/__init__.py | add_user_principal | ravitejavalluri/cliquet | 89 | python | def add_user_principal(self, user_id, principal):
'Add an additional principal to a user.\n\n :param str user_id: The user_id to add the principal to.\n :param str principal: The principal to add.\n '
raise NotImplementedError | def add_user_principal(self, user_id, principal):
'Add an additional principal to a user.\n\n :param str user_id: The user_id to add the principal to.\n :param str principal: The principal to add.\n '
raise NotImplementedError<|docstring|>Add an additional principal to a user.
:param str user_id: The user_id to add the principal to.
:param str principal: The principal to add.<|endoftext|> |
5352825f04c0e48cbd7186de92077305f99e70c039e56ed00cc2cba44538523d | def remove_user_principal(self, user_id, principal):
'Remove an additional principal from a user.\n\n :param str user_id: The user_id to remove the principal to.\n :param str principal: The principal to remove.\n '
raise NotImplementedError | Remove an additional principal from a user.
:param str user_id: The user_id to remove the principal to.
:param str principal: The principal to remove. | cliquet/permission/__init__.py | remove_user_principal | ravitejavalluri/cliquet | 89 | python | def remove_user_principal(self, user_id, principal):
'Remove an additional principal from a user.\n\n :param str user_id: The user_id to remove the principal to.\n :param str principal: The principal to remove.\n '
raise NotImplementedError | def remove_user_principal(self, user_id, principal):
'Remove an additional principal from a user.\n\n :param str user_id: The user_id to remove the principal to.\n :param str principal: The principal to remove.\n '
raise NotImplementedError<|docstring|>Remove an additional principal from a user.
:param str user_id: The user_id to remove the principal to.
:param str principal: The principal to remove.<|endoftext|> |
131b5088f863baf4f32b4825a57b65677ff5175f4c49698b0a769cc42c3b605a | def remove_principal(self, principal):
'Remove a principal from every user.\n\n :param str principal: The principal to remove.\n '
raise NotImplementedError | Remove a principal from every user.
:param str principal: The principal to remove. | cliquet/permission/__init__.py | remove_principal | ravitejavalluri/cliquet | 89 | python | def remove_principal(self, principal):
'Remove a principal from every user.\n\n :param str principal: The principal to remove.\n '
raise NotImplementedError | def remove_principal(self, principal):
'Remove a principal from every user.\n\n :param str principal: The principal to remove.\n '
raise NotImplementedError<|docstring|>Remove a principal from every user.
:param str principal: The principal to remove.<|endoftext|> |
76fc7403b9175a2080a78007853379833790ecc72154cfec00bfb6e9d8b882be | def user_principals(self, user_id):
'Return the set of additionnal principals given to a user.\n\n :param str user_id: The user_id to get the list of groups for.\n :returns: The list of group principals the user is in.\n :rtype: set\n\n '
raise NotImplementedError | Return the set of additionnal principals given to a user.
:param str user_id: The user_id to get the list of groups for.
:returns: The list of group principals the user is in.
:rtype: set | cliquet/permission/__init__.py | user_principals | ravitejavalluri/cliquet | 89 | python | def user_principals(self, user_id):
'Return the set of additionnal principals given to a user.\n\n :param str user_id: The user_id to get the list of groups for.\n :returns: The list of group principals the user is in.\n :rtype: set\n\n '
raise NotImplementedError | def user_principals(self, user_id):
'Return the set of additionnal principals given to a user.\n\n :param str user_id: The user_id to get the list of groups for.\n :returns: The list of group principals the user is in.\n :rtype: set\n\n '
raise NotImplementedError<|docstring|>Return the set of additionnal principals given to a user.
:param str user_id: The user_id to get the list of groups for.
:returns: The list of group principals the user is in.
:rtype: set<|endoftext|> |
60a318d5ea95eef382acdf8a1c8c23f49e23445487023b630aba0aca2988c122 | def add_principal_to_ace(self, object_id, permission, principal):
'Add a principal to an Access Control Entry.\n\n :param str object_id: The object to add the permission principal to.\n :param str permission: The permission to add the principal to.\n :param str principal: The principal to add to the ACE.\n '
raise NotImplementedError | Add a principal to an Access Control Entry.
:param str object_id: The object to add the permission principal to.
:param str permission: The permission to add the principal to.
:param str principal: The principal to add to the ACE. | cliquet/permission/__init__.py | add_principal_to_ace | ravitejavalluri/cliquet | 89 | python | def add_principal_to_ace(self, object_id, permission, principal):
'Add a principal to an Access Control Entry.\n\n :param str object_id: The object to add the permission principal to.\n :param str permission: The permission to add the principal to.\n :param str principal: The principal to add to the ACE.\n '
raise NotImplementedError | def add_principal_to_ace(self, object_id, permission, principal):
'Add a principal to an Access Control Entry.\n\n :param str object_id: The object to add the permission principal to.\n :param str permission: The permission to add the principal to.\n :param str principal: The principal to add to the ACE.\n '
raise NotImplementedError<|docstring|>Add a principal to an Access Control Entry.
:param str object_id: The object to add the permission principal to.
:param str permission: The permission to add the principal to.
:param str principal: The principal to add to the ACE.<|endoftext|> |
7430d67e2bd5ee061bb3a5f4e929552e469fa536140ad3d6ca88c4d84aa2e02a | def remove_principal_from_ace(self, object_id, permission, principal):
'Remove a principal to an Access Control Entry.\n\n :param str object_id: The object to remove the permission principal to.\n :param str permission: The permission that should be removed.\n :param str principal: The principal to remove to the ACE.\n '
raise NotImplementedError | Remove a principal to an Access Control Entry.
:param str object_id: The object to remove the permission principal to.
:param str permission: The permission that should be removed.
:param str principal: The principal to remove to the ACE. | cliquet/permission/__init__.py | remove_principal_from_ace | ravitejavalluri/cliquet | 89 | python | def remove_principal_from_ace(self, object_id, permission, principal):
'Remove a principal to an Access Control Entry.\n\n :param str object_id: The object to remove the permission principal to.\n :param str permission: The permission that should be removed.\n :param str principal: The principal to remove to the ACE.\n '
raise NotImplementedError | def remove_principal_from_ace(self, object_id, permission, principal):
'Remove a principal to an Access Control Entry.\n\n :param str object_id: The object to remove the permission principal to.\n :param str permission: The permission that should be removed.\n :param str principal: The principal to remove to the ACE.\n '
raise NotImplementedError<|docstring|>Remove a principal to an Access Control Entry.
:param str object_id: The object to remove the permission principal to.
:param str permission: The permission that should be removed.
:param str principal: The principal to remove to the ACE.<|endoftext|> |
00d1e077731ae0b634afe9fadda85b2746c1617e3d53144cc165693a9f6ae83e | def object_permission_principals(self, object_id, permission):
'Return the set of principals of a bound permission\n (unbound permission + object id).\n\n :param str object_id: The object_id the permission is set to.\n :param str permission: The permission to query.\n :returns: The list of user principals\n :rtype: set\n\n '
raise NotImplementedError | Return the set of principals of a bound permission
(unbound permission + object id).
:param str object_id: The object_id the permission is set to.
:param str permission: The permission to query.
:returns: The list of user principals
:rtype: set | cliquet/permission/__init__.py | object_permission_principals | ravitejavalluri/cliquet | 89 | python | def object_permission_principals(self, object_id, permission):
'Return the set of principals of a bound permission\n (unbound permission + object id).\n\n :param str object_id: The object_id the permission is set to.\n :param str permission: The permission to query.\n :returns: The list of user principals\n :rtype: set\n\n '
raise NotImplementedError | def object_permission_principals(self, object_id, permission):
'Return the set of principals of a bound permission\n (unbound permission + object id).\n\n :param str object_id: The object_id the permission is set to.\n :param str permission: The permission to query.\n :returns: The list of user principals\n :rtype: set\n\n '
raise NotImplementedError<|docstring|>Return the set of principals of a bound permission
(unbound permission + object id).
:param str object_id: The object_id the permission is set to.
:param str permission: The permission to query.
:returns: The list of user principals
:rtype: set<|endoftext|> |
5433b63cc02d3cb236b637b0e0b725fca7b4bdbe2a067dfff5a8833f3ea34b5e | def principals_accessible_objects(self, principals, permission, object_id_match=None, get_bound_permissions=None):
"Return the list of objects id where the specified `principals`\n have the specified `permission`.\n\n :param list principal: List of user principals\n :param str permission: The permission to query.\n :param str object_id_match: Filter object ids based on a pattern\n (e.g. ``'*articles*'``).\n :param function get_bound_permissions:\n The methods to call in order to generate the list of permission to\n verify against. (ie: if you can write, you can read)\n :returns: The list of object ids\n :rtype: set\n\n "
raise NotImplementedError | Return the list of objects id where the specified `principals`
have the specified `permission`.
:param list principal: List of user principals
:param str permission: The permission to query.
:param str object_id_match: Filter object ids based on a pattern
(e.g. ``'*articles*'``).
:param function get_bound_permissions:
The methods to call in order to generate the list of permission to
verify against. (ie: if you can write, you can read)
:returns: The list of object ids
:rtype: set | cliquet/permission/__init__.py | principals_accessible_objects | ravitejavalluri/cliquet | 89 | python | def principals_accessible_objects(self, principals, permission, object_id_match=None, get_bound_permissions=None):
"Return the list of objects id where the specified `principals`\n have the specified `permission`.\n\n :param list principal: List of user principals\n :param str permission: The permission to query.\n :param str object_id_match: Filter object ids based on a pattern\n (e.g. ``'*articles*'``).\n :param function get_bound_permissions:\n The methods to call in order to generate the list of permission to\n verify against. (ie: if you can write, you can read)\n :returns: The list of object ids\n :rtype: set\n\n "
raise NotImplementedError | def principals_accessible_objects(self, principals, permission, object_id_match=None, get_bound_permissions=None):
"Return the list of objects id where the specified `principals`\n have the specified `permission`.\n\n :param list principal: List of user principals\n :param str permission: The permission to query.\n :param str object_id_match: Filter object ids based on a pattern\n (e.g. ``'*articles*'``).\n :param function get_bound_permissions:\n The methods to call in order to generate the list of permission to\n verify against. (ie: if you can write, you can read)\n :returns: The list of object ids\n :rtype: set\n\n "
raise NotImplementedError<|docstring|>Return the list of objects id where the specified `principals`
have the specified `permission`.
:param list principal: List of user principals
:param str permission: The permission to query.
:param str object_id_match: Filter object ids based on a pattern
(e.g. ``'*articles*'``).
:param function get_bound_permissions:
The methods to call in order to generate the list of permission to
verify against. (ie: if you can write, you can read)
:returns: The list of object ids
:rtype: set<|endoftext|> |
982cdd5bae47b3f2197f9a128179763d9b21bc7966d025372b35d287fbe7224f | def object_permission_authorized_principals(self, object_id, permission, get_bound_permissions=None):
'Return the full set of authorized principals for a given\n permission + object (bound permission).\n\n :param str object_id: The object_id the permission is set to.\n :param str permission: The permission to query.\n :param function get_bound_permissions:\n The methods to call in order to generate the list of permission to\n verify against. (ie: if you can write, you can read)\n\n :returns: The list of user principals\n :rtype: set\n\n '
raise NotImplementedError | Return the full set of authorized principals for a given
permission + object (bound permission).
:param str object_id: The object_id the permission is set to.
:param str permission: The permission to query.
:param function get_bound_permissions:
The methods to call in order to generate the list of permission to
verify against. (ie: if you can write, you can read)
:returns: The list of user principals
:rtype: set | cliquet/permission/__init__.py | object_permission_authorized_principals | ravitejavalluri/cliquet | 89 | python | def object_permission_authorized_principals(self, object_id, permission, get_bound_permissions=None):
'Return the full set of authorized principals for a given\n permission + object (bound permission).\n\n :param str object_id: The object_id the permission is set to.\n :param str permission: The permission to query.\n :param function get_bound_permissions:\n The methods to call in order to generate the list of permission to\n verify against. (ie: if you can write, you can read)\n\n :returns: The list of user principals\n :rtype: set\n\n '
raise NotImplementedError | def object_permission_authorized_principals(self, object_id, permission, get_bound_permissions=None):
'Return the full set of authorized principals for a given\n permission + object (bound permission).\n\n :param str object_id: The object_id the permission is set to.\n :param str permission: The permission to query.\n :param function get_bound_permissions:\n The methods to call in order to generate the list of permission to\n verify against. (ie: if you can write, you can read)\n\n :returns: The list of user principals\n :rtype: set\n\n '
raise NotImplementedError<|docstring|>Return the full set of authorized principals for a given
permission + object (bound permission).
:param str object_id: The object_id the permission is set to.
:param str permission: The permission to query.
:param function get_bound_permissions:
The methods to call in order to generate the list of permission to
verify against. (ie: if you can write, you can read)
:returns: The list of user principals
:rtype: set<|endoftext|> |
6125c881e699601e9f4d387ed95d7cc847b24e73f974055e9f89412969c78e2a | def check_permission(self, object_id, permission, principals, get_bound_permissions=None):
'Test if a principal set have got a permission on an object.\n\n :param str object_id:\n The identifier of the object concerned by the permission.\n :param str permission: The permission to test.\n :param set principals:\n A set of user principals to test the permission against.\n :param function get_bound_permissions:\n The method to call in order to generate the set of\n permission to verify against. (ie: if you can write, you can read)\n\n '
principals = set(principals)
authorized_principals = self.object_permission_authorized_principals(object_id, permission, get_bound_permissions)
return (len((authorized_principals & principals)) > 0) | Test if a principal set have got a permission on an object.
:param str object_id:
The identifier of the object concerned by the permission.
:param str permission: The permission to test.
:param set principals:
A set of user principals to test the permission against.
:param function get_bound_permissions:
The method to call in order to generate the set of
permission to verify against. (ie: if you can write, you can read) | cliquet/permission/__init__.py | check_permission | ravitejavalluri/cliquet | 89 | python | def check_permission(self, object_id, permission, principals, get_bound_permissions=None):
'Test if a principal set have got a permission on an object.\n\n :param str object_id:\n The identifier of the object concerned by the permission.\n :param str permission: The permission to test.\n :param set principals:\n A set of user principals to test the permission against.\n :param function get_bound_permissions:\n The method to call in order to generate the set of\n permission to verify against. (ie: if you can write, you can read)\n\n '
principals = set(principals)
authorized_principals = self.object_permission_authorized_principals(object_id, permission, get_bound_permissions)
return (len((authorized_principals & principals)) > 0) | def check_permission(self, object_id, permission, principals, get_bound_permissions=None):
'Test if a principal set have got a permission on an object.\n\n :param str object_id:\n The identifier of the object concerned by the permission.\n :param str permission: The permission to test.\n :param set principals:\n A set of user principals to test the permission against.\n :param function get_bound_permissions:\n The method to call in order to generate the set of\n permission to verify against. (ie: if you can write, you can read)\n\n '
principals = set(principals)
authorized_principals = self.object_permission_authorized_principals(object_id, permission, get_bound_permissions)
return (len((authorized_principals & principals)) > 0)<|docstring|>Test if a principal set have got a permission on an object.
:param str object_id:
The identifier of the object concerned by the permission.
:param str permission: The permission to test.
:param set principals:
A set of user principals to test the permission against.
:param function get_bound_permissions:
The method to call in order to generate the set of
permission to verify against. (ie: if you can write, you can read)<|endoftext|> |
8bd3224c58fc6cf541f0b18652e459d84bb0ea8e250e88f32f94129360ca1198 | def object_permissions(self, object_id, permissions=None):
'Return the set of principals for each object permission.\n\n :param str object_id: The object_id the permission is set to.\n :param list permissions: List of permissions to retrieve.\n If not define will try to find them all.\n :returns: The dictionnary with the list of user principals for\n each object permissions\n :rtype: dict\n\n '
raise NotImplementedError | Return the set of principals for each object permission.
:param str object_id: The object_id the permission is set to.
:param list permissions: List of permissions to retrieve.
If not define will try to find them all.
:returns: The dictionnary with the list of user principals for
each object permissions
:rtype: dict | cliquet/permission/__init__.py | object_permissions | ravitejavalluri/cliquet | 89 | python | def object_permissions(self, object_id, permissions=None):
'Return the set of principals for each object permission.\n\n :param str object_id: The object_id the permission is set to.\n :param list permissions: List of permissions to retrieve.\n If not define will try to find them all.\n :returns: The dictionnary with the list of user principals for\n each object permissions\n :rtype: dict\n\n '
raise NotImplementedError | def object_permissions(self, object_id, permissions=None):
'Return the set of principals for each object permission.\n\n :param str object_id: The object_id the permission is set to.\n :param list permissions: List of permissions to retrieve.\n If not define will try to find them all.\n :returns: The dictionnary with the list of user principals for\n each object permissions\n :rtype: dict\n\n '
raise NotImplementedError<|docstring|>Return the set of principals for each object permission.
:param str object_id: The object_id the permission is set to.
:param list permissions: List of permissions to retrieve.
If not define will try to find them all.
:returns: The dictionnary with the list of user principals for
each object permissions
:rtype: dict<|endoftext|> |
a0a78a1670739f125950b1d02c36cf93e26246396d972eaa6723f2b8d26f8871 | def replace_object_permissions(self, object_id, permissions):
'Replace given object permissions.\n\n :param str object_id: The object to replace permissions to.\n :param str permissions: The permissions dict to replace.\n '
raise NotImplementedError | Replace given object permissions.
:param str object_id: The object to replace permissions to.
:param str permissions: The permissions dict to replace. | cliquet/permission/__init__.py | replace_object_permissions | ravitejavalluri/cliquet | 89 | python | def replace_object_permissions(self, object_id, permissions):
'Replace given object permissions.\n\n :param str object_id: The object to replace permissions to.\n :param str permissions: The permissions dict to replace.\n '
raise NotImplementedError | def replace_object_permissions(self, object_id, permissions):
'Replace given object permissions.\n\n :param str object_id: The object to replace permissions to.\n :param str permissions: The permissions dict to replace.\n '
raise NotImplementedError<|docstring|>Replace given object permissions.
:param str object_id: The object to replace permissions to.
:param str permissions: The permissions dict to replace.<|endoftext|> |
078bf956a12085a739f6c815623d6277924efc4607dd6c23e766fe254a750274 | def delete_object_permissions(self, *object_id_list):
'Delete all listed object permissions.\n\n :param str object_id: Remove given objects permissions.\n '
raise NotImplementedError | Delete all listed object permissions.
:param str object_id: Remove given objects permissions. | cliquet/permission/__init__.py | delete_object_permissions | ravitejavalluri/cliquet | 89 | python | def delete_object_permissions(self, *object_id_list):
'Delete all listed object permissions.\n\n :param str object_id: Remove given objects permissions.\n '
raise NotImplementedError | def delete_object_permissions(self, *object_id_list):
'Delete all listed object permissions.\n\n :param str object_id: Remove given objects permissions.\n '
raise NotImplementedError<|docstring|>Delete all listed object permissions.
:param str object_id: Remove given objects permissions.<|endoftext|> |
b786eed27901e5d55ea12d86cf2464e7b1adb8059dfe9c11666bdb6934290870 | def ping(request):
'Test the permission backend is operationnal.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` is everything is ok, ``False`` otherwise.\n :rtype: bool\n '
try:
if asbool(request.registry.settings.get('readonly')):
backend.user_principals(__HEARTBEAT_KEY__)
else:
backend.add_user_principal(__HEARTBEAT_KEY__, 'alive')
backend.remove_user_principal(__HEARTBEAT_KEY__, 'alive')
except:
logger.exception('Heartbeat Error')
return False
return True | Test the permission backend is operationnal.
:param request: current request object
:type request: :class:`~pyramid:pyramid.request.Request`
:returns: ``True`` is everything is ok, ``False`` otherwise.
:rtype: bool | cliquet/permission/__init__.py | ping | ravitejavalluri/cliquet | 89 | python | def ping(request):
'Test the permission backend is operationnal.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` is everything is ok, ``False`` otherwise.\n :rtype: bool\n '
try:
if asbool(request.registry.settings.get('readonly')):
backend.user_principals(__HEARTBEAT_KEY__)
else:
backend.add_user_principal(__HEARTBEAT_KEY__, 'alive')
backend.remove_user_principal(__HEARTBEAT_KEY__, 'alive')
except:
logger.exception('Heartbeat Error')
return False
return True | def ping(request):
'Test the permission backend is operationnal.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` is everything is ok, ``False`` otherwise.\n :rtype: bool\n '
try:
if asbool(request.registry.settings.get('readonly')):
backend.user_principals(__HEARTBEAT_KEY__)
else:
backend.add_user_principal(__HEARTBEAT_KEY__, 'alive')
backend.remove_user_principal(__HEARTBEAT_KEY__, 'alive')
except:
logger.exception('Heartbeat Error')
return False
return True<|docstring|>Test the permission backend is operationnal.
:param request: current request object
:type request: :class:`~pyramid:pyramid.request.Request`
:returns: ``True`` is everything is ok, ``False`` otherwise.
:rtype: bool<|endoftext|> |
91429b4c2524e055346559c93a400d8827cc0752bb548a99d889a996158e811b | def load_mock_data(apps, schema_editor):
'\n Fixtures will be removed on django 1.9.\n Using data migrations instead as suggested\n in the documentation.\n '
fixture_file = os.path.join(fixture_dir, fixture_filename)
with open(fixture_file, 'rb') as fixture:
objects = serializers.deserialize('json', fixture, ignorenonexistent=True)
for obj in objects:
obj.save() | Fixtures will be removed on django 1.9.
Using data migrations instead as suggested
in the documentation. | demo/example/foo/migrations/0002_auto_20151110_1101.py | load_mock_data | swappsco/django-plans | 13 | python | def load_mock_data(apps, schema_editor):
'\n Fixtures will be removed on django 1.9.\n Using data migrations instead as suggested\n in the documentation.\n '
fixture_file = os.path.join(fixture_dir, fixture_filename)
with open(fixture_file, 'rb') as fixture:
objects = serializers.deserialize('json', fixture, ignorenonexistent=True)
for obj in objects:
obj.save() | def load_mock_data(apps, schema_editor):
'\n Fixtures will be removed on django 1.9.\n Using data migrations instead as suggested\n in the documentation.\n '
fixture_file = os.path.join(fixture_dir, fixture_filename)
with open(fixture_file, 'rb') as fixture:
objects = serializers.deserialize('json', fixture, ignorenonexistent=True)
for obj in objects:
obj.save()<|docstring|>Fixtures will be removed on django 1.9.
Using data migrations instead as suggested
in the documentation.<|endoftext|> |
27f10be5ff60f830e977f0cac81f84c2a78c63b069c1613aac3aa5d148122958 | async def red_delete_data_for_user(self, **kwargs):
' Nothing to delete '
return | Nothing to delete | pnw/pnw.py | red_delete_data_for_user | ltzmax/kennnyshiwa-cogs | 21 | python | async def red_delete_data_for_user(self, **kwargs):
' '
return | async def red_delete_data_for_user(self, **kwargs):
' '
return<|docstring|>Nothing to delete<|endoftext|> |
fe31776cec119942f9673641748a4522890bb48e42c77e153251f2654ae19808 | async def initialize(self) -> None:
'\n Move the API keys from cog stored config to core bot config if they exist.\n '
pnw_key = (await self.config.pnw_key())
if hasattr(self.bot, 'get_shared_api_tokens'):
if ((pnw_key is not None) and ('pnw' not in (await self.bot.get_shared_api_tokens()))):
(await self.bot.set.shared_api_tokens('pnw', value={'api_key': pnw_key}))
(await self.config.pnw_key.clear())
elif ((pnw_key is not None) and ('pnw' not in (await self.bot.db.api_tokens()))):
(await self.bot.db.api_tokens.set_raw('pnw', value={'api_key': pnw_key}))
(await self.config.pnw_key.clear()) | Move the API keys from cog stored config to core bot config if they exist. | pnw/pnw.py | initialize | ltzmax/kennnyshiwa-cogs | 21 | python | async def initialize(self) -> None:
'\n \n '
pnw_key = (await self.config.pnw_key())
if hasattr(self.bot, 'get_shared_api_tokens'):
if ((pnw_key is not None) and ('pnw' not in (await self.bot.get_shared_api_tokens()))):
(await self.bot.set.shared_api_tokens('pnw', value={'api_key': pnw_key}))
(await self.config.pnw_key.clear())
elif ((pnw_key is not None) and ('pnw' not in (await self.bot.db.api_tokens()))):
(await self.bot.db.api_tokens.set_raw('pnw', value={'api_key': pnw_key}))
(await self.config.pnw_key.clear()) | async def initialize(self) -> None:
'\n \n '
pnw_key = (await self.config.pnw_key())
if hasattr(self.bot, 'get_shared_api_tokens'):
if ((pnw_key is not None) and ('pnw' not in (await self.bot.get_shared_api_tokens()))):
(await self.bot.set.shared_api_tokens('pnw', value={'api_key': pnw_key}))
(await self.config.pnw_key.clear())
elif ((pnw_key is not None) and ('pnw' not in (await self.bot.db.api_tokens()))):
(await self.bot.db.api_tokens.set_raw('pnw', value={'api_key': pnw_key}))
(await self.config.pnw_key.clear())<|docstring|>Move the API keys from cog stored config to core bot config if they exist.<|endoftext|> |
78416584a6b15958fb2403835923e8d0e9fa38403755f9e57240eed431d787f0 | def escape_query(self, query) -> str:
'Escape mentions from queries'
return query.replace('`', "'") | Escape mentions from queries | pnw/pnw.py | escape_query | ltzmax/kennnyshiwa-cogs | 21 | python | def escape_query(self, query) -> str:
return query.replace('`', "'") | def escape_query(self, query) -> str:
return query.replace('`', "'")<|docstring|>Escape mentions from queries<|endoftext|> |
932038d0e5af3a2b74a204a66ed7bf90b7a9c8c30c7f0ff475fe2fc8f3c41854 | @checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def pnwkey(self, ctx):
'\n Explain how to set PNW API key.\n Note: You have to have a PNW account to get a api key\n '
message = "So first, to get a PNW api key you need to have an account otherwise you can't use this cog.\n\nTo find your API key:\n1. Login on PNW [here](https://politicsandwar.com/login/)\n2. Go on your [account](https://politicsandwar.com/account/)\n3. Scroll the bottom and copy the Key\n4. Use in DM `{}set api pnw api_key your_api_key_here`\n6. There you go! You can now use the PNW cog.".format(ctx.prefix)
(await ctx.maybe_send_embed(message)) | Explain how to set PNW API key.
Note: You have to have a PNW account to get a api key | pnw/pnw.py | pnwkey | ltzmax/kennnyshiwa-cogs | 21 | python | @checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def pnwkey(self, ctx):
'\n Explain how to set PNW API key.\n Note: You have to have a PNW account to get a api key\n '
message = "So first, to get a PNW api key you need to have an account otherwise you can't use this cog.\n\nTo find your API key:\n1. Login on PNW [here](https://politicsandwar.com/login/)\n2. Go on your [account](https://politicsandwar.com/account/)\n3. Scroll the bottom and copy the Key\n4. Use in DM `{}set api pnw api_key your_api_key_here`\n6. There you go! You can now use the PNW cog.".format(ctx.prefix)
(await ctx.maybe_send_embed(message)) | @checks.is_owner()
@commands.bot_has_permissions(embed_links=True)
@commands.command()
async def pnwkey(self, ctx):
'\n Explain how to set PNW API key.\n Note: You have to have a PNW account to get a api key\n '
message = "So first, to get a PNW api key you need to have an account otherwise you can't use this cog.\n\nTo find your API key:\n1. Login on PNW [here](https://politicsandwar.com/login/)\n2. Go on your [account](https://politicsandwar.com/account/)\n3. Scroll the bottom and copy the Key\n4. Use in DM `{}set api pnw api_key your_api_key_here`\n6. There you go! You can now use the PNW cog.".format(ctx.prefix)
(await ctx.maybe_send_embed(message))<|docstring|>Explain how to set PNW API key.
Note: You have to have a PNW account to get a api key<|endoftext|> |
5693d93e64cc6587a86cf3a385c4a70f6151d236e1fb7e358dfde35163b056af | @staticmethod
async def do_lookup(ctx, nid) -> list:
'\n Run Nation lookup.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/nation/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % nid)) as r:
data = (await r.json())
if (not data):
return
return data | Run Nation lookup. | pnw/pnw.py | do_lookup | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def do_lookup(ctx, nid) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/nation/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % nid)) as r:
data = (await r.json())
if (not data):
return
return data | @staticmethod
async def do_lookup(ctx, nid) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/nation/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % nid)) as r:
data = (await r.json())
if (not data):
return
return data<|docstring|>Run Nation lookup.<|endoftext|> |
9988ed546853ad41c44da26f9ee1f17cfe3ecbc8d8e8c2c5e9d266eee749585c | @staticmethod
async def nations_lookup(ctx):
'\n Lookup all nations.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'https://politicsandwar.com/api/nations/?vm=true&key={}'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = (await r.json())
if (not data):
return
return data | Lookup all nations. | pnw/pnw.py | nations_lookup | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def nations_lookup(ctx):
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'https://politicsandwar.com/api/nations/?vm=true&key={}'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = (await r.json())
if (not data):
return
return data | @staticmethod
async def nations_lookup(ctx):
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'https://politicsandwar.com/api/nations/?vm=true&key={}'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = (await r.json())
if (not data):
return
return data<|docstring|>Lookup all nations.<|endoftext|> |
86801e19af5cd5cf9a9d3918b44e39915e8c8439b34a1161cbe9a8f346ccc0a5 | @staticmethod
async def alliances_lookup(ctx):
'\n Run Alliance Lookup.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliances/?key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = (await r.json())
if (not data):
return
return data | Run Alliance Lookup. | pnw/pnw.py | alliances_lookup | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def alliances_lookup(ctx):
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliances/?key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = (await r.json())
if (not data):
return
return data | @staticmethod
async def alliances_lookup(ctx):
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliances/?key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = (await r.json())
if (not data):
return
return data<|docstring|>Run Alliance Lookup.<|endoftext|> |
f0c6c94424f52d550198e81969c3279d2484a0ca7da2ed18a541dc3fa6ba900b | @staticmethod
async def alliance_lookup(ctx, alid: str) -> list:
'\n Run Alliance Lookup.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pwnkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliance/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return None
return data | Run Alliance Lookup. | pnw/pnw.py | alliance_lookup | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def alliance_lookup(ctx, alid: str) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliance/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return None
return data | @staticmethod
async def alliance_lookup(ctx, alid: str) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliance/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return None
return data<|docstring|>Run Alliance Lookup.<|endoftext|> |
1bf0064bbc653473ef546f0661644f5c4494178f915cd32ada95d673ba17665b | @staticmethod
async def city_api(ctx, alid: str) -> list:
'\n Run City Lookup.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/city/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return
return data | Run City Lookup. | pnw/pnw.py | city_api | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def city_api(ctx, alid: str) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/city/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return
return data | @staticmethod
async def city_api(ctx, alid: str) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/city/id=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return
return data<|docstring|>Run City Lookup.<|endoftext|> |
a53d87e1584cb3c4400d1adf3b49251f5422f6ab0941ad737f1896b7f6c410e6 | @staticmethod
async def tradeprice_lookup(ctx, query):
'\n Lookup resources trading info.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/tradeprice/resource=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % query)) as r:
data = (await r.json())
if (not data):
return
return data | Lookup resources trading info. | pnw/pnw.py | tradeprice_lookup | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def tradeprice_lookup(ctx, query):
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/tradeprice/resource=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % query)) as r:
data = (await r.json())
if (not data):
return
return data | @staticmethod
async def tradeprice_lookup(ctx, query):
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/tradeprice/resource=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % query)) as r:
data = (await r.json())
if (not data):
return
return data<|docstring|>Lookup resources trading info.<|endoftext|> |
c371a2c7e66a4e11be48077a3e16677f898591ff6dd03712cb2f9f332b4522ea | @staticmethod
async def bank_lookup(ctx, alid: str) -> list:
'\n Run Bank Lookup.\n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliance-bank/?allianceid=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return
return data | Run Bank Lookup. | pnw/pnw.py | bank_lookup | ltzmax/kennnyshiwa-cogs | 21 | python | @staticmethod
async def bank_lookup(ctx, alid: str) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliance-bank/?allianceid=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return
return data | @staticmethod
async def bank_lookup(ctx, alid: str) -> list:
'\n \n '
if hasattr(ctx.bot, 'get_shared_api_tokens'):
api = (await ctx.bot.get_shared_api_tokens('pnw'))
pnw_key = api.get('api_key')
else:
api = (await ctx.bot.db.api_tokens.get_raw('pnw'))
pnw_key = api['api_key']
if (not pnw_key):
return (await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix)))
base_url = 'http://politicsandwar.com/api/alliance-bank/?allianceid=%s&key={}&$format=json'.format(pnw_key)
async with aiohttp.ClientSession() as session:
async with session.get((base_url % alid)) as r:
data = (await r.json())
if (not data):
return
return data<|docstring|>Run Bank Lookup.<|endoftext|> |
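All six lookup helpers above share the same boilerplate: resolve the stored 'pnw' key, format it into the endpoint, GET, and return the decoded JSON. A minimal sketch of that shared pattern, assuming a hypothetical _pnw_get helper that is not part of this cog:
import aiohttp

async def _pnw_get(ctx, url_template, *args):
    # Hypothetical consolidation of the six helpers above (not in the cog).
    if hasattr(ctx.bot, 'get_shared_api_tokens'):
        api = await ctx.bot.get_shared_api_tokens('pnw')
        pnw_key = api.get('api_key')
    else:
        api = await ctx.bot.db.api_tokens.get_raw('pnw')
        pnw_key = api['api_key']
    if not pnw_key:
        await ctx.send('You need to set an API key! Check ``{}pnwkey`` for instructions\n'.format(ctx.prefix))
        return None
    url = url_template.format(*args, key=pnw_key)
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as r:
            data = await r.json()
    return data or None

# Usage sketch: data = await _pnw_get(ctx, 'https://politicsandwar.com/api/alliance/id={}&key={key}', alid)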
3102d716150d72f7c37afe5462b090a0b1f66c7317ead41942e616169a6c16c5 | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def nation(self, ctx, *, name):
'\n Look up a nation.\n '
(await ctx.send('This may take a while.....'))
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
nations_data = (await self.nations_lookup(ctx))
success = nations_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in nations_data['nations']:
if (name.lower() == I['nation'].lower()):
key = True
nid = I['nationid']
if (key == True):
pass
else:
nid = name
nation_data = (await self.do_lookup(ctx, nid))
if (not nation_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
success = nation_data['success']
if (success == False):
(await ctx.send('No such nation exists! Please enter a valid nation ID'))
return
name = nation_data['name']
nationid = nation_data['nationid']
continent = nation_data['continent']
color = nation_data['color']
leadername = nation_data['leadername']
nationrank = nation_data['nationrank']
score = nation_data['score']
alliance = nation_data['alliance']
last_active = nation_data['minutessinceactive']
domestic_policy = nation_data['domestic_policy']
war_policy = nation_data['war_policy']
founded = nation_data['founded']
age = nation_data['daysold']
flag = nation_data['flagurl']
cities = nation_data['cities']
embed = discord.Embed(title='Nation Info for {}'.format(name), url='https://politicsandwar.com/nation/id={}'.format(nationid), color=(await ctx.embed_color()))
embed.add_field(name='Leader Name', value=leadername, inline=True)
embed.add_field(name='Color', value=color, inline=True)
embed.add_field(name='Rank', value=nationrank, inline=True)
embed.add_field(name='Score', value=score, inline=True)
embed.add_field(name='Alliance', value=alliance, inline=True)
embed.add_field(name='Continent', value=continent, inline=True)
embed.add_field(name='Domestic Policy', value=domestic_policy, inline=True)
embed.add_field(name='War Policy', value=war_policy, inline=True)
embed.add_field(name='Age of nation', value=f'{age} Days Old', inline=True)
embed.add_field(name='Number of Cities', value=cities, inline=True)
embed.set_image(url=flag)
embed.set_footer(text='Last active: {} minutes ago | Founded {}'.format(last_active, founded))
(await ctx.send(embed=embed)) | Look up a nation. | pnw/pnw.py | nation | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def nation(self, ctx, *, name):
'\n \n '
(await ctx.send('This may take a while.....'))
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
nations_data = (await self.nations_lookup(ctx))
success = nations_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in nations_data['nations']:
if (name.lower() == I['nation'].lower()):
key = True
nid = I['nationid']
if (key == True):
pass
else:
nid = name
nation_data = (await self.do_lookup(ctx, nid))
if (not nation_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
success = nation_data['success']
if (success == False):
(await ctx.send('No such nation exists! Please enter a valid nation ID'))
return
name = nation_data['name']
nationid = nation_data['nationid']
continent = nation_data['continent']
color = nation_data['color']
leadername = nation_data['leadername']
nationrank = nation_data['nationrank']
score = nation_data['score']
alliance = nation_data['alliance']
last_active = nation_data['minutessinceactive']
domestic_policy = nation_data['domestic_policy']
war_policy = nation_data['war_policy']
founded = nation_data['founded']
age = nation_data['daysold']
flag = nation_data['flagurl']
cities = nation_data['cities']
embed = discord.Embed(title='Nation Info for {}'.format(name), url='https://politicsandwar.com/nation/id={}'.format(nationid), color=(await ctx.embed_color()))
embed.add_field(name='Leader Name', value=leadername, inline=True)
embed.add_field(name='Color', value=color, inline=True)
embed.add_field(name='Rank', value=nationrank, inline=True)
embed.add_field(name='Score', value=score, inline=True)
embed.add_field(name='Alliance', value=alliance, inline=True)
embed.add_field(name='Continent', value=continent, inline=True)
embed.add_field(name='Domestic Policy', value=domestic_policy, inline=True)
embed.add_field(name='War Policy', value=war_policy, inline=True)
embed.add_field(name='Age of nation', value=f'{age} Days Old', inline=True)
embed.add_field(name='Number of Cities', value=cities, inline=True)
embed.set_image(url=flag)
embed.set_footer(text='Last active: {} minutes ago | Founded {}'.format(last_active, founded))
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def nation(self, ctx, *, name):
'\n \n '
(await ctx.send('This may take a while.....'))
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
nations_data = (await self.nations_lookup(ctx))
success = nations_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in nations_data['nations']:
if (name.lower() == I['nation'].lower()):
key = True
nid = I['nationid']
if (key == True):
pass
else:
nid = name
nation_data = (await self.do_lookup(ctx, nid))
if (not nation_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
success = nation_data['success']
if (success == False):
(await ctx.send('No such nation exists! Please enter a valid nation ID'))
return
name = nation_data['name']
nationid = nation_data['nationid']
continent = nation_data['continent']
color = nation_data['color']
leadername = nation_data['leadername']
nationrank = nation_data['nationrank']
score = nation_data['score']
alliance = nation_data['alliance']
last_active = nation_data['minutessinceactive']
domestic_policy = nation_data['domestic_policy']
war_policy = nation_data['war_policy']
founded = nation_data['founded']
age = nation_data['daysold']
flag = nation_data['flagurl']
cities = nation_data['cities']
embed = discord.Embed(title='Nation Info for {}'.format(name), url='https://politicsandwar.com/nation/id={}'.format(nationid), color=(await ctx.embed_color()))
embed.add_field(name='Leader Name', value=leadername, inline=True)
embed.add_field(name='Color', value=color, inline=True)
embed.add_field(name='Rank', value=nationrank, inline=True)
embed.add_field(name='Score', value=score, inline=True)
embed.add_field(name='Alliance', value=alliance, inline=True)
embed.add_field(name='Continent', value=continent, inline=True)
embed.add_field(name='Domestic Policy', value=domestic_policy, inline=True)
embed.add_field(name='War Policy', value=war_policy, inline=True)
embed.add_field(name='Age of nation', value=f'{age} Days Old', inline=True)
embed.add_field(name='Number of Cities', value=cities, inline=True)
embed.set_image(url=flag)
embed.set_footer(text='Last active: {} minutes ago | Founded {}'.format(last_active, founded))
(await ctx.send(embed=embed))<|docstring|>Look up a nation.<|endoftext|> |
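The nation command resolves a name by scanning every record in nations_data['nations']; a two-line sketch (hypothetical, not in the cog) of the same resolution via a case-insensitive index, falling back to treating the input as an ID exactly like the loop above:
index = {i['nation'].lower(): i['nationid'] for i in nations_data['nations']}
nid = index.get(name.lower(), name)  # unmatched input falls through as a raw nation ID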
913f65f6aa1a663de93d145adf91e362f85533cd7a1e50907e18949639d2c7ca | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def alliance(self, ctx, *, name):
'\n Lookup an Alliance with an ID.\n '
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
alliances_data = (await self.alliances_lookup(ctx))
success = alliances_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in alliances_data['alliances']:
if (name.lower() == I['name'].lower()):
key = True
alid = I['id']
if (key == True):
pass
else:
alid = name
alliance_data = (await self.alliance_lookup(ctx, alid))
if (not alliance_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
if (alliance_data is None):
(await ctx.send("Can't find that alliance"))
return
try:
if alliance_data['error']:
(await ctx.send("Can't find that alliance"))
return
except:
pass
name = alliance_data['name']
allianceid = alliance_data['allianceid']
if (alliance_data['irc'] == ''):
chat = 'No Discord/IRC listed'
else:
chat = alliance_data['irc']
if (alliance_data['forumurl'] == ''):
forum = 'No forum link listed'
else:
forum = alliance_data['forumurl']
embed = discord.Embed(title='Alliance Info for {} - {}'.format(name, allianceid), url='https://politicsandwar.com/alliance/id={}'.format(alid), color=(await ctx.embed_color()))
embed.add_field(name='Chat', value=chat, inline=False)
embed.add_field(name='Forum Link:', value=forum, inline=False)
embed.add_field(name='Score:', value=alliance_data['score'])
embed.add_field(name='Members:', value=alliance_data['members'])
embed.add_field(name='Cities:', value=alliance_data['cities'])
embed.add_field(name='Soldiers:', value=alliance_data['soldiers'])
embed.add_field(name='Tanks:', value=alliance_data['tanks'])
embed.add_field(name='Aircraft:', value=alliance_data['aircraft'])
embed.add_field(name='Ships:', value=alliance_data['ships'])
embed.add_field(name='Missiles:', value=alliance_data['missiles'])
embed.add_field(name='Nukes:', value=alliance_data['nukes'])
embed.add_field(name='Treasures', value=alliance_data['treasures'])
embed.set_image(url=alliance_data['flagurl'])
embed.set_footer(text='Info Provided By http://politicsandwar.com/api/')
(await ctx.send(embed=embed)) | Lookup an Alliance with an ID. | pnw/pnw.py | alliance | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def alliance(self, ctx, *, name):
'\n \n '
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
alliances_data = (await self.alliances_lookup(ctx))
success = alliances_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in alliances_data['alliances']:
if (name.lower() == I['name'].lower()):
key = True
alid = I['id']
if (key == True):
pass
else:
alid = name
alliance_data = (await self.alliance_lookup(ctx, alid))
if (not alliance_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
if (alliance_data is None):
(await ctx.send("Can't find that alliance"))
return
try:
if alliance_data['error']:
(await ctx.send("Can't find that alliance"))
return
except:
pass
name = alliance_data['name']
allianceid = alliance_data['allianceid']
if (alliance_data['irc'] == ''):
chat = 'No Discord/IRC listed'
else:
chat = alliance_data['irc']
if (alliance_data['forumurl'] == ''):
forum = 'No forum link listed'
else:
forum = alliance_data['forumurl']
embed = discord.Embed(title='Alliance Info for {} - {}'.format(name, allianceid), url='https://politicsandwar.com/alliance/id={}'.format(alid), color=(await ctx.embed_color()))
embed.add_field(name='Chat', value=chat, inline=False)
embed.add_field(name='Forum Link:', value=forum, inline=False)
embed.add_field(name='Score:', value=alliance_data['score'])
embed.add_field(name='Members:', value=alliance_data['members'])
embed.add_field(name='Cities:', value=alliance_data['cities'])
embed.add_field(name='Soldiers:', value=alliance_data['soldiers'])
embed.add_field(name='Tanks:', value=alliance_data['tanks'])
embed.add_field(name='Aircraft:', value=alliance_data['aircraft'])
embed.add_field(name='Ships:', value=alliance_data['ships'])
embed.add_field(name='Missiles:', value=alliance_data['missiles'])
embed.add_field(name='Nukes:', value=alliance_data['nukes'])
embed.add_field(name='Treasures', value=alliance_data['treasures'])
embed.set_image(url=alliance_data['flagurl'])
embed.set_footer(text='Info Provided By http://politicsandwar.com/api/')
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def alliance(self, ctx, *, name):
'\n \n '
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
alliances_data = (await self.alliances_lookup(ctx))
success = alliances_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in alliances_data['alliances']:
if (name.lower() == I['name'].lower()):
key = True
alid = I['id']
if (key == True):
pass
else:
alid = name
alliance_data = (await self.alliance_lookup(ctx, alid))
if (not alliance_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
if (alliance_data is None):
(await ctx.send("Can't find that alliance"))
return
try:
if alliance_data['error']:
(await ctx.send("Can't find that alliance"))
return
except:
pass
name = alliance_data['name']
allianceid = alliance_data['allianceid']
if (alliance_data['irc'] == ''):
chat = 'No Discord/IRC listed'
else:
chat = alliance_data['irc']
if (alliance_data['forumurl'] == ''):
forum = 'No forum link listed'
else:
forum = alliance_data['forumurl']
embed = discord.Embed(title='Alliance Info for {} - {}'.format(name, allianceid), url='https://politicsandwar.com/alliance/id={}'.format(alid), color=(await ctx.embed_color()))
embed.add_field(name='Chat', value=chat, inline=False)
embed.add_field(name='Forum Link:', value=forum, inline=False)
embed.add_field(name='Score:', value=alliance_data['score'])
embed.add_field(name='Members:', value=alliance_data['members'])
embed.add_field(name='Cities:', value=alliance_data['cities'])
embed.add_field(name='Soldiers:', value=alliance_data['soldiers'])
embed.add_field(name='Tanks:', value=alliance_data['tanks'])
embed.add_field(name='Aircraft:', value=alliance_data['aircraft'])
embed.add_field(name='Ships:', value=alliance_data['ships'])
embed.add_field(name='Missiles:', value=alliance_data['missiles'])
embed.add_field(name='Nukes:', value=alliance_data['nukes'])
embed.add_field(name='Treasures', value=alliance_data['treasures'])
embed.set_image(url=alliance_data['flagurl'])
embed.set_footer(text='Info Provided By http://politicsandwar.com/api/')
(await ctx.send(embed=embed))<|docstring|>Lookup an Alliance with an ID.<|endoftext|> |
fd661e28dd768eb9554583501505739145044ab34f43dff7dcc5a1a3b8eb627e | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def cityinfo(self, ctx, *, id):
'\n Provides information about the city linked to the ID you have given.\n '
data = (await self.city_api(ctx, id))
try:
success = data['success']
if (success == False):
if data['general_message']:
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
if (not data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
try:
if (data['success'] == True):
pass
except:
(await ctx.send("That city doesn't exist"))
return
nation = (await self.do_lookup(ctx, data['nationid']))
embed = discord.Embed(title='City Info', color=(await ctx.embed_color()), description=f"[{data['nation']} - {data['leader']}](https://politicsandwar.com/nation/id={data['nationid']})")
embed.add_field(name=f"{data['name']} - General Info", inline=False, value=box(f'''
Infra {data['infrastructure']}
Land {data['land']}
Crime {data['crime']}
Disease {data['disease']}
Pollution {data['pollution']}
Commerce {data['commerce']}'''))
embed.add_field(name='Improvements - Power', value=box(f'''
Coal Power Plants {data['imp_coalpower']}
Oil Power Plants {data['imp_oilpower']}
Nuclear Power Plants {data['imp_nuclearpower']}
Wind Power Plants {data['imp_windpower']}'''))
if (nation['continent'] == 'Europe'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Iron Mines {data['imp_ironmine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Asia'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Iron Mines {data['imp_ironmine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Africa'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Bauxite Mines {data['imp_bauxitemine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'South America'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Bauxite Mines {data['imp_bauxitemine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'North America'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Iron Mines {data['imp_ironmine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Australia'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Bauxite Mines {data['imp_bauxitemine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
embed.add_field(name='Improvements - Resources', value=f'{resources}')
embed.add_field(name='Improvements - Production', value=box(f'''
Gas Refineries {data['imp_gasrefinery']}
Steel Mills {data['imp_steelmill']}
Aluminum Refineries {data['imp_aluminumrefinery']}
Munitions Factories {data['imp_munitionsfactory']}'''))
embed.add_field(name='Improvements - Civil', value=box(f'''
Police Stations {data['imp_policestation']}
Hospitals {data['imp_hospital']}
Recycling Centers {data['imp_recyclingcenter']}
Subways {data['imp_subway']}'''))
embed.add_field(name='Improvements - Commerce', value=box(f'''
Supermarkets {data['imp_supermarket']}
Banks {data['imp_bank']}
Malls {data['imp_mall']}
Stadiums {data['imp_stadium']}'''))
embed.add_field(name='Improvements - Military', value=box(f'''
Barracks {data['imp_barracks']}
Factories {data['imp_factory']}
Hangars {data['imp_hangar']}
Drydocks {data['imp_drydock']}'''))
embed.set_footer(text='Info Provided By http://politicsandwar.com/api/')
(await ctx.send(embed=embed)) | Provides information about the city linked to the ID you have given. | pnw/pnw.py | cityinfo | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def cityinfo(self, ctx, *, id):
'\n \n '
data = (await self.city_api(ctx, id))
try:
success = data['success']
if (success == False):
if data['general_message']:
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
if (not data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
try:
if (data['success'] == True):
pass
except:
(await ctx.send("That city doesn't exist"))
return
nation = (await self.do_lookup(ctx, data['nationid']))
embed = discord.Embed(title='City Info', color=(await ctx.embed_color()), description=f"[{data['nation']} - {data['leader']}](https://politicsandwar.com/nation/id={data['nationid']})")
embed.add_field(name=f"{data['name']} - General Info", inline=False, value=box(f'''
Infra {data['infrastructure']}
Land {data['land']}
Crime {data['crime']}
Disease {data['disease']}
Pollution {data['pollution']}
Commerce {data['commerce']}'''))
embed.add_field(name='Improvements - Power', value=box(f'''
Coal Power Plants {data['imp_coalpower']}
Oil Power Plants {data['imp_oilpower']}
Nuclear Power Plants {data['imp_nuclearpower']}
Wind Power Plants {data['imp_windpower']}'''))
if (nation['continent'] == 'Europe'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Iron Mines {data['imp_ironmine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Asia'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Iron Mines {data['imp_ironmine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Africa'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Bauxite Mines {data['imp_bauxitemine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'South America'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Bauxite Mines {data['imp_bauxitemine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'North America'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Iron Mines {data['imp_ironmine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Australia'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Bauxite Mines {data['imp_bauxitemine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
embed.add_field(name='Improvements - Resources', value=f'{resources}')
embed.add_field(name='Improvements - Production', value=box(f'''
Gas Refineries {data['imp_gasrefinery']}
Steel Mills {data['imp_steelmill']}
Aluminum Refineries {data['imp_aluminumrefinery']}
Munitions Factories {data['imp_munitionsfactory']}'''))
embed.add_field(name='Improvements - Civil', value=box(f'''
Police Stations {data['imp_policestation']}
Hospitals {data['imp_hospital']}
Recycling Centers {data['imp_recyclingcenter']}
Subways {data['imp_subway']}'''))
embed.add_field(name='Improvements - Commerce', value=box(f'''
Supermarkets {data['imp_supermarket']}
Banks {data['imp_bank']}
Malls {data['imp_mall']}
Stadiums {data['imp_stadium']}'''))
embed.add_field(name='Improvements - Military', value=box(f'''
Barracks {data['imp_barracks']}
Factories {data['imp_factory']}
Hangars {data['imp_hangar']}
Drydocks {data['imp_drydock']}'''))
embed.set_footer(text='Info Provided By http://politicsandwar.com/api/')
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def cityinfo(self, ctx, *, id):
'\n \n '
data = (await self.city_api(ctx, id))
try:
success = data['success']
if (success == False):
if data['general_message']:
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
if (not data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
try:
if (data['success'] == True):
pass
except:
(await ctx.send("That city doesn't exist"))
return
nation = (await self.do_lookup(ctx, data['nationid']))
embed = discord.Embed(title='City Info', color=(await ctx.embed_color()), description=f"[{data['nation']} - {data['leader']}](https://politicsandwar.com/nation/id={data['nationid']})")
embed.add_field(name=f"{data['name']} - General Info", inline=False, value=box(f'''
Infra {data['infrastructure']}
Land {data['land']}
Crime {data['crime']}
Disease {data['disease']}
Pollution {data['pollution']}
Commerce {data['commerce']}'''))
embed.add_field(name='Improvements - Power', value=box(f'''
Coal Power Plants {data['imp_coalpower']}
Oil Power Plants {data['imp_oilpower']}
Nuclear Power Plants {data['imp_nuclearpower']}
Wind Power Plants {data['imp_windpower']}'''))
if (nation['continent'] == 'Europe'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Iron Mines {data['imp_ironmine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Asia'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Iron Mines {data['imp_ironmine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Africa'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Bauxite Mines {data['imp_bauxitemine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'South America'):
resources = box(f'''
Oil Wells {data['imp_oilwell']}
Bauxite Mines {data['imp_bauxitemine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'North America'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Iron Mines {data['imp_ironmine']}
Uranium Mines {data['imp_uramine']}
Farms {data['imp_farm']}''')
elif (nation['continent'] == 'Australia'):
resources = box(f'''
Coal Mines {data['imp_coalmine']}
Bauxite Mines {data['imp_bauxitemine']}
Lead Mines {data['imp_leadmine']}
Farms {data['imp_farm']}''')
embed.add_field(name='Improvements - Resources', value=f'{resources}')
embed.add_field(name='Improvements - Production', value=box(f'''
Gas Refineries {data['imp_gasrefinery']}
Steel Mills {data['imp_steelmill']}
Aluminum Refineries {data['imp_aluminumrefinery']}
Munitions Factories {data['imp_munitionsfactory']}'''))
embed.add_field(name='Improvements - Civil', value=box(f'''
Police Stations {data['imp_policestation']}
Hospitals {data['imp_hospital']}
Recycling Centers {data['imp_recyclingcenter']}
Subways {data['imp_subway']}'''))
embed.add_field(name='Improvements - Commerce', value=box(f'''
Supermarkets {data['imp_supermarket']}
Banks {data['imp_bank']}
Malls {data['imp_mall']}
Stadiums {data['imp_stadium']}'''))
embed.add_field(name='Improvements - Military', value=box(f'''
Barracks {data['imp_barracks']}
Factories {data['imp_factory']}
Hangars {data['imp_hangar']}
Drydocks {data['imp_drydock']}'''))
embed.set_footer(text='Info Provided By http://politicsandwar.com/api/')
(await ctx.send(embed=embed))<|docstring|>Provides information about the city linked to the ID you have given.<|endoftext|>
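The continent branch in cityinfo pairs each continent with four raw-resource improvements; the same mapping as a lookup table (a sketch, not part of the cog -- keys mirror the API field names read above):
CONTINENT_RESOURCES = {
    'Europe': ('imp_coalmine', 'imp_ironmine', 'imp_leadmine', 'imp_farm'),
    'Asia': ('imp_oilwell', 'imp_ironmine', 'imp_uramine', 'imp_farm'),
    'Africa': ('imp_oilwell', 'imp_bauxitemine', 'imp_uramine', 'imp_farm'),
    'South America': ('imp_oilwell', 'imp_bauxitemine', 'imp_leadmine', 'imp_farm'),
    'North America': ('imp_coalmine', 'imp_ironmine', 'imp_uramine', 'imp_farm'),
    'Australia': ('imp_coalmine', 'imp_bauxitemine', 'imp_leadmine', 'imp_farm'),
}
fields = CONTINENT_RESOURCES[nation['continent']]  # stands in for the if/elif chain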
163e01b1907ededc5b21ff51b2e318210093268e78baeecc58ec0025ee40bdf8 | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def tradeprice(self, ctx, *, query):
'\n Lookup current avg trading price for a resource including last high and low values.\n\n By default this looks up the price of steel, any incorrect searches will also return steel. \n '
async with ctx.typing():
query = self.escape_query(''.join(query))
trade_data = (await self.tradeprice_lookup(ctx, query))
try:
success = trade_data['success']
if (success == False):
if trade_data['general_message']:
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
if (not trade_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
resource = trade_data['resource']
avgprice = trade_data['avgprice']
marketindex = trade_data['marketindex']
highestbuyamount = trade_data['highestbuy']['amount']
highestbuyprice = trade_data['highestbuy']['price']
highestbuytotal = trade_data['highestbuy']['totalvalue']
highestbuynation = trade_data['highestbuy']['nationid']
highestbuydate = trade_data['highestbuy']['date']
lowestbuyamount = trade_data['lowestbuy']['amount']
lowestbuyprice = trade_data['lowestbuy']['price']
lowestbuytotal = trade_data['lowestbuy']['totalvalue']
lowestbuynation = trade_data['lowestbuy']['nationid']
lowestbuydate = trade_data['lowestbuy']['date']
embed = discord.Embed(title=f'Trade info for {resource}', description=f'''Current Avg Price: ``{avgprice}``
Market Index: ``{marketindex}``''', color=(await ctx.embed_color()))
embed.add_field(name=f'Highest Buy amount: ``{highestbuyamount}``', value=f'''Price: {highestbuyprice}
Total value: {highestbuytotal}
By nation: {highestbuynation}
At {highestbuydate}''', inline=True)
embed.add_field(name=f'Lowest Buy amount: ``{lowestbuyamount}``', value=f'''Price: {lowestbuyprice}
Total value: {lowestbuytotal}
By nation: {lowestbuynation}
At {lowestbuydate}''', inline=True)
(await ctx.send(embed=embed)) | Lookup current avg trading price for a resource including last high and low values.
By default this looks up the price of steel, any incorrect searches will also return steel. | pnw/pnw.py | tradeprice | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def tradeprice(self, ctx, *, query):
'\n Lookup current avg trading price for a resource including last high and low values.\n\n By default this looks up the price of steel, any incorrect searches will also return steel. \n '
async with ctx.typing():
query = self.escape_query(''.join(query))
trade_data = (await self.tradeprice_lookup(ctx, query))
try:
success = trade_data['success']
if (success == False):
if trade_data['general_message']:
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
if (not trade_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
resource = trade_data['resource']
avgprice = trade_data['avgprice']
marketindex = trade_data['marketindex']
highestbuyamount = trade_data['highestbuy']['amount']
highestbuyprice = trade_data['highestbuy']['price']
highestbuytotal = trade_data['highestbuy']['totalvalue']
highestbuynation = trade_data['highestbuy']['nationid']
highestbuydate = trade_data['highestbuy']['date']
lowestbuyamount = trade_data['lowestbuy']['amount']
lowestbuyprice = trade_data['lowestbuy']['price']
lowestbuytotal = trade_data['lowestbuy']['totalvalue']
lowestbuynation = trade_data['lowestbuy']['nationid']
lowestbuydate = trade_data['lowestbuy']['date']
embed = discord.Embed(title=f'Trade info for {resource}', description=f'''Current Avg Price: ``{avgprice}``
Market Index: ``{marketindex}``''', color=(await ctx.embed_color()))
embed.add_field(name=f'Highest Buy amount: ``{highestbuyamount}``', value=f'''Price: {highestbuyprice}
Total value: {highestbuytotal}
By nation: {highestbuynation}
At {highestbuydate}''', inline=True)
embed.add_field(name=f'Lowest Buy amount: ``{lowestbuyamount}``', value=f'''Price: {lowestbuyprice}
Total value: {lowestbuytotal}
By nation: {lowestbuynation}
At {lowestbuydate}''', inline=True)
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def tradeprice(self, ctx, *, query):
'\n Lookup current avg trading price for a resource including last high and low values.\n\n By default this looks up the price of steel, any incorrect searches will also return steel. \n '
async with ctx.typing():
query = self.escape_query(''.join(query))
trade_data = (await self.tradeprice_lookup(ctx, query))
try:
success = trade_data['success']
if (success == False):
if trade_data['general_message']:
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
if (not trade_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
resource = trade_data['resource']
avgprice = trade_data['avgprice']
marketindex = trade_data['marketindex']
highestbuyamount = trade_data['highestbuy']['amount']
highestbuyprice = trade_data['highestbuy']['price']
highestbuytotal = trade_data['highestbuy']['totalvalue']
highestbuynation = trade_data['highestbuy']['nationid']
highestbuydate = trade_data['highestbuy']['date']
lowestbuyamount = trade_data['lowestbuy']['amount']
lowestbuyprice = trade_data['lowestbuy']['price']
lowestbuytotal = trade_data['lowestbuy']['totalvalue']
lowestbuynation = trade_data['lowestbuy']['nationid']
lowestbuydate = trade_data['lowestbuy']['date']
embed = discord.Embed(title=f'Trade info for {resource}', description=f'''Current Avg Price: ``{avgprice}``
Market Index: ``{marketindex}``''', color=(await ctx.embed_color()))
embed.add_field(name=f'Highest Buy amount: ``{highestbuyamount}``', value=f'''Price: {highestbuyprice}
Total value: {highestbuytotal}
By nation: {highestbuynation}
At {highestbuydate}''', inline=True)
embed.add_field(name=f'Lowest Buy amount: ``{lowestbuyamount}``', value=f'''Price: {lowestbuyprice}
Total value: {lowestbuytotal}
By nation: {lowestbuynation}
At {lowestbuydate}''', inline=True)
(await ctx.send(embed=embed))<|docstring|>Lookup current avg trading price for a resource including last high and low values.
By default this looks up the price of steel, any incorrect searches will also return steel.<|endoftext|> |
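For reference, the tradeprice fields the command reads, shown as an illustrative payload (placeholder values, not real market data):
sample = {
    'resource': 'steel',
    'avgprice': 2150,
    'marketindex': 5720,
    'highestbuy': {'amount': 1, 'price': 1, 'totalvalue': 1, 'nationid': 1, 'date': '2019-01-01 00:00:00'},
    'lowestbuy': {'amount': 1, 'price': 1, 'totalvalue': 1, 'nationid': 1, 'date': '2019-01-01 00:00:00'},
}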
c48cf2e568e66342761a76672b8996aae4d3d2d0e857b0fdc145f8aaea957dda | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def bankinfo(self, ctx, *, name):
'\n Lookup bank info for your alliance.\n \n Only available if you have the ability to view the bank data in game,\n and the api key must be set to your api key to work.\n \n '
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
alliances_data = (await self.alliances_lookup(ctx))
success = alliances_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in alliances_data['alliances']:
if (name.lower() == I['name'].lower()):
key = True
alid = I['id']
if (key == True):
pass
else:
alid = name
bank_data = (await self.bank_lookup(ctx, alid))
if (not bank_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
if (bank_data['success'] == False):
(await ctx.send('Unable to access this information. You are not in this alliance.'))
return
else:
pass
name = bank_data['alliance_bank_contents'][0]['name']
alid = bank_data['alliance_bank_contents'][0]['alliance_id']
money = bank_data['alliance_bank_contents'][0]['money']
food = bank_data['alliance_bank_contents'][0]['food']
coal = bank_data['alliance_bank_contents'][0]['coal']
oil = bank_data['alliance_bank_contents'][0]['oil']
uranium = bank_data['alliance_bank_contents'][0]['uranium']
iron = bank_data['alliance_bank_contents'][0]['iron']
bauxite = bank_data['alliance_bank_contents'][0]['bauxite']
lead = bank_data['alliance_bank_contents'][0]['lead']
gasoline = bank_data['alliance_bank_contents'][0]['gasoline']
munitions = bank_data['alliance_bank_contents'][0]['munitions']
steel = bank_data['alliance_bank_contents'][0]['steel']
aluminum = bank_data['alliance_bank_contents'][0]['aluminum']
embed = discord.Embed(title=f'Bank Information for {name}', description=f'Total Money = {money}', color=(await ctx.embed_color()))
embed.add_field(name='Alliance ID', value=alid)
embed.add_field(name='Food', value=food)
embed.add_field(name='Coal', value=coal)
embed.add_field(name='Oil', value=oil)
embed.add_field(name='Uranium', value=uranium)
embed.add_field(name='Iron', value=iron)
embed.add_field(name='Bauxite', value=bauxite)
embed.add_field(name='Lead', value=lead)
embed.add_field(name='Gasoline', value=gasoline)
embed.add_field(name='Munitions', value=munitions)
embed.add_field(name='Steel', value=steel)
embed.add_field(name='Aluminum', value=aluminum)
(await ctx.send(embed=embed)) | Lookup bank info for your alliance.
Only available if you have the ability to view the bank data in game,
and the api key must be set to your api key to work. | pnw/pnw.py | bankinfo | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def bankinfo(self, ctx, *, name):
'\n Lookup bank info for your alliance.\n \n Only available if you have the ability to view the bank data in game,\n and the api key must be set to your api key to work.\n \n '
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
alliances_data = (await self.alliances_lookup(ctx))
success = alliances_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in alliances_data['alliances']:
if (name.lower() == I['name'].lower()):
key = True
alid = I['id']
if (key == True):
pass
else:
alid = name
bank_data = (await self.bank_lookup(ctx, alid))
if (not bank_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
if (bank_data['success'] == False):
(await ctx.send('Unable to access this information. You are not in this alliance.'))
return
else:
pass
name = bank_data['alliance_bank_contents'][0]['name']
alid = bank_data['alliance_bank_contents'][0]['alliance_id']
money = bank_data['alliance_bank_contents'][0]['money']
food = bank_data['alliance_bank_contents'][0]['food']
coal = bank_data['alliance_bank_contents'][0]['coal']
oil = bank_data['alliance_bank_contents'][0]['oil']
uranium = bank_data['alliance_bank_contents'][0]['uranium']
iron = bank_data['alliance_bank_contents'][0]['iron']
bauxite = bank_data['alliance_bank_contents'][0]['bauxite']
lead = bank_data['alliance_bank_contents'][0]['lead']
gasoline = bank_data['alliance_bank_contents'][0]['gasoline']
munitions = bank_data['alliance_bank_contents'][0]['munitions']
steel = bank_data['alliance_bank_contents'][0]['steel']
aluminum = bank_data['alliance_bank_contents'][0]['aluminum']
embed = discord.Embed(title=f'Bank Information for {name}', description=f'Total Money = {money}', color=(await ctx.embed_color()))
embed.add_field(name='Alliance ID', value=alid)
embed.add_field(name='Food', value=food)
embed.add_field(name='Coal', value=coal)
embed.add_field(name='Oil', value=oil)
embed.add_field(name='Uranium', value=uranium)
embed.add_field(name='Iron', value=iron)
embed.add_field(name='Bauxite', value=bauxite)
embed.add_field(name='Lead', value=lead)
embed.add_field(name='Gasoline', value=gasoline)
embed.add_field(name='Munitions', value=munitions)
embed.add_field(name='Steel', value=steel)
embed.add_field(name='Aluminum', value=aluminum)
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def bankinfo(self, ctx, *, name):
'\n Lookup bank info for your alliance.\n \n Only available if you have the ability to view the bank data in game,\n and the api key must be set to your api key to work.\n \n '
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
alliances_data = (await self.alliances_lookup(ctx))
success = alliances_data['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in alliances_data['alliances']:
if (name.lower() == I['name'].lower()):
key = True
alid = I['id']
if (key == True):
pass
else:
alid = name
bank_data = (await self.bank_lookup(ctx, alid))
if (not bank_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
if (bank_data['success'] == False):
(await ctx.send('Unable to access this information. You are not in this alliance.'))
return
else:
pass
name = bank_data['alliance_bank_contents'][0]['name']
alid = bank_data['alliance_bank_contents'][0]['alliance_id']
money = bank_data['alliance_bank_contents'][0]['money']
food = bank_data['alliance_bank_contents'][0]['food']
coal = bank_data['alliance_bank_contents'][0]['coal']
oil = bank_data['alliance_bank_contents'][0]['oil']
uranium = bank_data['alliance_bank_contents'][0]['uranium']
iron = bank_data['alliance_bank_contents'][0]['iron']
bauxite = bank_data['alliance_bank_contents'][0]['bauxite']
lead = bank_data['alliance_bank_contents'][0]['lead']
gasoline = bank_data['alliance_bank_contents'][0]['gasoline']
munitions = bank_data['alliance_bank_contents'][0]['munitions']
steel = bank_data['alliance_bank_contents'][0]['steel']
aluminum = bank_data['alliance_bank_contents'][0]['aluminum']
embed = discord.Embed(title=f'Bank Information for {name}', description=f'Total Money = {money}', color=(await ctx.embed_color()))
embed.add_field(name='Alliance ID', value=alid)
embed.add_field(name='Food', value=food)
embed.add_field(name='Coal', value=coal)
embed.add_field(name='Oil', value=oil)
embed.add_field(name='Uranium', value=uranium)
embed.add_field(name='Iron', value=iron)
embed.add_field(name='Bauxite', value=bauxite)
embed.add_field(name='Lead', value=lead)
embed.add_field(name='Gasoline', value=gasoline)
embed.add_field(name='Munitions', value=munitions)
embed.add_field(name='Steel', value=steel)
embed.add_field(name='Aluminum', value=aluminum)
(await ctx.send(embed=embed))<|docstring|>Lookup bank info for your alliance.
Only available if you have the ability to view the bank data in game,
and the api key must be set to your api key to work.<|endoftext|> |
966c492eb6d4cb828671b098400daa554dd477f3e77a9013907fd592cd3f4451 | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def top50(self, ctx):
'\n Show Top 50 Alliances\n '
top50 = (await self.alliances_lookup(ctx))
success = top50['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
output = ''
for alliance in top50['alliances'][0:50]:
arank = alliance['rank']
aname = alliance['name']
aid = alliance['id']
if (len(aname) > 20):
aname = (aname[0:20] + '...')
output = f'''{output}
{f'{arank}':<{5}}{f'{aname}':<{25}}{f'{aid}':>{5}}'''
(await ctx.send(embed=discord.Embed(title='Top 50 Alliances\n', description=(('```Rank Name ID' + output) + '```'), color=(await ctx.embed_color())).set_footer(text='Info Provided By http://politicsandwar.com/api/'))) | Show Top 50 Alliances | pnw/pnw.py | top50 | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def top50(self, ctx):
'\n \n '
top50 = (await self.alliances_lookup(ctx))
success = top50['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
output = ''
for alliance in top50['alliances'][0:50]:
arank = alliance['rank']
aname = alliance['name']
aid = alliance['id']
if (len(aname) > 20):
aname = (aname[0:20] + '...')
output = f'''{output}
{f'{arank}':<{5}}{f'{aname}':<{25}}{f'{aid}':>{5}}'''
(await ctx.send(embed=discord.Embed(title='Top 50 Alliances\n', description=(('```Rank Name ID' + output) + '```'), color=(await ctx.embed_color())).set_footer(text='Info Provided By http://politicsandwar.com/api/'))) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def top50(self, ctx):
'\n \n '
top50 = (await self.alliances_lookup(ctx))
success = top50['success']
try:
if (success == False):
(await ctx.send(f'Your API key seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
output = ''
for alliance in top50['alliances'][0:50]:
arank = alliance['rank']
aname = alliance['name']
aid = alliance['id']
if (len(aname) > 20):
aname = (aname[0:20] + '...')
output = f'''{output}
{f'{arank}':<{5}}{f'{aname}':<{25}}{f'{aid}':>{5}}'''
(await ctx.send(embed=discord.Embed(title='Top 50 Alliances\n', description=(('```Rank Name ID' + output) + '```'), color=(await ctx.embed_color())).set_footer(text='Info Provided By http://politicsandwar.com/api/')))<|docstring|>Show Top 50 Alliances<|endoftext|> |
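The nested f-strings in top50 pad each column to a fixed width (rank 5, name 25, ID 5); a tiny illustrative example with made-up values:
row = f"{'1':<{5}}{'Arrgh':<{25}}{'913':>{5}}"
# rank and name are left-aligned, the ID right-aligned, matching the 'Rank Name ID' header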
fb6f2e539e9aa7991b0087a7f4aed07ceea0a12468fc922ae4d0a182b519e8ed | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def infra(self, ctx, input: float, tobuy: float, urban=None, cce=None):
'\n Provides the cost of infra accurate to +/- $100,000. Provide urban and/or cce as a command variable to trigger urbanization and cce infra discounts.\n '
if (input < 10000):
if (tobuy < 10000):
if (tobuy > 100):
count = 0
factor = 0
cost = 0
r = ((tobuy / 100) + 1.0)
for _ in range(int(r)):
factor = (input + count)
count = (count + 100)
if (tobuy >= 100):
buying = 100
tobuy = (tobuy - 100)
else:
buying = tobuy
x = ((((factor - 10) ** 2.2) / 710) + 300)
cost = (cost + (x * buying))
else:
x = ((((input - 10) ** 2.2) / 710) + 300)
cost = (x * tobuy)
vars = ['urban', 'cce']
if urban:
urban = urban.lower()
if (urban in vars):
cost = (cost - (cost * 0.05))
if cce:
cce = cce.lower()
if (cce in vars):
cost = (cost - (cost * 0.05))
embed = discord.Embed(title='Infra Cost Calculator', description='To account for discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are currently at the max amount of infrastructure')) | Provides the cost of infra accurate to +/- $100,000. Provide urban and/or cce as a command variable to trigger urbanization and cce infra discounts. | pnw/pnw.py | infra | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def infra(self, ctx, input: float, tobuy: float, urban=None, cce=None):
'\n \n '
if (input < 10000):
if (tobuy < 10000):
if (tobuy > 100):
count = 0
factor = 0
cost = 0
r = ((tobuy / 100) + 1.0)
for _ in range(int(r)):
factor = (input + count)
count = (count + 100)
if (tobuy >= 100):
buying = 100
tobuy = (tobuy - 100)
else:
buying = tobuy
x = ((((factor - 10) ** 2.2) / 710) + 300)
cost = (cost + (x * buying))
else:
x = ((((input - 10) ** 2.2) / 710) + 300)
cost = (x * tobuy)
vars = ['urban', 'cce']
if urban:
urban = urban.lower()
if (urban in vars):
cost = (cost - (cost * 0.05))
if cce:
cce = cce.lower()
if (cce in vars):
cost = (cost - (cost * 0.05))
embed = discord.Embed(title='Infra Cost Calculator', description='To account for discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are currently at the max amount of infrastructure')) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def infra(self, ctx, input: float, tobuy: float, urban=None, cce=None):
'\n \n '
if (input < 10000):
if (tobuy < 10000):
if (tobuy > 100):
count = 0
factor = 0
cost = 0
r = ((tobuy / 100) + 1.0)
for _ in range(int(r)):
factor = (input + count)
count = (count + 100)
if (tobuy >= 100):
buying = 100
tobuy = (tobuy - 100)
else:
buying = tobuy
x = ((((factor - 10) ** 2.2) / 710) + 300)
cost = (cost + (x * buying))
else:
x = ((((input - 10) ** 2.2) / 710) + 300)
cost = (x * tobuy)
vars = ['urban', 'cce']
if urban:
urban = urban.lower()
if (urban in vars):
cost = (cost - (cost * 0.05))
if cce:
cce = cce.lower()
if (cce in vars):
cost = (cost - (cost * 0.05))
embed = discord.Embed(title='Infra Cost Calculator', description='To account for discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are currently at the max amount of infrastructure'))<|docstring|>Provides the cost of infra accurate to +/- $100,000. Provide urban and/or cce as a command variable to trigger urbanization and cce infra discounts.<|endoftext|> |
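The infra loop above applies the marginal rate ((level - 10) ** 2.2) / 710 + 300 in 100-unit chunks; a standalone sketch of the same calculation under that wiki equation, with the urban/CCE discounts omitted:
def infra_cost(current, amount):
    # Re-price each 100-infra step at the level reached so far.
    cost = 0.0
    while amount > 0:
        chunk = min(100.0, amount)
        rate = ((current - 10) ** 2.2) / 710 + 300
        cost += rate * chunk
        current += chunk
        amount -= chunk
    return cost

# e.g. infra_cost(1000, 100) is roughly $578,000 before discounts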
88da1159700b2896b08d1e2c82117cd8529f210493b618a171403295f49afe63 | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def land(self, ctx, input: float, tobuy: float):
'\n Provides the cost of land accurate to +/- $100,000.\n '
if (input < 10000):
if (tobuy < 10000):
if (tobuy > 500):
count = 0
factor = 0
cost = 0
r = ((tobuy // 500) + 1.0)
for _ in range(int(r)):
factor = (input + count)
count = (count + 500)
if (tobuy >= 500):
buying = 500
tobuy = (tobuy - 500)
else:
buying = tobuy
x = ((0.002 * ((factor - 20) ** 2)) + 50)
cost = (cost + (x * buying))
else:
x = ((0.002 * ((input - 20) ** 2)) + 50)
cost = (x * tobuy)
embed = discord.Embed(title='Land Cost Calculator', description='To account for discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are at the max amount of land')) | Provides the cost of land accurate to +/- $100,000. | pnw/pnw.py | land | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def land(self, ctx, input: float, tobuy: float):
'\n \n '
if (input < 10000):
if (tobuy < 10000):
if (tobuy > 500):
count = 0
factor = 0
cost = 0
r = ((tobuy // 500) + 1.0)
for _ in range(int(r)):
factor = (input + count)
count = (count + 500)
if (tobuy >= 500):
buying = 500
tobuy = (tobuy - 500)
else:
buying = tobuy
x = ((0.002 * ((factor - 20) ** 2)) + 50)
cost = (cost + (x * buying))
else:
x = ((0.002 * ((input - 20) ** 2)) + 50)
cost = (x * tobuy)
embed = discord.Embed(title='Land Cost Calculator', description='To account for discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are at the max amount of land')) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def land(self, ctx, input: float, tobuy: float):
'\n \n '
if (input < 10000):
if (tobuy < 10000):
if (tobuy > 500):
count = 0
factor = 0
cost = 0
r = ((tobuy // 500) + 1.0)
for _ in range(int(r)):
factor = (input + count)
count = (count + 500)
if (tobuy >= 500):
buying = 500
tobuy = (tobuy - 500)
else:
buying = tobuy
x = ((0.002 * ((factor - 20) ** 2)) + 50)
cost = (cost + (x * buying))
else:
x = ((0.002 * ((input - 20) ** 2)) + 50)
cost = (x * tobuy)
embed = discord.Embed(title='Land Cost Calculator', description='To accommodate discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are at the max amount of land'))<|docstring|>Provides the cost of land accurate to +/- $100,000.<|endoftext|> |
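The land command uses the same chunked loop with a quadratic curve; a standalone sketch, with land_unit_cost as an illustrative name:
def land_unit_cost(level):
    # Marginal price of one unit of land at the given level, per the wiki formula above.
    return 0.002 * (level - 20) ** 2 + 50
# Worked check: at 500 land, 0.002 * 480 ** 2 + 50 = 510.8 per unit.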
14520f6eabd623e3a9b012e687e5b01a60e9d89b6bd46be29db7aad54acedf49 | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def citycost(self, ctx, city: int):
'\n Provides the cost of the next city accurate to +/- $100,000.\n '
if (city < 100):
cost = (((50000 * ((city - 1) ** 3)) + (150000 * city)) + 75000)
embed = discord.Embed(title='City Cost Calculator', description='To accommodate discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are at the max amount of cities')) | Provides the cost of the next city accurate to +/- $100,000. | pnw/pnw.py | citycost | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def citycost(self, ctx, city: int):
'\n \n '
if (city < 100):
cost = (((50000 * ((city - 1) ** 3)) + (150000 * city)) + 75000)
embed = discord.Embed(title='City Cost Calculator', description='To accommodate discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are at the max amount of cities')) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def citycost(self, ctx, city: int):
'\n \n '
if (city < 100):
cost = (((50000 * ((city - 1) ** 3)) + (150000 * city)) + 75000)
embed = discord.Embed(title='City Cost Calculator', description='To accommodate discrepancies in this calculator, please ensure you are capable of paying +/- $100,000 of what is given here!', color=(await ctx.embed_color()))
embed.add_field(name='Total:', value=f'${cost:,.2f}')
embed.set_footer(text='Results generated based on equations provided by http://politicsandwar.wikia.com/wiki/')
(await ctx.send(embed=embed))
else:
(await ctx.send('You are at the max amount of cities'))<|docstring|>Provides the cost of the next city accurate to +/- $100,000.<|endoftext|> |
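The next-city price above is cubic in the city number; a worked check for city number 11:
cost = ((50000 * ((11 - 1) ** 3)) + (150000 * 11)) + 75000
# = 50,000,000 + 1,650,000 + 75,000 = 51,725,000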
eef1a885f7b71fbea42204304e8f6b4e9663c640aee94d762eb369d43e5b1c83 | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def military(self, ctx, *, name):
'\n Military Lookup\n '
(await ctx.send('This may take a while.....'))
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
nations_data = (await self.nations_lookup(ctx))
success = nations_data['success']
try:
if (success == False):
(await ctx.send(f'Your API seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in nations_data['nations']:
if (name.lower() == I['nation'].lower()):
key = True
nid = I['nationid']
if (key == True):
pass
else:
nid = name
nation_data = (await self.do_lookup(ctx, nid))
if (not nation_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
success = nation_data['success']
if (success == False):
(await ctx.send('No such nation exists! Please enter a valid nation ID'))
return
name = nation_data['name']
nationid = nation_data['nationid']
score = nation_data['score']
soldiers = nation_data['soldiers']
tank = nation_data['tanks']
aircraft = nation_data['aircraft']
ships = nation_data['ships']
missiles = nation_data['missiles']
nukes = nation_data['nukes']
if (nation_data['allianceposition'] == '5'):
alliancepos = 'Leader'
elif (nation_data['allianceposition'] == '4'):
alliancepos = 'Vice Leader'
elif (nation_data['allianceposition'] == '3'):
alliancepos = 'Officer'
elif (nation_data['allianceposition'] == '2'):
alliancepos = 'Member'
elif (nation_data['allianceposition'] == '1'):
alliancepos = 'Applicant'
elif (nation_data['allianceposition'] == '0'):
alliancepos = 'None'
if (nation_data['allianceid'] == '0'):
allianceid = 'None'
else:
allianceid = nation_data['allianceid']
intscore = float(score)
low_defense_range = int((intscore * 0.57143))
high_defense_range = int((intscore * 1.33335))
low_offense_range = int((intscore * 0.75))
high_offense_range = int((intscore * 1.75))
embed = discord.Embed(title='Military Info for {}'.format(name), description='Alliance Name: {}\nAlliance ID: {}\nAlliance Position: {}'.format(nation_data['alliance'], allianceid, alliancepos), url='https://politicsandwar.com/nation/id={}'.format(nationid), color=(await ctx.embed_color()))
embed.add_field(name='Military Stats', value=f'''
Score: {score}
Soldiers: {soldiers}
Tanks: {tank}
Aircraft: {aircraft}
Ships: {ships}
Missiles: {missiles}
Nukes: {nukes}
Defense Range: {low_defense_range} - {high_defense_range}
Offense Range: {low_offense_range} - {high_offense_range}''')
(await ctx.send(embed=embed)) | Military Lookup | pnw/pnw.py | military | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def military(self, ctx, *, name):
'\n \n '
(await ctx.send('This may take a while.....'))
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
nations_data = (await self.nations_lookup(ctx))
success = nations_data['success']
try:
if (success == False):
(await ctx.send(f'Your API seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in nations_data['nations']:
if (name.lower() == I['nation'].lower()):
key = True
nid = I['nationid']
if (key == True):
pass
else:
nid = name
nation_data = (await self.do_lookup(ctx, nid))
if (not nation_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
success = nation_data['success']
if (success == False):
(await ctx.send('No such nation exists! Please enter a valid nation ID'))
return
name = nation_data['name']
nationid = nation_data['nationid']
score = nation_data['score']
soldiers = nation_data['soldiers']
tank = nation_data['tanks']
aircraft = nation_data['aircraft']
ships = nation_data['ships']
missiles = nation_data['missiles']
nukes = nation_data['nukes']
if (nation_data['allianceposition'] == '5'):
alliancepos = 'Leader'
elif (nation_data['allianceposition'] == '4'):
alliancepos = 'Vice Leader'
elif (nation_data['allianceposition'] == '3'):
alliancepos = 'Officer'
elif (nation_data['allianceposition'] == '2'):
alliancepos = 'Member'
elif (nation_data['allianceposition'] == '1'):
alliancepos = 'Applicant'
elif (nation_data['allianceposition'] == '0'):
alliancepos = 'None'
if (nation_data['allianceid'] == '0'):
allianceid = 'None'
else:
allianceid = nation_data['allianceid']
intscore = float(score)
low_defense_range = int((intscore * 0.57143))
high_defense_range = int((intscore * 1.33335))
low_offense_range = int((intscore * 0.75))
high_offense_range = int((intscore * 1.75))
embed = discord.Embed(title='Military Info for {}'.format(name), description='Alliance Name: {}\nAlliance ID: {}\nAlliance Position: {}'.format(nation_data['alliance'], allianceid, alliancepos), url='https://politicsandwar.com/nation/id={}'.format(nationid), color=(await ctx.embed_color()))
embed.add_field(name='Military Stats', value=f'''
Score: {score}
Soldiers: {soldiers}
Tanks: {tank}
Aircraft: {aircraft}
Ships: {ships}
Missiles: {missiles}
Nukes: {nukes}
Defense Range: {low_defense_range} - {high_defense_range}
Offense Range: {low_offense_range} - {high_offense_range}''')
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def military(self, ctx, *, name):
'\n \n '
(await ctx.send('This may take a while.....'))
async with ctx.typing():
name = self.escape_query(''.join(name))
key = False
nations_data = (await self.nations_lookup(ctx))
success = nations_data['success']
try:
if (success == False):
(await ctx.send(f'Your API seems to be invalid, make sure it is correct and follow the instructions in {ctx.prefix}pnwkey'))
return
except:
pass
for I in nations_data['nations']:
if (name.lower() == I['nation'].lower()):
key = True
nid = I['nationid']
if (key == True):
pass
else:
nid = name
nation_data = (await self.do_lookup(ctx, nid))
if (not nation_data):
(await ctx.send("I can't get the data from the API. Try again later."))
return
success = nation_data['success']
if (success == False):
(await ctx.send('No such nation exists! Please enter a valid nation ID'))
return
name = nation_data['name']
nationid = nation_data['nationid']
score = nation_data['score']
soldiers = nation_data['soldiers']
tank = nation_data['tanks']
aircraft = nation_data['aircraft']
ships = nation_data['ships']
missiles = nation_data['missiles']
nukes = nation_data['nukes']
if (nation_data['allianceposition'] == '5'):
alliancepos = 'Leader'
elif (nation_data['allianceposition'] == '4'):
alliancepos = 'Vice Leader'
elif (nation_data['allianceposition'] == '3'):
alliancepos = 'Officer'
elif (nation_data['allianceposition'] == '2'):
alliancepos = 'Member'
elif (nation_data['allianceposition'] == '1'):
alliancepos = 'Applicant'
elif (nation_data['allianceposition'] == '0'):
alliancepos = 'None'
if (nation_data['allianceid'] == '0'):
allianceid = 'None'
else:
allianceid = nation_data['allianceid']
intscore = float(score)
low_defense_range = int((intscore * 0.57143))
high_defense_range = int((intscore * 1.33335))
low_offense_range = int((intscore * 0.75))
high_offense_range = int((intscore * 1.75))
embed = discord.Embed(title='Military Info for {}'.format(name), description='Alliance Name: {}\nAlliance ID: {}\nAlliance Position: {}'.format(nation_data['alliance'], allianceid, alliancepos), url='https://politicsandwar.com/nation/id={}'.format(nationid), color=(await ctx.embed_color()))
embed.add_field(name='Military Stats', value=f'''
Score: {score}
Soldiers: {soldiers}
Tanks: {tank}
Aircraft: {aircraft}
Ships: {ships}
Missiles: {missiles}
Nukes: {nukes}
Defense Range: {low_defense_range} - {high_defense_range}
Offense Range: {low_offense_range} - {high_offense_range}''')
(await ctx.send(embed=embed))<|docstring|>Military Lookup<|endoftext|> |
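The war ranges above are fixed multiples of nation score; a standalone check of the multipliers used in the code:
score = 1000.0
defense_range = (int(score * 0.57143), int(score * 1.33335))  # (571, 1333)
offense_range = (int(score * 0.75), int(score * 1.75))  # (750, 1750)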
dee3aac588b5a9c4244ddb4c6e2cbf682040205c833f6833edf8c2069888f35c | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def pnwcredits(self, ctx):
'\n Credits for the PNW cog\n '
embed = discord.Embed(title='Credits go to Requiem bot/Kyle Tyo for various aspects of this PNW cog', description='Requiem can be found here, https://gitlab.com/AnakiKaiver297/Requiem-Project, specific thanks for the various calculations and name searching for alliances/nations', color=(await ctx.embed_color()))
(await ctx.send(embed=embed)) | Credits for the PNW cog | pnw/pnw.py | pnwcredits | ltzmax/kennnyshiwa-cogs | 21 | python | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def pnwcredits(self, ctx):
'\n \n '
embed = discord.Embed(title='Credits go to Requiem bot/Kyle Tyo for various aspects of this PNW cog', description='Requiem can be found here, https://gitlab.com/AnakiKaiver297/Requiem-Project, specific thanks for the various calculations and name searching for alliances/nations', color=(await ctx.embed_color()))
(await ctx.send(embed=embed)) | @commands.bot_has_permissions(embed_links=True)
@commands.command()
async def pnwcredits(self, ctx):
'\n \n '
embed = discord.Embed(title='Credits go to Requiem bot/Kyle Tyo for various aspects of this PNW cog', description='Requiem can be found here, https://gitlab.com/AnakiKaiver297/Requiem-Project, specific thanks for the various calculations and name searching for alliances/nations', color=(await ctx.embed_color()))
(await ctx.send(embed=embed))<|docstring|>Credits for the PNW cog<|endoftext|> |
880a1f92f3d5b31b8ec46363bf22e7a0cc5c1e204b35ac25e71f046aa9dd7f4d | def __init__(self, id=None, parent_id=None, object_type=None, object_name=None, object_alias_name=None, select=None):
'DatabaseInfo - a model defined in huaweicloud sdk'
self._id = None
self._parent_id = None
self._object_type = None
self._object_name = None
self._object_alias_name = None
self._select = None
self.discriminator = None
self.id = id
if (parent_id is not None):
self.parent_id = parent_id
self.object_type = object_type
self.object_name = object_name
if (object_alias_name is not None):
self.object_alias_name = object_alias_name
if (select is not None):
self.select = select | DatabaseInfo - a model defined in huaweicloud sdk | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | __init__ | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | def __init__(self, id=None, parent_id=None, object_type=None, object_name=None, object_alias_name=None, select=None):
self._id = None
self._parent_id = None
self._object_type = None
self._object_name = None
self._object_alias_name = None
self._select = None
self.discriminator = None
self.id = id
if (parent_id is not None):
self.parent_id = parent_id
self.object_type = object_type
self.object_name = object_name
if (object_alias_name is not None):
self.object_alias_name = object_alias_name
if (select is not None):
self.select = select | def __init__(self, id=None, parent_id=None, object_type=None, object_name=None, object_alias_name=None, select=None):
self._id = None
self._parent_id = None
self._object_type = None
self._object_name = None
self._object_alias_name = None
self._select = None
self.discriminator = None
self.id = id
if (parent_id is not None):
self.parent_id = parent_id
self.object_type = object_type
self.object_name = object_name
if (object_alias_name is not None):
self.object_alias_name = object_alias_name
if (select is not None):
self.select = select<|docstring|>DatabaseInfo - a model defined in huaweicloud sdk<|endoftext|> |
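A hypothetical construction of the model above (the field values are made-up examples): optional fields are assigned only when passed, so their setters never run for omitted arguments.
db = DatabaseInfo(id='mydb', object_type='database', object_name='mydb', select='true')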
e6e4abacb507b78ed72f2202bd492f0f118888896e6426ac5fbdcb68ea4b306d | @property
def id(self):
    'Gets the id of this DatabaseInfo.\n\n When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.\n\n :return: The id of this DatabaseInfo.\n :rtype: str\n '
    return self._id | Gets the id of this DatabaseInfo.
When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.
:return: The id of this DatabaseInfo.
:rtype: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | id | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @property
def id(self):
    'Gets the id of this DatabaseInfo.\n\n When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.\n\n :return: The id of this DatabaseInfo.\n :rtype: str\n '
    return self._id
@property
def id(self):
    'Gets the id of this DatabaseInfo.\n\n When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.\n\n :return: The id of this DatabaseInfo.\n :rtype: str\n '
    return self._id<|docstring|>Gets the id of this DatabaseInfo.
When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.
:return: The id of this DatabaseInfo.
:rtype: str<|endoftext|>
c8da316152db30074db703a43a857fd1344d9925337e6a21dd8c5849a7204750 | @id.setter
def id(self, id):
    'Sets the id of this DatabaseInfo.\n\n When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.\n\n :param id: The id of this DatabaseInfo.\n :type: str\n '
    self._id = id | Sets the id of this DatabaseInfo.
When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.
:param id: The id of this DatabaseInfo.
:type: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | id | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @id.setter
def id(self, id):
    'Sets the id of this DatabaseInfo.\n\n When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.\n\n :param id: The id of this DatabaseInfo.\n :type: str\n '
    self._id = id
@id.setter
def id(self, id):
    'Sets the id of this DatabaseInfo.\n\n When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.\n\n :param id: The id of this DatabaseInfo.\n :type: str\n '
    self._id = id<|docstring|>Sets the id of this DatabaseInfo.
When object_type is database, this is the database name; when object_type is table or view, refer to the example for the field value.
:param id: The id of this DatabaseInfo.
:type: str<|endoftext|>
e8b1e4e719b8201d27a50eabbee04de2c42bb68956192b6317dee08772fcd57e | @property
def parent_id(self):
    'Gets the parent_id of this DatabaseInfo.\n\n Required when object_type is table or view; this is the database name.\n\n :return: The parent_id of this DatabaseInfo.\n :rtype: str\n '
    return self._parent_id | Gets the parent_id of this DatabaseInfo.
Required when object_type is table or view; this is the database name.
:return: The parent_id of this DatabaseInfo.
:rtype: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | parent_id | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @property
def parent_id(self):
    'Gets the parent_id of this DatabaseInfo.\n\n Required when object_type is table or view; this is the database name.\n\n :return: The parent_id of this DatabaseInfo.\n :rtype: str\n '
    return self._parent_id
@property
def parent_id(self):
    'Gets the parent_id of this DatabaseInfo.\n\n Required when object_type is table or view; this is the database name.\n\n :return: The parent_id of this DatabaseInfo.\n :rtype: str\n '
    return self._parent_id<|docstring|>Gets the parent_id of this DatabaseInfo.
Required when object_type is table or view; this is the database name.
:return: The parent_id of this DatabaseInfo.
:rtype: str<|endoftext|>
9988210e50a44e56e2f723fab5ddf0d5b1881984719f0b2b0f4aa6c4411836da | @parent_id.setter
def parent_id(self, parent_id):
    'Sets the parent_id of this DatabaseInfo.\n\n Required when object_type is table or view; this is the database name.\n\n :param parent_id: The parent_id of this DatabaseInfo.\n :type: str\n '
    self._parent_id = parent_id | Sets the parent_id of this DatabaseInfo.
Required when object_type is table or view; this is the database name.
:param parent_id: The parent_id of this DatabaseInfo.
:type: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | parent_id | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @parent_id.setter
def parent_id(self, parent_id):
    'Sets the parent_id of this DatabaseInfo.\n\n Required when object_type is table or view; this is the database name.\n\n :param parent_id: The parent_id of this DatabaseInfo.\n :type: str\n '
    self._parent_id = parent_id
@parent_id.setter
def parent_id(self, parent_id):
    'Sets the parent_id of this DatabaseInfo.\n\n Required when object_type is table or view; this is the database name.\n\n :param parent_id: The parent_id of this DatabaseInfo.\n :type: str\n '
    self._parent_id = parent_id<|docstring|>Sets the parent_id of this DatabaseInfo.
Required when object_type is table or view; this is the database name.
:param parent_id: The parent_id of this DatabaseInfo.
:type: str<|endoftext|>
0aa204f398cee1f168af84615d68bf9ea996ccead6a99f7deb5babdcf51affe0 | @property
def object_type(self):
    'Gets the object_type of this DatabaseInfo.\n\n Type\n\n :return: The object_type of this DatabaseInfo.\n :rtype: str\n '
    return self._object_type | Gets the object_type of this DatabaseInfo.
Type
:return: The object_type of this DatabaseInfo.
:rtype: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | object_type | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @property
def object_type(self):
    'Gets the object_type of this DatabaseInfo.\n\n Type\n\n :return: The object_type of this DatabaseInfo.\n :rtype: str\n '
    return self._object_type
@property
def object_type(self):
    'Gets the object_type of this DatabaseInfo.\n\n Type\n\n :return: The object_type of this DatabaseInfo.\n :rtype: str\n '
    return self._object_type<|docstring|>Gets the object_type of this DatabaseInfo.
Type
:return: The object_type of this DatabaseInfo.
:rtype: str<|endoftext|>
5b6e89bf25465a9ca5ebc6efbbe963a44f658eed2a45666f54c1ea7974af4863 | @object_type.setter
def object_type(self, object_type):
    'Sets the object_type of this DatabaseInfo.\n\n Type\n\n :param object_type: The object_type of this DatabaseInfo.\n :type: str\n '
    self._object_type = object_type | Sets the object_type of this DatabaseInfo.
Type
:param object_type: The object_type of this DatabaseInfo.
:type: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | object_type | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @object_type.setter
def object_type(self, object_type):
    'Sets the object_type of this DatabaseInfo.\n\n Type\n\n :param object_type: The object_type of this DatabaseInfo.\n :type: str\n '
    self._object_type = object_type
@object_type.setter
def object_type(self, object_type):
    'Sets the object_type of this DatabaseInfo.\n\n Type\n\n :param object_type: The object_type of this DatabaseInfo.\n :type: str\n '
    self._object_type = object_type<|docstring|>Sets the object_type of this DatabaseInfo.
Type
:param object_type: The object_type of this DatabaseInfo.
:type: str<|endoftext|>
36d0e932d021b39eb93402dae09e85eae18408d57bc8f06d8d649de48811338c | @property
def object_name(self):
    'Gets the object_name of this DatabaseInfo.\n\n Database object name: a database, table, or view name.\n\n :return: The object_name of this DatabaseInfo.\n :rtype: str\n '
    return self._object_name | Gets the object_name of this DatabaseInfo.
Database object name: a database, table, or view name.
:return: The object_name of this DatabaseInfo.
:rtype: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | object_name | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @property
def object_name(self):
    'Gets the object_name of this DatabaseInfo.\n\n Database object name: a database, table, or view name.\n\n :return: The object_name of this DatabaseInfo.\n :rtype: str\n '
    return self._object_name
@property
def object_name(self):
    'Gets the object_name of this DatabaseInfo.\n\n Database object name: a database, table, or view name.\n\n :return: The object_name of this DatabaseInfo.\n :rtype: str\n '
    return self._object_name<|docstring|>Gets the object_name of this DatabaseInfo.
Database object name: a database, table, or view name.
:return: The object_name of this DatabaseInfo.
:rtype: str<|endoftext|>
281b870a5838a16ea227ad3d77314628a922197e74f24de04717accf7534f39f | @object_name.setter
def object_name(self, object_name):
    'Sets the object_name of this DatabaseInfo.\n\n Database object name: a database, table, or view name.\n\n :param object_name: The object_name of this DatabaseInfo.\n :type: str\n '
    self._object_name = object_name | Sets the object_name of this DatabaseInfo.
Database object name: a database, table, or view name.
:param object_name: The object_name of this DatabaseInfo.
:type: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | object_name | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @object_name.setter
def object_name(self, object_name):
    'Sets the object_name of this DatabaseInfo.\n\n Database object name: a database, table, or view name.\n\n :param object_name: The object_name of this DatabaseInfo.\n :type: str\n '
    self._object_name = object_name
@object_name.setter
def object_name(self, object_name):
    'Sets the object_name of this DatabaseInfo.\n\n Database object name: a database, table, or view name.\n\n :param object_name: The object_name of this DatabaseInfo.\n :type: str\n '
    self._object_name = object_name<|docstring|>Sets the object_name of this DatabaseInfo.
Database object name: a database, table, or view name.
:param object_name: The object_name of this DatabaseInfo.
:type: str<|endoftext|>
c63cbe6ca78bcd86a234c3e8bc52f76ca467bf593c42081bace5c06740944d6e | @property
def object_alias_name(self):
    'Gets the object_alias_name of this DatabaseInfo.\n\n Alias, the new name after mapping.\n\n :return: The object_alias_name of this DatabaseInfo.\n :rtype: str\n '
    return self._object_alias_name | Gets the object_alias_name of this DatabaseInfo.
Alias, the new name after mapping.
:return: The object_alias_name of this DatabaseInfo.
:rtype: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | object_alias_name | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @property
def object_alias_name(self):
    'Gets the object_alias_name of this DatabaseInfo.\n\n Alias, the new name after mapping.\n\n :return: The object_alias_name of this DatabaseInfo.\n :rtype: str\n '
    return self._object_alias_name
@property
def object_alias_name(self):
    'Gets the object_alias_name of this DatabaseInfo.\n\n Alias, the new name after mapping.\n\n :return: The object_alias_name of this DatabaseInfo.\n :rtype: str\n '
    return self._object_alias_name<|docstring|>Gets the object_alias_name of this DatabaseInfo.
Alias, the new name after mapping.
:return: The object_alias_name of this DatabaseInfo.
:rtype: str<|endoftext|>
da4ce008d732f50b549a56aadeda0706ed9ae17efacd408bc1128335487c024d | @object_alias_name.setter
def object_alias_name(self, object_alias_name):
    'Sets the object_alias_name of this DatabaseInfo.\n\n Alias, the new name after mapping.\n\n :param object_alias_name: The object_alias_name of this DatabaseInfo.\n :type: str\n '
    self._object_alias_name = object_alias_name | Sets the object_alias_name of this DatabaseInfo.
Alias, the new name after mapping.
:param object_alias_name: The object_alias_name of this DatabaseInfo.
:type: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | object_alias_name | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @object_alias_name.setter
def object_alias_name(self, object_alias_name):
    'Sets the object_alias_name of this DatabaseInfo.\n\n Alias, the new name after mapping.\n\n :param object_alias_name: The object_alias_name of this DatabaseInfo.\n :type: str\n '
    self._object_alias_name = object_alias_name
@object_alias_name.setter
def object_alias_name(self, object_alias_name):
    'Sets the object_alias_name of this DatabaseInfo.\n\n Alias, the new name after mapping.\n\n :param object_alias_name: The object_alias_name of this DatabaseInfo.\n :type: str\n '
    self._object_alias_name = object_alias_name<|docstring|>Sets the object_alias_name of this DatabaseInfo.
Alias, the new name after mapping.
:param object_alias_name: The object_alias_name of this DatabaseInfo.
:type: str<|endoftext|>
5f8e1564292edd3e3112444db7aa17b1e65b2ff192ed262cd809c2392346a46b | @property
def select(self):
    'Gets the select of this DatabaseInfo.\n\n Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.\n\n :return: The select of this DatabaseInfo.\n :rtype: str\n '
    return self._select | Gets the select of this DatabaseInfo.
Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.
:return: The select of this DatabaseInfo.
:rtype: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | select | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @property
def select(self):
    'Gets the select of this DatabaseInfo.\n\n Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.\n\n :return: The select of this DatabaseInfo.\n :rtype: str\n '
    return self._select
@property
def select(self):
    'Gets the select of this DatabaseInfo.\n\n Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.\n\n :return: The select of this DatabaseInfo.\n :rtype: str\n '
    return self._select<|docstring|>Gets the select of this DatabaseInfo.
Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.
:return: The select of this DatabaseInfo.
:rtype: str<|endoftext|>
97b510df20f2dbdd192599d253b5237f809b1faf8b457922c4b9a61fc7535849 | @select.setter
def select(self, select):
    'Sets the select of this DatabaseInfo.\n\n Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.\n\n :param select: The select of this DatabaseInfo.\n :type: str\n '
    self._select = select | Sets the select of this DatabaseInfo.
Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.
:param select: The select of this DatabaseInfo.
:type: str | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | select | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | @select.setter
def select(self, select):
    'Sets the select of this DatabaseInfo.\n\n Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.\n\n :param select: The select of this DatabaseInfo.\n :type: str\n '
    self._select = select
@select.setter
def select(self, select):
    'Sets the select of this DatabaseInfo.\n\n Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.\n\n :param select: The select of this DatabaseInfo.\n :type: str\n '
    self._select = select<|docstring|>Sets the select of this DatabaseInfo.
Whether the object is selected: true means this database object will be migrated, false means it will not be migrated, and partial means only some of the tables under the database are migrated; defaults to false if not set.
:param select: The select of this DatabaseInfo.
:type: str<|endoftext|>
23795442a46e2cd10dec98fded44ed9172a29971e98983a30ad89baa6c9c0a03 | def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | Returns the model properties as a dict | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | to_dict | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
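to_dict above recurses into list and dict members (calling to_dict wherever it exists) and masks any attribute named in sensitive_list; continuing the hypothetical instance from the __init__ example:
print(db.to_dict())  # optional fields that were never set come back as None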
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99 | def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | Returns the string representation of the model | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | to_str | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | def to_str(self):
return pprint.pformat(self.to_dict()) | def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703 | def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | For `print` and `pprint` | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | __repr__ | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | def __repr__(self):
return self.to_str() | def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
625e44cfb1abbb5465c009e459009ccdcb1d9442cf22dd9a9fa35c53c5710fe0 | def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, DatabaseInfo)):
return False
return (self.__dict__ == other.__dict__) | Returns true if both objects are equal | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | __eq__ | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | def __eq__(self, other):
if (not isinstance(other, DatabaseInfo)):
return False
return (self.__dict__ == other.__dict__) | def __eq__(self, other):
if (not isinstance(other, DatabaseInfo)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|> |
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42 | def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | Returns true if both objects are not equal | huaweicloud-sdk-drs/huaweicloudsdkdrs/v3/model/database_info.py | __ne__ | NQLoong/huaweicloud-sdk-python-v3 | 1 | python | def __ne__(self, other):
return (not (self == other)) | def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|> |
66d33b19eb490f8a4d971b4e5ecb9a3dfc8aa1c796e9ca9475c82a615e650109 | def _maybe_cache(arg, format, cache, tz, convert_listlike):
'\n Create a cache of unique dates from an array of dates\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n format : string\n Strftime format to parse time\n cache : boolean\n True attempts to create a cache of converted values\n tz : string\n Timezone of the dates\n convert_listlike : function\n Conversion function to apply on dates\n\n Returns\n -------\n cache_array : Series\n Cache of converted, unique dates. Can be empty\n '
from pandas import Series
cache_array = Series()
if cache:
from pandas import Index
if (not Index(arg).is_unique):
unique_dates = algorithms.unique(arg)
cache_dates = convert_listlike(unique_dates, True, format, tz=tz)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array | Create a cache of unique dates from an array of dates
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
tz : string
Timezone of the dates
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty | venv/Lib/site-packages/pandas/core/tools/datetimes.py | _maybe_cache | shehzadulislam/Quiz2Shehzad | 69 | python | def _maybe_cache(arg, format, cache, tz, convert_listlike):
'\n Create a cache of unique dates from an array of dates\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n format : string\n Strftime format to parse time\n cache : boolean\n True attempts to create a cache of converted values\n tz : string\n Timezone of the dates\n convert_listlike : function\n Conversion function to apply on dates\n\n Returns\n -------\n cache_array : Series\n Cache of converted, unique dates. Can be empty\n '
from pandas import Series
cache_array = Series()
if cache:
from pandas import Index
if (not Index(arg).is_unique):
unique_dates = algorithms.unique(arg)
cache_dates = convert_listlike(unique_dates, True, format, tz=tz)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array | def _maybe_cache(arg, format, cache, tz, convert_listlike):
'\n Create a cache of unique dates from an array of dates\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n format : string\n Strftime format to parse time\n cache : boolean\n True attempts to create a cache of converted values\n tz : string\n Timezone of the dates\n convert_listlike : function\n Conversion function to apply on dates\n\n Returns\n -------\n cache_array : Series\n Cache of converted, unique dates. Can be empty\n '
from pandas import Series
cache_array = Series()
if cache:
from pandas import Index
if (not Index(arg).is_unique):
unique_dates = algorithms.unique(arg)
cache_dates = convert_listlike(unique_dates, True, format, tz=tz)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array<|docstring|>Create a cache of unique dates from an array of dates
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
tz : string
Timezone of the dates
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty<|endoftext|> |
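The cache built above only pays off when the input contains duplicates; the core trick, as a standalone pandas sketch:
import pandas as pd
raw = pd.Series(['2017-01-01', '2017-01-01', '2017-01-02'])
unique_dates = raw.unique()  # parse each distinct value once
cache = pd.Series(pd.to_datetime(unique_dates), index=unique_dates)
parsed = raw.map(cache)  # duplicates become cheap lookups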
756cca70d974e14d4aa44c3c18a9614af4234a9eea288800c3df667aaed533cb | def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"\n Convert array of dates with a cache and box the result\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n cache_array : Series\n Cache of converted, unique dates\n box : boolean\n True boxes result as an Index-like, False returns an ndarray\n errors : string\n 'ignore' plus box=True will convert result to Index\n name : string, default None\n Name for a DatetimeIndex\n\n Returns\n -------\n result : datetime of converted dates\n Returns:\n\n - Index-like if box=True\n - ndarray if box=False\n "
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if (errors == 'ignore'):
return Index(result)
else:
return DatetimeIndex(result, name=name)
return result.values | Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False | venv/Lib/site-packages/pandas/core/tools/datetimes.py | _convert_and_box_cache | shehzadulislam/Quiz2Shehzad | 69 | python | def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"\n Convert array of dates with a cache and box the result\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n cache_array : Series\n Cache of converted, unique dates\n box : boolean\n True boxes result as an Index-like, False returns an ndarray\n errors : string\n 'ignore' plus box=True will convert result to Index\n name : string, default None\n Name for a DatetimeIndex\n\n Returns\n -------\n result : datetime of converted dates\n Returns:\n\n - Index-like if box=True\n - ndarray if box=False\n "
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if (errors == 'ignore'):
return Index(result)
else:
return DatetimeIndex(result, name=name)
return result.values | def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"\n Convert array of dates with a cache and box the result\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n cache_array : Series\n Cache of converted, unique dates\n box : boolean\n True boxes result as an Index-like, False returns an ndarray\n errors : string\n 'ignore' plus box=True will convert result to Index\n name : string, default None\n Name for a DatetimeIndex\n\n Returns\n -------\n result : datetime of converted dates\n Returns:\n\n - Index-like if box=True\n - ndarray if box=False\n "
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if (errors == 'ignore'):
return Index(result)
else:
return DatetimeIndex(result, name=name)
return result.values<|docstring|>Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False<|endoftext|> |
81db84e863d3af90219e7d9bb54c329e45574e90f481344e7617016ff7be8b5a | def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False):
'\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n\n .. versionadded:: 0.18.1\n\n or DataFrame/dict-like\n\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n\n - If \'raise\', then invalid parsing will raise an exception\n - If \'coerce\', then invalid parsing will be set as NaT\n - If \'ignore\', then invalid parsing will return the input\n dayfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n If True, parses dates with the day first, eg 10/11/12 is parsed as\n 2012-11-10.\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug, based on dateutil behavior).\n yearfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n\n - If True parses dates with the year first, eg 10/11/12 is parsed as\n 2010-11-12.\n - If both dayfirst and yearfirst are True, yearfirst is preceded (same\n as dateutil).\n\n Warning: yearfirst=True is not strict, but will prefer to parse\n with year first (this is a known bug, based on dateutil beahavior).\n\n .. versionadded:: 0.16.1\n\n utc : boolean, default None\n Return UTC DatetimeIndex if True (converting any tz-aware\n datetime.datetime objects as well).\n box : boolean, default True\n\n - If True returns a DatetimeIndex\n - If False returns ndarray of values.\n format : string, default None\n strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse\n all the way up to nanoseconds.\n exact : boolean, True by default\n\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n\n unit : string, default \'ns\'\n unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with unit=\'ms\' and origin=\'unix\' (the default), this\n would calculate the number of milliseconds to the unix epoch start.\n infer_datetime_format : boolean, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings, and if it can be inferred, switch to a faster\n method of parsing them. In some cases this can increase the parsing\n speed by ~5-10x.\n origin : scalar, default is \'unix\'\n Define the reference date. The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If \'unix\' (or POSIX) time; origin is set to 1970-01-01.\n - If \'julian\', unit must be \'D\', and origin is set to beginning of\n Julian Calendar. Julian day number 0 is assigned to the day starting\n at noon on January 1, 4713 BC.\n - If Timestamp convertible, origin is set to Timestamp identified by\n origin.\n\n .. versionadded:: 0.20.0\n cache : boolean, default False\n If True, use a cache of unique, converted dates to apply the datetime\n conversion. May produce sigificant speed-up when parsing duplicate date\n strings, especially ones with timezone offsets.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n ret : datetime if parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In case when it is not possible to return designated types (e.g. 
when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n\n Examples\n --------\n Assembling a datetime from multiple columns of a DataFrame. The keys can be\n common abbreviations like [\'year\', \'month\', \'day\', \'minute\', \'second\',\n \'ms\', \'us\', \'ns\']) or plurals of the same\n\n >>> df = pd.DataFrame({\'year\': [2015, 2016],\n \'month\': [2, 3],\n \'day\': [4, 5]})\n >>> pd.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n dtype: datetime64[ns]\n\n If a date does not meet the `timestamp limitations\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html\n #timeseries-timestamp-limits>`_, passing errors=\'ignore\'\n will return the original input instead of raising any exception.\n\n Passing errors=\'coerce\' will force an out-of-bounds date to NaT,\n in addition to forcing non-dates (or non-parseable dates) to NaT.\n\n >>> pd.to_datetime(\'13000101\', format=\'%Y%m%d\', errors=\'ignore\')\n datetime.datetime(1300, 1, 1, 0, 0)\n >>> pd.to_datetime(\'13000101\', format=\'%Y%m%d\', errors=\'coerce\')\n NaT\n\n Passing infer_datetime_format=True can often-times speedup a parsing\n if its not an ISO8601 format exactly, but in a regular format.\n\n >>> s = pd.Series([\'3/11/2000\', \'3/12/2000\', \'3/13/2000\']*1000)\n\n >>> s.head()\n 0 3/11/2000\n 1 3/12/2000\n 2 3/13/2000\n 3 3/11/2000\n 4 3/12/2000\n dtype: object\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=True)\n 100 loops, best of 3: 10.4 ms per loop\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=False)\n 1 loop, best of 3: 471 ms per loop\n\n Using a unix epoch time\n\n >>> pd.to_datetime(1490195805, unit=\'s\')\n Timestamp(\'2017-03-22 15:16:45\')\n >>> pd.to_datetime(1490195805433502912, unit=\'ns\')\n Timestamp(\'2017-03-22 15:16:45.433502912\')\n\n .. warning:: For float arg, precision rounding might happen. To prevent\n unexpected behavior use a fixed-width exact type.\n\n Using a non-unix epoch origin\n\n >>> pd.to_datetime([1, 2, 3], unit=\'D\',\n origin=pd.Timestamp(\'1960-01-01\'))\n 0 1960-01-02\n 1 1960-01-03\n 2 1960-01-04\n\n See also\n --------\n pandas.DataFrame.astype : Cast argument to a specified dtype.\n pandas.to_timedelta : Convert argument to timedelta.\n '
from pandas.core.indexes.datetimes import DatetimeIndex
tz = ('utc' if utc else None)
def _convert_listlike(arg, box, format, name=None, tz=tz):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
if is_datetime64tz_dtype(arg):
if (not isinstance(arg, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if utc:
arg = arg.tz_convert(None).tz_localize('UTC')
return arg
elif is_datetime64_ns_dtype(arg):
if (box and (not isinstance(arg, DatetimeIndex))):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
return arg
elif (unit is not None):
if (format is not None):
raise ValueError('cannot specify both format and unit')
arg = getattr(arg, 'values', arg)
result = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
if box:
if (errors == 'ignore'):
from pandas import Index
return Index(result)
return DatetimeIndex(result, tz=tz, name=name)
return result
elif (getattr(arg, 'ndim', 1) > 1):
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series')
arg = _ensure_object(arg)
require_iso8601 = False
if (infer_datetime_format and (format is None)):
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if (format is not None):
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = (not infer_datetime_format)
format = None
try:
result = None
if (format is not None):
if (format == '%Y%m%d'):
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
if (result is None):
try:
result = array_strptime(arg, format, exact=exact, errors=errors)
except tslib.OutOfBoundsDatetime:
if (errors == 'raise'):
raise
result = arg
except ValueError:
if (not infer_datetime_format):
if (errors == 'raise'):
raise
result = arg
if ((result is None) and ((format is None) or infer_datetime_format)):
result = tslib.array_to_datetime(arg, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601)
if (is_datetime64_dtype(result) and box):
result = DatetimeIndex(result, tz=tz, name=name)
return result
except ValueError as e:
try:
(values, tz) = conversion.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if (arg is None):
return None
if (origin == 'julian'):
original = arg
j0 = tslib.Timestamp(0).to_julian_date()
if (unit != 'D'):
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = (arg - j0)
except:
raise ValueError("incompatible 'arg' type for given 'origin'='julian'")
j_max = (tslib.Timestamp.max.to_julian_date() - j0)
j_min = (tslib.Timestamp.min.to_julian_date() - j0)
if (np.any((arg > j_max)) or np.any((arg < j_min))):
raise tslib.OutOfBoundsDatetime("{original} is Out of Bounds for origin='julian'".format(original=original))
elif (origin not in ['unix', 'julian']):
original = arg
if (not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg)))):
raise ValueError("'{arg}' is not compatible with origin='{origin}'; it must be numeric with a unit specified ".format(arg=arg, origin=origin))
try:
offset = tslib.Timestamp(origin)
except tslib.OutOfBoundsDatetime:
raise tslib.OutOfBoundsDatetime('origin {origin} is Out of Bounds'.format(origin=origin))
except ValueError:
raise ValueError('origin {origin} cannot be converted to a Timestamp'.format(origin=origin))
if (offset.tz is not None):
raise ValueError('origin offset {} must be tz-naive'.format(offset))
offset -= tslib.Timestamp(0)
offset = (offset // tslib.Timedelta(1, unit=unit))
if (is_list_like(arg) and (not isinstance(arg, (ABCSeries, ABCIndexClass, np.ndarray)))):
arg = np.asarray(arg)
arg = (arg + offset)
if isinstance(arg, tslib.Timestamp):
result = arg
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = arg.map(cache_array)
else:
from pandas import Series
values = _convert_listlike(arg._values, True, format)
result = Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors=errors)
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = _convert_and_box_cache(arg, cache_array, box, errors, name=arg.name)
else:
result = _convert_listlike(arg, box, format, name=arg.name)
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = _convert_and_box_cache(arg, cache_array, box, errors)
else:
result = _convert_listlike(arg, box, format)
else:
result = _convert_listlike(np.array([arg]), box, format)[0]
return result | Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
.. versionadded:: 0.18.1
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
2010-11-12.
- If both dayfirst and yearfirst are True, yearfirst is preceded (same
as dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
with year first (this is a known bug, based on dateutil behavior).
.. versionadded:: 0.16.1
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex
- If False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default is 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
.. versionadded:: 0.20.0
cache : boolean, default False
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate date
strings, especially ones with timezone offsets.
.. versionadded:: 0.23.0
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In cases where it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max),
the return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often speed up parsing
if it is not an ISO8601 format exactly, but is in a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s,infer_datetime_format=True)
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
origin=pd.Timestamp('1960-01-01'))
0 1960-01-02
1 1960-01-03
2 1960-01-04
See also
--------
pandas.DataFrame.astype : Cast argument to a specified dtype.
pandas.to_timedelta : Convert argument to timedelta. | venv/Lib/site-packages/pandas/core/tools/datetimes.py | to_datetime | shehzadulislam/Quiz2Shehzad | 69 | python | def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False):
'\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n\n .. versionadded:: 0.18.1\n\n or DataFrame/dict-like\n\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n\n - If \'raise\', then invalid parsing will raise an exception\n - If \'coerce\', then invalid parsing will be set as NaT\n - If \'ignore\', then invalid parsing will return the input\n dayfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n If True, parses dates with the day first, eg 10/11/12 is parsed as\n 2012-11-10.\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug, based on dateutil behavior).\n yearfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n\n - If True parses dates with the year first, eg 10/11/12 is parsed as\n 2010-11-12.\n - If both dayfirst and yearfirst are True, yearfirst takes precedence (same\n as dateutil).\n\n Warning: yearfirst=True is not strict, but will prefer to parse\n with year first (this is a known bug, based on dateutil behavior).\n\n .. versionadded:: 0.16.1\n\n utc : boolean, default None\n Return UTC DatetimeIndex if True (converting any tz-aware\n datetime.datetime objects as well).\n box : boolean, default True\n\n - If True returns a DatetimeIndex\n - If False returns ndarray of values.\n format : string, default None\n strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse\n all the way up to nanoseconds.\n exact : boolean, True by default\n\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n\n unit : string, default \'ns\'\n unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with unit=\'ms\' and origin=\'unix\' (the default), this\n would calculate the number of milliseconds to the unix epoch start.\n infer_datetime_format : boolean, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings, and if it can be inferred, switch to a faster\n method of parsing them. In some cases this can increase the parsing\n speed by ~5-10x.\n origin : scalar, default is \'unix\'\n Define the reference date. The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If \'unix\' (or POSIX) time; origin is set to 1970-01-01.\n - If \'julian\', unit must be \'D\', and origin is set to beginning of\n Julian Calendar. Julian day number 0 is assigned to the day starting\n at noon on January 1, 4713 BC.\n - If Timestamp convertible, origin is set to Timestamp identified by\n origin.\n\n .. versionadded:: 0.20.0\n cache : boolean, default False\n If True, use a cache of unique, converted dates to apply the datetime\n conversion. May produce significant speed-up when parsing duplicate date\n strings, especially ones with timezone offsets.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n ret : datetime if parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In cases where it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n\n Examples\n --------\n Assembling a datetime from multiple columns of a DataFrame. The keys can be\n common abbreviations like [\'year\', \'month\', \'day\', \'minute\', \'second\',\n \'ms\', \'us\', \'ns\'] or plurals of the same\n\n >>> df = pd.DataFrame({\'year\': [2015, 2016],\n \'month\': [2, 3],\n \'day\': [4, 5]})\n >>> pd.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n dtype: datetime64[ns]\n\n If a date does not meet the `timestamp limitations\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html\n #timeseries-timestamp-limits>`_, passing errors=\'ignore\'\n will return the original input instead of raising any exception.\n\n Passing errors=\'coerce\' will force an out-of-bounds date to NaT,\n in addition to forcing non-dates (or non-parseable dates) to NaT.\n\n >>> pd.to_datetime(\'13000101\', format=\'%Y%m%d\', errors=\'ignore\')\n datetime.datetime(1300, 1, 1, 0, 0)\n >>> pd.to_datetime(\'13000101\', format=\'%Y%m%d\', errors=\'coerce\')\n NaT\n\n Passing infer_datetime_format=True can often speed up parsing\n if it is not exactly an ISO8601 format, but is in a regular format.\n\n >>> s = pd.Series([\'3/11/2000\', \'3/12/2000\', \'3/13/2000\']*1000)\n\n >>> s.head()\n 0 3/11/2000\n 1 3/12/2000\n 2 3/13/2000\n 3 3/11/2000\n 4 3/12/2000\n dtype: object\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=True)\n 100 loops, best of 3: 10.4 ms per loop\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=False)\n 1 loop, best of 3: 471 ms per loop\n\n Using a unix epoch time\n\n >>> pd.to_datetime(1490195805, unit=\'s\')\n Timestamp(\'2017-03-22 15:16:45\')\n >>> pd.to_datetime(1490195805433502912, unit=\'ns\')\n Timestamp(\'2017-03-22 15:16:45.433502912\')\n\n .. warning:: For float arg, precision rounding might happen. To prevent\n unexpected behavior use a fixed-width exact type.\n\n Using a non-unix epoch origin\n\n >>> pd.to_datetime([1, 2, 3], unit=\'D\',\n origin=pd.Timestamp(\'1960-01-01\'))\n 0 1960-01-02\n 1 1960-01-03\n 2 1960-01-04\n\n See also\n --------\n pandas.DataFrame.astype : Cast argument to a specified dtype.\n pandas.to_timedelta : Convert argument to timedelta.\n '
from pandas.core.indexes.datetimes import DatetimeIndex
tz = ('utc' if utc else None)
def _convert_listlike(arg, box, format, name=None, tz=tz):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
if is_datetime64tz_dtype(arg):
if (not isinstance(arg, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if utc:
arg = arg.tz_convert(None).tz_localize('UTC')
return arg
elif is_datetime64_ns_dtype(arg):
if (box and (not isinstance(arg, DatetimeIndex))):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
return arg
elif (unit is not None):
if (format is not None):
raise ValueError('cannot specify both format and unit')
arg = getattr(arg, 'values', arg)
result = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
if box:
if (errors == 'ignore'):
from pandas import Index
return Index(result)
return DatetimeIndex(result, tz=tz, name=name)
return result
elif (getattr(arg, 'ndim', 1) > 1):
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series')
arg = _ensure_object(arg)
require_iso8601 = False
if (infer_datetime_format and (format is None)):
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if (format is not None):
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = (not infer_datetime_format)
format = None
try:
result = None
if (format is not None):
if (format == '%Y%m%d'):
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
if (result is None):
try:
result = array_strptime(arg, format, exact=exact, errors=errors)
except tslib.OutOfBoundsDatetime:
if (errors == 'raise'):
raise
result = arg
except ValueError:
if (not infer_datetime_format):
if (errors == 'raise'):
raise
result = arg
if ((result is None) and ((format is None) or infer_datetime_format)):
result = tslib.array_to_datetime(arg, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601)
if (is_datetime64_dtype(result) and box):
result = DatetimeIndex(result, tz=tz, name=name)
return result
except ValueError as e:
try:
(values, tz) = conversion.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if (arg is None):
return None
if (origin == 'julian'):
original = arg
j0 = tslib.Timestamp(0).to_julian_date()
if (unit != 'D'):
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = (arg - j0)
except:
raise ValueError("incompatible 'arg' type for given 'origin'='julian'")
j_max = (tslib.Timestamp.max.to_julian_date() - j0)
j_min = (tslib.Timestamp.min.to_julian_date() - j0)
if (np.any((arg > j_max)) or np.any((arg < j_min))):
raise tslib.OutOfBoundsDatetime("{original} is Out of Bounds for origin='julian'".format(original=original))
elif (origin not in ['unix', 'julian']):
original = arg
if (not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg)))):
raise ValueError("'{arg}' is not compatible with origin='{origin}'; it must be numeric with a unit specified ".format(arg=arg, origin=origin))
try:
offset = tslib.Timestamp(origin)
except tslib.OutOfBoundsDatetime:
raise tslib.OutOfBoundsDatetime('origin {origin} is Out of Bounds'.format(origin=origin))
except ValueError:
raise ValueError('origin {origin} cannot be converted to a Timestamp'.format(origin=origin))
if (offset.tz is not None):
raise ValueError('origin offset {} must be tz-naive'.format(offset))
offset -= tslib.Timestamp(0)
offset = (offset // tslib.Timedelta(1, unit=unit))
if (is_list_like(arg) and (not isinstance(arg, (ABCSeries, ABCIndexClass, np.ndarray)))):
arg = np.asarray(arg)
arg = (arg + offset)
if isinstance(arg, tslib.Timestamp):
result = arg
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = arg.map(cache_array)
else:
from pandas import Series
values = _convert_listlike(arg._values, True, format)
result = Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors=errors)
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = _convert_and_box_cache(arg, cache_array, box, errors, name=arg.name)
else:
result = _convert_listlike(arg, box, format, name=arg.name)
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = _convert_and_box_cache(arg, cache_array, box, errors)
else:
result = _convert_listlike(arg, box, format)
else:
result = _convert_listlike(np.array([arg]), box, format)[0]
return result | def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False):
'\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n\n .. versionadded:: 0.18.1\n\n or DataFrame/dict-like\n\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n\n - If \'raise\', then invalid parsing will raise an exception\n - If \'coerce\', then invalid parsing will be set as NaT\n - If \'ignore\', then invalid parsing will return the input\n dayfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n If True, parses dates with the day first, eg 10/11/12 is parsed as\n 2012-11-10.\n Warning: dayfirst=True is not strict, but will prefer to parse\n with day first (this is a known bug, based on dateutil behavior).\n yearfirst : boolean, default False\n Specify a date parse order if `arg` is str or its list-likes.\n\n - If True parses dates with the year first, eg 10/11/12 is parsed as\n 2010-11-12.\n - If both dayfirst and yearfirst are True, yearfirst takes precedence (same\n as dateutil).\n\n Warning: yearfirst=True is not strict, but will prefer to parse\n with year first (this is a known bug, based on dateutil behavior).\n\n .. versionadded:: 0.16.1\n\n utc : boolean, default None\n Return UTC DatetimeIndex if True (converting any tz-aware\n datetime.datetime objects as well).\n box : boolean, default True\n\n - If True returns a DatetimeIndex\n - If False returns ndarray of values.\n format : string, default None\n strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse\n all the way up to nanoseconds.\n exact : boolean, True by default\n\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n\n unit : string, default \'ns\'\n unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with unit=\'ms\' and origin=\'unix\' (the default), this\n would calculate the number of milliseconds to the unix epoch start.\n infer_datetime_format : boolean, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings, and if it can be inferred, switch to a faster\n method of parsing them. In some cases this can increase the parsing\n speed by ~5-10x.\n origin : scalar, default is \'unix\'\n Define the reference date. The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If \'unix\' (or POSIX) time; origin is set to 1970-01-01.\n - If \'julian\', unit must be \'D\', and origin is set to beginning of\n Julian Calendar. Julian day number 0 is assigned to the day starting\n at noon on January 1, 4713 BC.\n - If Timestamp convertible, origin is set to Timestamp identified by\n origin.\n\n .. versionadded:: 0.20.0\n cache : boolean, default False\n If True, use a cache of unique, converted dates to apply the datetime\n conversion. May produce significant speed-up when parsing duplicate date\n strings, especially ones with timezone offsets.\n\n .. versionadded:: 0.23.0\n\n Returns\n -------\n ret : datetime if parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In cases where it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n\n Examples\n --------\n Assembling a datetime from multiple columns of a DataFrame. The keys can be\n common abbreviations like [\'year\', \'month\', \'day\', \'minute\', \'second\',\n \'ms\', \'us\', \'ns\'] or plurals of the same\n\n >>> df = pd.DataFrame({\'year\': [2015, 2016],\n \'month\': [2, 3],\n \'day\': [4, 5]})\n >>> pd.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n dtype: datetime64[ns]\n\n If a date does not meet the `timestamp limitations\n <http://pandas.pydata.org/pandas-docs/stable/timeseries.html\n #timeseries-timestamp-limits>`_, passing errors=\'ignore\'\n will return the original input instead of raising any exception.\n\n Passing errors=\'coerce\' will force an out-of-bounds date to NaT,\n in addition to forcing non-dates (or non-parseable dates) to NaT.\n\n >>> pd.to_datetime(\'13000101\', format=\'%Y%m%d\', errors=\'ignore\')\n datetime.datetime(1300, 1, 1, 0, 0)\n >>> pd.to_datetime(\'13000101\', format=\'%Y%m%d\', errors=\'coerce\')\n NaT\n\n Passing infer_datetime_format=True can often speed up parsing\n if it is not exactly an ISO8601 format, but is in a regular format.\n\n >>> s = pd.Series([\'3/11/2000\', \'3/12/2000\', \'3/13/2000\']*1000)\n\n >>> s.head()\n 0 3/11/2000\n 1 3/12/2000\n 2 3/13/2000\n 3 3/11/2000\n 4 3/12/2000\n dtype: object\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=True)\n 100 loops, best of 3: 10.4 ms per loop\n\n >>> %timeit pd.to_datetime(s,infer_datetime_format=False)\n 1 loop, best of 3: 471 ms per loop\n\n Using a unix epoch time\n\n >>> pd.to_datetime(1490195805, unit=\'s\')\n Timestamp(\'2017-03-22 15:16:45\')\n >>> pd.to_datetime(1490195805433502912, unit=\'ns\')\n Timestamp(\'2017-03-22 15:16:45.433502912\')\n\n .. warning:: For float arg, precision rounding might happen. To prevent\n unexpected behavior use a fixed-width exact type.\n\n Using a non-unix epoch origin\n\n >>> pd.to_datetime([1, 2, 3], unit=\'D\',\n origin=pd.Timestamp(\'1960-01-01\'))\n 0 1960-01-02\n 1 1960-01-03\n 2 1960-01-04\n\n See also\n --------\n pandas.DataFrame.astype : Cast argument to a specified dtype.\n pandas.to_timedelta : Convert argument to timedelta.\n '
from pandas.core.indexes.datetimes import DatetimeIndex
tz = ('utc' if utc else None)
def _convert_listlike(arg, box, format, name=None, tz=tz):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
if is_datetime64tz_dtype(arg):
if (not isinstance(arg, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if utc:
arg = arg.tz_convert(None).tz_localize('UTC')
return arg
elif is_datetime64_ns_dtype(arg):
if (box and (not isinstance(arg, DatetimeIndex))):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
return arg
elif (unit is not None):
if (format is not None):
raise ValueError('cannot specify both format and unit')
arg = getattr(arg, 'values', arg)
result = tslib.array_with_unit_to_datetime(arg, unit, errors=errors)
if box:
if (errors == 'ignore'):
from pandas import Index
return Index(result)
return DatetimeIndex(result, tz=tz, name=name)
return result
elif (getattr(arg, 'ndim', 1) > 1):
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series')
arg = _ensure_object(arg)
require_iso8601 = False
if (infer_datetime_format and (format is None)):
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if (format is not None):
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = (not infer_datetime_format)
format = None
try:
result = None
if (format is not None):
if (format == '%Y%m%d'):
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to '%Y%m%d' date format")
if (result is None):
try:
result = array_strptime(arg, format, exact=exact, errors=errors)
except tslib.OutOfBoundsDatetime:
if (errors == 'raise'):
raise
result = arg
except ValueError:
if (not infer_datetime_format):
if (errors == 'raise'):
raise
result = arg
if ((result is None) and ((format is None) or infer_datetime_format)):
result = tslib.array_to_datetime(arg, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601)
if (is_datetime64_dtype(result) and box):
result = DatetimeIndex(result, tz=tz, name=name)
return result
except ValueError as e:
try:
(values, tz) = conversion.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if (arg is None):
return None
if (origin == 'julian'):
original = arg
j0 = tslib.Timestamp(0).to_julian_date()
if (unit != 'D'):
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = (arg - j0)
except:
raise ValueError("incompatible 'arg' type for given 'origin'='julian'")
j_max = (tslib.Timestamp.max.to_julian_date() - j0)
j_min = (tslib.Timestamp.min.to_julian_date() - j0)
if (np.any((arg > j_max)) or np.any((arg < j_min))):
raise tslib.OutOfBoundsDatetime("{original} is Out of Bounds for origin='julian'".format(original=original))
elif (origin not in ['unix', 'julian']):
original = arg
if (not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg)))):
raise ValueError("'{arg}' is not compatible with origin='{origin}'; it must be numeric with a unit specified ".format(arg=arg, origin=origin))
try:
offset = tslib.Timestamp(origin)
except tslib.OutOfBoundsDatetime:
raise tslib.OutOfBoundsDatetime('origin {origin} is Out of Bounds'.format(origin=origin))
except ValueError:
raise ValueError('origin {origin} cannot be converted to a Timestamp'.format(origin=origin))
if (offset.tz is not None):
raise ValueError('origin offset {} must be tz-naive'.format(offset))
offset -= tslib.Timestamp(0)
offset = (offset // tslib.Timedelta(1, unit=unit))
if (is_list_like(arg) and (not isinstance(arg, (ABCSeries, ABCIndexClass, np.ndarray)))):
arg = np.asarray(arg)
arg = (arg + offset)
if isinstance(arg, tslib.Timestamp):
result = arg
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = arg.map(cache_array)
else:
from pandas import Series
values = _convert_listlike(arg._values, True, format)
result = Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors=errors)
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = _convert_and_box_cache(arg, cache_array, box, errors, name=arg.name)
else:
result = _convert_listlike(arg, box, format, name=arg.name)
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, tz, _convert_listlike)
if (not cache_array.empty):
result = _convert_and_box_cache(arg, cache_array, box, errors)
else:
result = _convert_listlike(arg, box, format)
else:
result = _convert_listlike(np.array([arg]), box, format)[0]
return result<|docstring|>Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
.. versionadded:: 0.18.1
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
2010-11-12.
- If both dayfirst and yearfirst are True, yearfirst takes precedence (same
as dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
with year first (this is a known bug, based on dateutil behavior).
.. versionadded:: 0.16.1
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex
- If False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default is 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
.. versionadded:: 0.20.0
cache : boolean, default False
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate date
strings, especially ones with timezone offsets.
.. versionadded:: 0.23.0
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In cases where it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
Passing infer_datetime_format=True can often speed up parsing
if it is not exactly an ISO8601 format, but is in a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s,infer_datetime_format=True)
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
origin=pd.Timestamp('1960-01-01'))
0 1960-01-02
1 1960-01-03
2 1960-01-04
See also
--------
pandas.DataFrame.astype : Cast argument to a specified dtype.
pandas.to_timedelta : Convert argument to timedelta.<|endoftext|> |
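As a quick orientation to the to_datetime record above, here is a minimal usage sketch; it assumes a pandas build of roughly this vintage (~0.23, matching the vendored path) and exercises only parameters documented in the record:

import pandas as pd

# Numeric values interpreted as days since a custom origin.
idx = pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
print(idx)  # DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)

# Out-of-bounds or unparseable dates become NaT under errors='coerce'.
print(pd.to_datetime('13000101', format='%Y%m%d', errors='coerce'))  # NaT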
a87b01d2924d306e57f18c868d17e47f20a8c380c1c92f716edb754e71aef9e1 | def _assemble_from_unit_mappings(arg, errors):
"\n assemble the unit specified fields from the arg (DataFrame)\n Return a Series for actual parsing\n\n Parameters\n ----------\n arg : DataFrame\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n\n Returns\n -------\n Series\n "
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if (not arg.columns.is_unique):
raise ValueError('cannot assemble with duplicate keys')
def f(value):
if (value in _unit_map):
return _unit_map[value]
if (value.lower() in _unit_map):
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for (k, v) in unit.items()}
required = ['year', 'month', 'day']
req = sorted(list((set(required) - set(unit_rev.keys()))))
if len(req):
raise ValueError('to assemble mappings requires at least that [year, month, day] be specified: [{required}] is missing'.format(required=','.join(req)))
excess = sorted(list((set(unit_rev.keys()) - set(_unit_map.values()))))
if len(excess):
raise ValueError('extra keys have been passed to the datetime assemblage: [{excess}]'.format(excess=','.join(excess)))
def coerce(values):
values = to_numeric(values, errors=errors)
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (((coerce(arg[unit_rev['year']]) * 10000) + (coerce(arg[unit_rev['month']]) * 100)) + coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors)
except (TypeError, ValueError) as e:
raise ValueError('cannot assemble the datetimes: {error}'.format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if ((value is not None) and (value in arg)):
try:
values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
except (TypeError, ValueError) as e:
raise ValueError('cannot assemble the datetimes [{value}]: {error}'.format(value=value, error=e))
return values | assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
Series | venv/Lib/site-packages/pandas/core/tools/datetimes.py | _assemble_from_unit_mappings | shehzadulislam/Quiz2Shehzad | 69 | python | def _assemble_from_unit_mappings(arg, errors):
"\n assemble the unit specified fields from the arg (DataFrame)\n Return a Series for actual parsing\n\n Parameters\n ----------\n arg : DataFrame\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n\n Returns\n -------\n Series\n "
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if (not arg.columns.is_unique):
raise ValueError('cannot assemble with duplicate keys')
def f(value):
if (value in _unit_map):
return _unit_map[value]
if (value.lower() in _unit_map):
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for (k, v) in unit.items()}
required = ['year', 'month', 'day']
req = sorted(list((set(required) - set(unit_rev.keys()))))
if len(req):
raise ValueError('to assemble mappings requires at least that [year, month, day] be specified: [{required}] is missing'.format(required=','.join(req)))
excess = sorted(list((set(unit_rev.keys()) - set(_unit_map.values()))))
if len(excess):
raise ValueError('extra keys have been passed to the datetime assemblage: [{excess}]'.format(excess=','.join(excess)))
def coerce(values):
values = to_numeric(values, errors=errors)
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (((coerce(arg[unit_rev['year']]) * 10000) + (coerce(arg[unit_rev['month']]) * 100)) + coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors)
except (TypeError, ValueError) as e:
raise ValueError('cannot assemble the datetimes: {error}'.format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if ((value is not None) and (value in arg)):
try:
values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
except (TypeError, ValueError) as e:
raise ValueError('cannot assemble the datetimes [{value}]: {error}'.format(value=value, error=e))
return values | def _assemble_from_unit_mappings(arg, errors):
"\n assemble the unit specified fields from the arg (DataFrame)\n Return a Series for actual parsing\n\n Parameters\n ----------\n arg : DataFrame\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n\n Returns\n -------\n Series\n "
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if (not arg.columns.is_unique):
raise ValueError('cannot assemble with duplicate keys')
def f(value):
if (value in _unit_map):
return _unit_map[value]
if (value.lower() in _unit_map):
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for (k, v) in unit.items()}
required = ['year', 'month', 'day']
req = sorted(list((set(required) - set(unit_rev.keys()))))
if len(req):
raise ValueError('to assemble mappings requires at least that [year, month, day] be specified: [{required}] is missing'.format(required=','.join(req)))
excess = sorted(list((set(unit_rev.keys()) - set(_unit_map.values()))))
if len(excess):
raise ValueError('extra keys have been passed to the datetime assemblage: [{excess}]'.format(excess=','.join(excess)))
def coerce(values):
values = to_numeric(values, errors=errors)
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (((coerce(arg[unit_rev['year']]) * 10000) + (coerce(arg[unit_rev['month']]) * 100)) + coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors)
except (TypeError, ValueError) as e:
raise ValueError('cannot assemble the datetimes: {error}'.format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if ((value is not None) and (value in arg)):
try:
values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
except (TypeError, ValueError) as e:
raise ValueError('cannot assemble the datetimes [{value}]: {error}'.format(value=value, error=e))
return values<|docstring|>assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
Series<|endoftext|> |
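_assemble_from_unit_mappings is the helper that backs pd.to_datetime on a DataFrame or dict of unit columns; a short sketch of that public entry point follows (the 'hour' column is one of the unit abbreviations the helper maps, assuming this vintage of pandas):

import pandas as pd

df = pd.DataFrame({'year': [2015, 2016],
                   'month': [2, 3],
                   'day': [4, 5],
                   'hour': [10, 11]})
# Routes through _assemble_from_unit_mappings: year/month/day are required,
# remaining columns are added as timedeltas per their unit.
print(pd.to_datetime(df))
# 0   2015-02-04 10:00:00
# 1   2016-03-05 11:00:00
# dtype: datetime64[ns]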
7ddd57b7a5f4d9ed7cbec9628fe489830f4e4fa0f2d9cbc21a45fe12497ddf06 | def _attempt_YYYYMMDD(arg, errors):
" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,\n arg is a passed in as an object dtype, but could really be ints/strings\n with nan-like/or floats (e.g. with nan)\n\n Parameters\n ----------\n arg : passed value\n errors : 'raise','ignore','coerce'\n "
def calc(carg):
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day((carg / 10000), ((carg / 100) % 100), (carg % 100))
return tslib.array_to_datetime(parsed, errors=errors)
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[(~ mask)] = tslib.iNaT
result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).astype('M8[ns]')
return result
try:
return calc(arg.astype(np.int64))
except:
pass
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except:
pass
try:
mask = (~ algorithms.isin(arg, list(tslib.nat_strings)))
return calc_with_mask(arg, mask)
except:
pass
return None | try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is passed in as an object dtype, but could really be ints/strings
with nan-like or floats (e.g. with nan)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce' | venv/Lib/site-packages/pandas/core/tools/datetimes.py | _attempt_YYYYMMDD | shehzadulislam/Quiz2Shehzad | 69 | python | def _attempt_YYYYMMDD(arg, errors):
" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,\n arg is a passed in as an object dtype, but could really be ints/strings\n with nan-like/or floats (e.g. with nan)\n\n Parameters\n ----------\n arg : passed value\n errors : 'raise','ignore','coerce'\n "
def calc(carg):
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day((carg / 10000), ((carg / 100) % 100), (carg % 100))
return tslib.array_to_datetime(parsed, errors=errors)
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[(~ mask)] = tslib.iNaT
result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).astype('M8[ns]')
return result
try:
return calc(arg.astype(np.int64))
except:
pass
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except:
pass
try:
mask = (~ algorithms.isin(arg, list(tslib.nat_strings)))
return calc_with_mask(arg, mask)
except:
pass
return None | def _attempt_YYYYMMDD(arg, errors):
" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,\n arg is a passed in as an object dtype, but could really be ints/strings\n with nan-like/or floats (e.g. with nan)\n\n Parameters\n ----------\n arg : passed value\n errors : 'raise','ignore','coerce'\n "
def calc(carg):
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day((carg / 10000), ((carg / 100) % 100), (carg % 100))
return tslib.array_to_datetime(parsed, errors=errors)
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[(~ mask)] = tslib.iNaT
result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)).astype('M8[ns]')
return result
try:
return calc(arg.astype(np.int64))
except:
pass
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except:
pass
try:
mask = (~ algorithms.isin(arg, list(tslib.nat_strings)))
return calc_with_mask(arg, mask)
except:
pass
return None<|docstring|>try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is passed in as an object dtype, but could really be ints/strings
with nan-like or floats (e.g. with nan)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'<|endoftext|> |
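_attempt_YYYYMMDD is the fast path behind format='%Y%m%d'; a sketch exercising it through the public API, assuming integer-coded dates with missing values (the NaN forces the masked float branch shown in the body above):

import numpy as np
import pandas as pd

s = pd.Series([20150204, 20160305, np.nan])
print(pd.to_datetime(s, format='%Y%m%d', errors='coerce'))
# 0   2015-02-04
# 1   2016-03-05
# 2          NaT
# dtype: datetime64[ns]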
f9d43f740f2fb04391fba9ef1c3806bb7acb6c5048287e46d4d4915cdba563c8 | def to_time(arg, format=None, infer_time_format=False, errors='raise'):
'\n Parse time strings to time objects using fixed strptime formats ("%H:%M",\n "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",\n "%I%M%S%p")\n\n Use infer_time_format if all the strings are in the same format to speed\n up conversion.\n\n Parameters\n ----------\n arg : string in time format, datetime.time, list, tuple, 1-d array, Series\n format : str, default None\n Format used to convert arg into a time object. If None, fixed formats\n are used.\n infer_time_format: bool, default False\n Infer the time format based on the first non-NaN element. If all\n strings are in the same format, this will speed up conversion.\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n - If \'raise\', then invalid parsing will raise an exception\n - If \'coerce\', then invalid parsing will be set as None\n - If \'ignore\', then invalid parsing will return the input\n\n Returns\n -------\n datetime.time\n '
from pandas.core.series import Series
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif (getattr(arg, 'ndim', 1) > 1):
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series')
arg = _ensure_object(arg)
if (infer_time_format and (format is None)):
format = _guess_time_format_for_array(arg)
times = []
if (format is not None):
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if (errors == 'raise'):
msg = 'Cannot convert {element} to a time with given format {format}'.format(element=element, format=format)
raise ValueError(msg)
elif (errors == 'ignore'):
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element, time_format).time()
if (not format_found):
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if (time_object is not None):
times.append(time_object)
elif (errors == 'raise'):
raise ValueError('Cannot convert arg {arg} to a time'.format(arg=arg))
elif (errors == 'ignore'):
return arg
else:
times.append(None)
return times
if (arg is None):
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg._values, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0] | Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time | venv/Lib/site-packages/pandas/core/tools/datetimes.py | to_time | shehzadulislam/Quiz2Shehzad | 69 | python | def to_time(arg, format=None, infer_time_format=False, errors='raise'):
'\n Parse time strings to time objects using fixed strptime formats ("%H:%M",\n "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",\n "%I%M%S%p")\n\n Use infer_time_format if all the strings are in the same format to speed\n up conversion.\n\n Parameters\n ----------\n arg : string in time format, datetime.time, list, tuple, 1-d array, Series\n format : str, default None\n Format used to convert arg into a time object. If None, fixed formats\n are used.\n infer_time_format: bool, default False\n Infer the time format based on the first non-NaN element. If all\n strings are in the same format, this will speed up conversion.\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n - If \'raise\', then invalid parsing will raise an exception\n - If \'coerce\', then invalid parsing will be set as None\n - If \'ignore\', then invalid parsing will return the input\n\n Returns\n -------\n datetime.time\n '
from pandas.core.series import Series
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif (getattr(arg, 'ndim', 1) > 1):
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series')
arg = _ensure_object(arg)
if (infer_time_format and (format is None)):
format = _guess_time_format_for_array(arg)
times = []
if (format is not None):
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if (errors == 'raise'):
msg = 'Cannot convert {element} to a time with given format {format}'.format(element=element, format=format)
raise ValueError(msg)
elif (errors == 'ignore'):
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element, time_format).time()
if (not format_found):
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if (time_object is not None):
times.append(time_object)
elif (errors == 'raise'):
raise ValueError('Cannot convert arg {arg} to a time'.format(arg=arg))
elif (errors == 'ignore'):
return arg
else:
times.append(None)
return times
if (arg is None):
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg._values, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0] | def to_time(arg, format=None, infer_time_format=False, errors='raise'):
'\n Parse time strings to time objects using fixed strptime formats ("%H:%M",\n "%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",\n "%I%M%S%p")\n\n Use infer_time_format if all the strings are in the same format to speed\n up conversion.\n\n Parameters\n ----------\n arg : string in time format, datetime.time, list, tuple, 1-d array, Series\n format : str, default None\n Format used to convert arg into a time object. If None, fixed formats\n are used.\n infer_time_format: bool, default False\n Infer the time format based on the first non-NaN element. If all\n strings are in the same format, this will speed up conversion.\n errors : {\'ignore\', \'raise\', \'coerce\'}, default \'raise\'\n - If \'raise\', then invalid parsing will raise an exception\n - If \'coerce\', then invalid parsing will be set as None\n - If \'ignore\', then invalid parsing will return the input\n\n Returns\n -------\n datetime.time\n '
from pandas.core.series import Series
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif (getattr(arg, 'ndim', 1) > 1):
raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series')
arg = _ensure_object(arg)
if (infer_time_format and (format is None)):
format = _guess_time_format_for_array(arg)
times = []
if (format is not None):
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if (errors == 'raise'):
msg = 'Cannot convert {element} to a time with given format {format}'.format(element=element, format=format)
raise ValueError(msg)
elif (errors == 'ignore'):
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element, time_format).time()
if (not format_found):
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if (time_object is not None):
times.append(time_object)
elif (errors == 'raise'):
raise ValueError('Cannot convert arg {arg} to a time'.format(arg=arg))
elif (errors == 'ignore'):
return arg
else:
times.append(None)
return times
if (arg is None):
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg._values, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0]<|docstring|>Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time<|endoftext|> |
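to_time is internal in this vintage of pandas, so the import below assumes the module path recorded above stays stable; note the body's small optimization of moving the first matching fixed format to the front of the list for subsequent elements:

from pandas.core.tools.datetimes import to_time

# '14:30' matches '%H:%M'; '2:35PM' matches '%I:%M%p'.
print(to_time(['14:30', '2:35PM']))
# [datetime.time(14, 30), datetime.time(14, 35)]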
5e16ee6a94e23bf6de16aaf900811f784bdf17f352b39244f10c0c2099877383 | def format(dt):
'Returns date in YYYYMMDD format.'
return dt.strftime('%Y%m%d') | Returns date in YYYYMMDD format. | venv/Lib/site-packages/pandas/core/tools/datetimes.py | format | shehzadulislam/Quiz2Shehzad | 69 | python | def format(dt):
return dt.strftime('%Y%m%d') | def format(dt):
return dt.strftime('%Y%m%d')<|docstring|>Returns date in YYYYMMDD format.<|endoftext|> |
2f8aad1afa5b709a4d7cfe7a452ee998d28e8bdf90115873a613bc1ba5804658 | def ole2datetime(oledt):
'function for converting excel date to normal date format'
val = float(oledt)
if (val < 61):
msg = 'Value is outside of acceptable range: {value}'.format(value=val)
raise ValueError(msg)
return (OLE_TIME_ZERO + timedelta(days=val)) | function for converting excel date to normal date format | venv/Lib/site-packages/pandas/core/tools/datetimes.py | ole2datetime | shehzadulislam/Quiz2Shehzad | 69 | python | def ole2datetime(oledt):
val = float(oledt)
if (val < 61):
msg = 'Value is outside of acceptable range: {value}'.format(value=val)
raise ValueError(msg)
return (OLE_TIME_ZERO + timedelta(days=val)) | def ole2datetime(oledt):
val = float(oledt)
if (val < 61):
msg = 'Value is outside of acceptable range: {value}'.format(value=val)
raise ValueError(msg)
return (OLE_TIME_ZERO + timedelta(days=val))<|docstring|>function for converting excel date to normal date format<|endoftext|> |
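OLE_TIME_ZERO is defined elsewhere in the module, so the record above is not self-contained; a standalone sketch of the same conversion, assuming the conventional OLE epoch of 1899-12-30:

from datetime import datetime, timedelta

OLE_TIME_ZERO = datetime(1899, 12, 30)  # assumed value of the module constant

def ole2datetime(oledt):
    # Serial 60 is Excel's phantom 1900-02-29; smaller values would be off
    # by a day under this epoch, hence the cutoff at 61.
    val = float(oledt)
    if val < 61:
        raise ValueError('Value is outside of acceptable range: {value}'.format(value=val))
    return OLE_TIME_ZERO + timedelta(days=val)

print(ole2datetime(43831))  # 2020-01-01 00:00:00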
9cb697f8e3b446d7baf47f05320cb551885c138ff3b618379470687ef026383f | def cancel(self):
"Stop the timer if it hasn't finished yet"
self.finished_event.set() | Stop the timer if it hasn't finished yet | src/timer/process_timer.py | cancel | tahesse/gifCutterBot | 4 | python | def cancel(self):
self.finished_event.set() | def cancel(self):
self.finished_event.set()<|docstring|>Stop the timer if it hasn't finished yet<|endoftext|> |
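Only cancel() is captured in this record; a hypothetical sketch of the Event-based timer it implies (the class shape, start(), and _run() are assumptions, not the repository's actual code):

import threading

class ProcessTimer:
    def __init__(self, interval, callback):
        self.finished_event = threading.Event()
        self._interval = interval
        self._callback = callback
        self._thread = threading.Thread(target=self._run, daemon=True)

    def start(self):
        self._thread.start()

    def _run(self):
        # wait() returns False on timeout, True when cancel() set the event.
        if not self.finished_event.wait(self._interval):
            self._callback()

    def cancel(self):
        "Stop the timer if it hasn't finished yet"
        self.finished_event.set()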
2ba218a317aaa36f45c0ba65fe2dc50ebb0f9bb4a141664c9570cddd9972995f | def __init__(__self__, *, cache_behavior: str, cache_type: str, odata_type: str, cache_duration: Optional[str]=None):
'\n Defines the parameters for the cache expiration action.\n :param str cache_behavior: Caching behavior for the requests\n :param str cache_type: The level at which the content needs to be cached.\n :param str cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss\n '
pulumi.set(__self__, 'cache_behavior', cache_behavior)
pulumi.set(__self__, 'cache_type', cache_type)
pulumi.set(__self__, 'odata_type', odata_type)
if (cache_duration is not None):
pulumi.set(__self__, 'cache_duration', cache_duration) | Defines the parameters for the cache expiration action.
:param str cache_behavior: Caching behavior for the requests
:param str cache_type: The level at which the content needs to be cached.
:param str cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, cache_behavior: str, cache_type: str, odata_type: str, cache_duration: Optional[str]=None):
'\n Defines the parameters for the cache expiration action.\n :param str cache_behavior: Caching behavior for the requests\n :param str cache_type: The level at which the content needs to be cached.\n :param str cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss\n '
pulumi.set(__self__, 'cache_behavior', cache_behavior)
pulumi.set(__self__, 'cache_type', cache_type)
pulumi.set(__self__, 'odata_type', odata_type)
if (cache_duration is not None):
pulumi.set(__self__, 'cache_duration', cache_duration) | def __init__(__self__, *, cache_behavior: str, cache_type: str, odata_type: str, cache_duration: Optional[str]=None):
'\n Defines the parameters for the cache expiration action.\n :param str cache_behavior: Caching behavior for the requests\n :param str cache_type: The level at which the content needs to be cached.\n :param str cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss\n '
pulumi.set(__self__, 'cache_behavior', cache_behavior)
pulumi.set(__self__, 'cache_type', cache_type)
pulumi.set(__self__, 'odata_type', odata_type)
if (cache_duration is not None):
pulumi.set(__self__, 'cache_duration', cache_duration)<|docstring|>Defines the parameters for the cache expiration action.
:param str cache_behavior: Caching behavior for the requests
:param str cache_type: The level at which the content needs to be cached.
:param str cache_duration: The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss<|endoftext|> |
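A hedged construction sketch for the output type above; the odata_type string and the cache_behavior/cache_type values are illustrative, drawn from the Azure CDN delivery-rule schema rather than from this record:

params = CacheExpirationActionParameters(
    cache_behavior='Override',
    cache_type='All',
    odata_type='#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters',
    cache_duration='0.01:00:00',  # one hour, in the [d.]hh:mm:ss format noted above
)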
e55ae6bbafbd8296aba14768a4c43ff596dae628c13cce2b51ba2f753ade980d | @property
@pulumi.getter(name='cacheBehavior')
def cache_behavior(self) -> str:
'\n Caching behavior for the requests\n '
return pulumi.get(self, 'cache_behavior') | Caching behavior for the requests | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | cache_behavior | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='cacheBehavior')
def cache_behavior(self) -> str:
'\n \n '
return pulumi.get(self, 'cache_behavior') | @property
@pulumi.getter(name='cacheBehavior')
def cache_behavior(self) -> str:
'\n \n '
return pulumi.get(self, 'cache_behavior')<|docstring|>Caching behavior for the requests<|endoftext|> |
a23cd1b78f753c39680085e3fc5640ffc58f3717cf970880991610a485068b61 | @property
@pulumi.getter(name='cacheType')
def cache_type(self) -> str:
'\n The level at which the content needs to be cached.\n '
return pulumi.get(self, 'cache_type') | The level at which the content needs to be cached. | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | cache_type | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='cacheType')
def cache_type(self) -> str:
'\n \n '
return pulumi.get(self, 'cache_type') | @property
@pulumi.getter(name='cacheType')
def cache_type(self) -> str:
'\n \n '
return pulumi.get(self, 'cache_type')<|docstring|>The level at which the content needs to be cached.<|endoftext|> |
7f5b804bee3e3f34fd901b6a38e437b9822c7dcd526b430ed8b9ac96e441a4ef | @property
@pulumi.getter(name='cacheDuration')
def cache_duration(self) -> Optional[str]:
'\n The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss\n '
return pulumi.get(self, 'cache_duration') | The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | cache_duration | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='cacheDuration')
def cache_duration(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'cache_duration') | @property
@pulumi.getter(name='cacheDuration')
def cache_duration(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'cache_duration')<|docstring|>The duration for which the content needs to be cached. Allowed format is [d.]hh:mm:ss<|endoftext|> |
2ee5787becebd7ff5fd7bf03e37ffc109058df4db4d57dd7c0a09a404b3319cb | def __init__(__self__, *, odata_type: str, query_string_behavior: str, query_parameters: Optional[str]=None):
'\n Defines the parameters for the cache-key query string action.\n :param str query_string_behavior: Caching behavior for the requests\n :param str query_parameters: query parameters to include or exclude (comma separated).\n '
pulumi.set(__self__, 'odata_type', odata_type)
pulumi.set(__self__, 'query_string_behavior', query_string_behavior)
if (query_parameters is not None):
pulumi.set(__self__, 'query_parameters', query_parameters) | Defines the parameters for the cache-key query string action.
:param str query_string_behavior: Caching behavior for the requests
:param str query_parameters: query parameters to include or exclude (comma separated). | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, odata_type: str, query_string_behavior: str, query_parameters: Optional[str]=None):
'\n Defines the parameters for the cache-key query string action.\n :param str query_string_behavior: Caching behavior for the requests\n :param str query_parameters: query parameters to include or exclude (comma separated).\n '
pulumi.set(__self__, 'odata_type', odata_type)
pulumi.set(__self__, 'query_string_behavior', query_string_behavior)
if (query_parameters is not None):
pulumi.set(__self__, 'query_parameters', query_parameters) | def __init__(__self__, *, odata_type: str, query_string_behavior: str, query_parameters: Optional[str]=None):
'\n Defines the parameters for the cache-key query string action.\n :param str query_string_behavior: Caching behavior for the requests\n :param str query_parameters: query parameters to include or exclude (comma separated).\n '
pulumi.set(__self__, 'odata_type', odata_type)
pulumi.set(__self__, 'query_string_behavior', query_string_behavior)
if (query_parameters is not None):
pulumi.set(__self__, 'query_parameters', query_parameters)<|docstring|>Defines the parameters for the cache-key query string action.
:param str query_string_behavior: Caching behavior for the requests
:param str query_parameters: query parameters to include or exclude (comma separated).<|endoftext|> |
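Likewise for the cache-key query string parameters; the enum-like values here are illustrative assumptions about the Azure CDN schema, not taken from the record:

params = CacheKeyQueryStringActionParameters(
    odata_type='#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheKeyQueryStringBehaviorActionParameters',
    query_string_behavior='Include',
    query_parameters='campaign,ref',  # comma separated, per the docstring
)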
7344299735333cfe582d1b2892a68d85fb4e2728f6fde5023fb6b4e924df6b24 | @property
@pulumi.getter(name='queryStringBehavior')
def query_string_behavior(self) -> str:
'\n Caching behavior for the requests\n '
return pulumi.get(self, 'query_string_behavior') | Caching behavior for the requests | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | query_string_behavior | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='queryStringBehavior')
def query_string_behavior(self) -> str:
'\n \n '
return pulumi.get(self, 'query_string_behavior') | @property
@pulumi.getter(name='queryStringBehavior')
def query_string_behavior(self) -> str:
'\n \n '
return pulumi.get(self, 'query_string_behavior')<|docstring|>Caching behavior for the requests<|endoftext|> |
5d7cccab5182f832713cee43e8ea9d740d1a9c6931114cffd65fd81fd418d9a4 | @property
@pulumi.getter(name='queryParameters')
def query_parameters(self) -> Optional[str]:
'\n query parameters to include or exclude (comma separated).\n '
return pulumi.get(self, 'query_parameters') | query parameters to include or exclude (comma separated). | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | query_parameters | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='queryParameters')
def query_parameters(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'query_parameters') | @property
@pulumi.getter(name='queryParameters')
def query_parameters(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'query_parameters')<|docstring|>query parameters to include or exclude (comma separated).<|endoftext|> |
b4ab376bbbdd196b5a78ce33097d6cc0c375992a61eef6c4c718c4b53a01924a | def __init__(__self__, *, odata_type: str, operator: str, match_values: Optional[Sequence[str]]=None, negate_condition: Optional[bool]=None, selector: Optional[str]=None, transforms: Optional[Sequence[str]]=None):
'\n Defines the parameters for Cookies match conditions\n :param str operator: Describes operator to be matched\n :param Sequence[str] match_values: The match value for the condition of the delivery rule\n :param bool negate_condition: Describes if this is a negate condition or not\n :param str selector: Name of Cookies to be matched\n :param Sequence[str] transforms: List of transforms\n '
pulumi.set(__self__, 'odata_type', odata_type)
pulumi.set(__self__, 'operator', operator)
if (match_values is not None):
pulumi.set(__self__, 'match_values', match_values)
if (negate_condition is not None):
pulumi.set(__self__, 'negate_condition', negate_condition)
if (selector is not None):
pulumi.set(__self__, 'selector', selector)
if (transforms is not None):
pulumi.set(__self__, 'transforms', transforms) | Defines the parameters for Cookies match conditions
:param str operator: Describes operator to be matched
:param Sequence[str] match_values: The match value for the condition of the delivery rule
:param bool negate_condition: Describes if this is a negate condition or not
:param str selector: Name of Cookies to be matched
:param Sequence[str] transforms: List of transforms | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, odata_type: str, operator: str, match_values: Optional[Sequence[str]]=None, negate_condition: Optional[bool]=None, selector: Optional[str]=None, transforms: Optional[Sequence[str]]=None):
'\n Defines the parameters for Cookies match conditions\n :param str operator: Describes operator to be matched\n :param Sequence[str] match_values: The match value for the condition of the delivery rule\n :param bool negate_condition: Describes if this is negate condition or not\n :param str selector: Name of Cookies to be matched\n :param Sequence[str] transforms: List of transforms\n '
pulumi.set(__self__, 'odata_type', odata_type)
pulumi.set(__self__, 'operator', operator)
if (match_values is not None):
pulumi.set(__self__, 'match_values', match_values)
if (negate_condition is not None):
pulumi.set(__self__, 'negate_condition', negate_condition)
if (selector is not None):
pulumi.set(__self__, 'selector', selector)
if (transforms is not None):
pulumi.set(__self__, 'transforms', transforms) | def __init__(__self__, *, odata_type: str, operator: str, match_values: Optional[Sequence[str]]=None, negate_condition: Optional[bool]=None, selector: Optional[str]=None, transforms: Optional[Sequence[str]]=None):
'\n Defines the parameters for Cookies match conditions\n :param str operator: Describes operator to be matched\n :param Sequence[str] match_values: The match value for the condition of the delivery rule\n :param bool negate_condition: Describes if this is negate condition or not\n :param str selector: Name of Cookies to be matched\n :param Sequence[str] transforms: List of transforms\n '
pulumi.set(__self__, 'odata_type', odata_type)
pulumi.set(__self__, 'operator', operator)
if (match_values is not None):
pulumi.set(__self__, 'match_values', match_values)
if (negate_condition is not None):
pulumi.set(__self__, 'negate_condition', negate_condition)
if (selector is not None):
pulumi.set(__self__, 'selector', selector)
if (transforms is not None):
pulumi.set(__self__, 'transforms', transforms)<|docstring|>Defines the parameters for Cookies match conditions
:param str operator: Describes operator to be matched
:param Sequence[str] match_values: The match value for the condition of the delivery rule
:param bool negate_condition: Describes if this is negate condition or not
:param str selector: Name of Cookies to be matched
:param Sequence[str] transforms: List of transforms<|endoftext|> |
03b4694364f389b4c4a22b6baf22b56dde383de57c1cf997536aa34e452bd570 | @property
@pulumi.getter
def operator(self) -> str:
'\n Describes operator to be matched\n '
return pulumi.get(self, 'operator') | Describes operator to be matched | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | operator | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def operator(self) -> str:
'\n \n '
return pulumi.get(self, 'operator') | @property
@pulumi.getter
def operator(self) -> str:
'\n \n '
return pulumi.get(self, 'operator')<|docstring|>Describes operator to be matched<|endoftext|> |
4a89b4d87551a581a0fba673cb73749627a97982f3b1609a08f38cca7e88078c | @property
@pulumi.getter(name='matchValues')
def match_values(self) -> Optional[Sequence[str]]:
'\n The match value for the condition of the delivery rule\n '
return pulumi.get(self, 'match_values') | The match value for the condition of the delivery rule | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | match_values | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='matchValues')
def match_values(self) -> Optional[Sequence[str]]:
'\n \n '
return pulumi.get(self, 'match_values') | @property
@pulumi.getter(name='matchValues')
def match_values(self) -> Optional[Sequence[str]]:
'\n \n '
return pulumi.get(self, 'match_values')<|docstring|>The match value for the condition of the delivery rule<|endoftext|> |
721cadbb8fd9b3ca322eb4a51a0590628318061974e642eb27d1fc8770c15e24 | @property
@pulumi.getter(name='negateCondition')
def negate_condition(self) -> Optional[bool]:
'\n Describes if this is negate condition or not\n '
return pulumi.get(self, 'negate_condition') | Describes if this is negate condition or not | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | negate_condition | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter(name='negateCondition')
def negate_condition(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'negate_condition') | @property
@pulumi.getter(name='negateCondition')
def negate_condition(self) -> Optional[bool]:
'\n \n '
return pulumi.get(self, 'negate_condition')<|docstring|>Describes if this is negate condition or not<|endoftext|> |
4abc62886febf68e15c7659b09e3525b5ace5d5213794b1e9823914648437896 | @property
@pulumi.getter
def selector(self) -> Optional[str]:
'\n Name of Cookies to be matched\n '
return pulumi.get(self, 'selector') | Name of Cookies to be matched | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | selector | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def selector(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'selector') | @property
@pulumi.getter
def selector(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'selector')<|docstring|>Name of Cookies to be matched<|endoftext|> |
60802afed0a11a40b1580427551db5c17905ecbff5094d60eea135f145a9ab82 | @property
@pulumi.getter
def transforms(self) -> Optional[Sequence[str]]:
'\n List of transforms\n '
return pulumi.get(self, 'transforms') | List of transforms | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | transforms | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def transforms(self) -> Optional[Sequence[str]]:
'\n \n '
return pulumi.get(self, 'transforms') | @property
@pulumi.getter
def transforms(self) -> Optional[Sequence[str]]:
'\n \n '
return pulumi.get(self, 'transforms')<|docstring|>List of transforms<|endoftext|> |
619e328d689d401a7e4746ebc7c4efd3b526185bde26a7156bdbfd65e63e42db | def __init__(__self__, *, name: str, origins: Sequence['outputs.ResourceReferenceResponse'], health_probe_settings: Optional['outputs.HealthProbeParametersResponse']=None, response_based_origin_error_detection_settings: Optional['outputs.ResponseBasedOriginErrorDetectionParametersResponse']=None, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int]=None):
"\n The origin group for CDN content which is added when creating a CDN endpoint. Traffic is sent to the origins within the origin group based on origin health.\n :param str name: Origin group name which must be unique within the endpoint.\n :param Sequence['ResourceReferenceResponseArgs'] origins: The source of the content being delivered via CDN within given origin group.\n :param 'HealthProbeParametersResponseArgs' health_probe_settings: Health probe settings to the origin that is used to determine the health of the origin.\n :param 'ResponseBasedOriginErrorDetectionParametersResponseArgs' response_based_origin_error_detection_settings: The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported.\n :param int traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported.\n "
pulumi.set(__self__, 'name', name)
pulumi.set(__self__, 'origins', origins)
if (health_probe_settings is not None):
pulumi.set(__self__, 'health_probe_settings', health_probe_settings)
if (response_based_origin_error_detection_settings is not None):
pulumi.set(__self__, 'response_based_origin_error_detection_settings', response_based_origin_error_detection_settings)
if (traffic_restoration_time_to_healed_or_new_endpoints_in_minutes is not None):
pulumi.set(__self__, 'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes', traffic_restoration_time_to_healed_or_new_endpoints_in_minutes) | The origin group for CDN content which is added when creating a CDN endpoint. Traffic is sent to the origins within the origin group based on origin health.
:param str name: Origin group name which must be unique within the endpoint.
:param Sequence['ResourceReferenceResponseArgs'] origins: The source of the content being delivered via CDN within given origin group.
:param 'HealthProbeParametersResponseArgs' health_probe_settings: Health probe settings to the origin that is used to determine the health of the origin.
:param 'ResponseBasedOriginErrorDetectionParametersResponseArgs' response_based_origin_error_detection_settings: The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported.
:param int traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported. | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | __init__ | pulumi-bot/pulumi-azure-native | 31 | python | def __init__(__self__, *, name: str, origins: Sequence['outputs.ResourceReferenceResponse'], health_probe_settings: Optional['outputs.HealthProbeParametersResponse']=None, response_based_origin_error_detection_settings: Optional['outputs.ResponseBasedOriginErrorDetectionParametersResponse']=None, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int]=None):
"\n The origin group for CDN content which is added when creating a CDN endpoint. Traffic is sent to the origins within the origin group based on origin health.\n :param str name: Origin group name which must be unique within the endpoint.\n :param Sequence['ResourceReferenceResponseArgs'] origins: The source of the content being delivered via CDN within given origin group.\n :param 'HealthProbeParametersResponseArgs' health_probe_settings: Health probe settings to the origin that is used to determine the health of the origin.\n :param 'ResponseBasedOriginErrorDetectionParametersResponseArgs' response_based_origin_error_detection_settings: The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported.\n :param int traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported.\n "
pulumi.set(__self__, 'name', name)
pulumi.set(__self__, 'origins', origins)
if (health_probe_settings is not None):
pulumi.set(__self__, 'health_probe_settings', health_probe_settings)
if (response_based_origin_error_detection_settings is not None):
pulumi.set(__self__, 'response_based_origin_error_detection_settings', response_based_origin_error_detection_settings)
if (traffic_restoration_time_to_healed_or_new_endpoints_in_minutes is not None):
pulumi.set(__self__, 'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes', traffic_restoration_time_to_healed_or_new_endpoints_in_minutes) | def __init__(__self__, *, name: str, origins: Sequence['outputs.ResourceReferenceResponse'], health_probe_settings: Optional['outputs.HealthProbeParametersResponse']=None, response_based_origin_error_detection_settings: Optional['outputs.ResponseBasedOriginErrorDetectionParametersResponse']=None, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int]=None):
"\n The origin group for CDN content which is added when creating a CDN endpoint. Traffic is sent to the origins within the origin group based on origin health.\n :param str name: Origin group name which must be unique within the endpoint.\n :param Sequence['ResourceReferenceResponseArgs'] origins: The source of the content being delivered via CDN within given origin group.\n :param 'HealthProbeParametersResponseArgs' health_probe_settings: Health probe settings to the origin that is used to determine the health of the origin.\n :param 'ResponseBasedOriginErrorDetectionParametersResponseArgs' response_based_origin_error_detection_settings: The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported.\n :param int traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported.\n "
pulumi.set(__self__, 'name', name)
pulumi.set(__self__, 'origins', origins)
if (health_probe_settings is not None):
pulumi.set(__self__, 'health_probe_settings', health_probe_settings)
if (response_based_origin_error_detection_settings is not None):
pulumi.set(__self__, 'response_based_origin_error_detection_settings', response_based_origin_error_detection_settings)
if (traffic_restoration_time_to_healed_or_new_endpoints_in_minutes is not None):
pulumi.set(__self__, 'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes', traffic_restoration_time_to_healed_or_new_endpoints_in_minutes)<|docstring|>The origin group for CDN content which is added when creating a CDN endpoint. Traffic is sent to the origins within the origin group based on origin health.
:param str name: Origin group name which must be unique within the endpoint.
:param Sequence['ResourceReferenceResponseArgs'] origins: The source of the content being delivered via CDN within given origin group.
:param 'HealthProbeParametersResponseArgs' health_probe_settings: Health probe settings to the origin that is used to determine the health of the origin.
:param 'ResponseBasedOriginErrorDetectionParametersResponseArgs' response_based_origin_error_detection_settings: The JSON object that contains the properties to determine origin health using real requests/responses.This property is currently not supported.
:param int traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new endpoint is added. Default is 10 mins. This property is currently not supported.<|endoftext|> |
e365d8332150c07babd4b784e776465348c40075fd6772f4d324d99ecf5dab41 | @property
@pulumi.getter
def name(self) -> str:
'\n Origin group name which must be unique within the endpoint.\n '
return pulumi.get(self, 'name') | Origin group name which must be unique within the endpoint. | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | name | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name') | @property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name')<|docstring|>Origin group name which must be unique within the endpoint.<|endoftext|> |
f0f9f5cfbcacceea77198016be97a7a7319507b911064ddf3f962a178435b556 | @property
@pulumi.getter
def origins(self) -> Sequence['outputs.ResourceReferenceResponse']:
'\n The source of the content being delivered via CDN within given origin group.\n '
return pulumi.get(self, 'origins') | The source of the content being delivered via CDN within given origin group. | sdk/python/pulumi_azure_native/cdn/v20191231/outputs.py | origins | pulumi-bot/pulumi-azure-native | 31 | python | @property
@pulumi.getter
def origins(self) -> Sequence['outputs.ResourceReferenceResponse']:
'\n \n '
return pulumi.get(self, 'origins') | @property
@pulumi.getter
def origins(self) -> Sequence['outputs.ResourceReferenceResponse']:
'\n \n '
return pulumi.get(self, 'origins')<|docstring|>The source of the content being delivered via CDN within given origin group.<|endoftext|> |