Dataset columns:

- repo: string (lengths 7 to 54)
- path: string (lengths 4 to 192)
- url: string (lengths 87 to 284)
- code: string (lengths 78 to 104k)
- code_tokens: sequence
- docstring: string (lengths 1 to 46.9k)
- docstring_tokens: sequence
- language: string (1 class)
- partition: string (3 classes)
BernardFW/bernard | src/bernard/i18n/utils.py | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/i18n/utils.py#L14-L27 | python | train

```python
def split_locale(locale: Text) -> Tuple[Text, Optional[Text]]:
    """
    Decompose the locale into a normalized tuple.

    The first item is the locale (as lowercase letters) while the second item
    is either the country (as lowercase letters) or None if no country was
    supplied.
    """
    items = re.split(r'[_\-]', locale.lower(), 1)

    try:
        return items[0], items[1]
    except IndexError:
        return items[0], None
```
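A few illustrative calls (expected results inferred from the body above; `Text` is `str` at runtime):

```python
# Illustrative only: behavior inferred from the implementation above.
assert split_locale("fr_FR") == ("fr", "fr")   # lowercased, split on "_"
assert split_locale("en-US") == ("en", "us")   # "-" is also a separator
assert split_locale("de") == ("de", None)      # no country part supplied
```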
"def",
"split_locale",
"(",
"locale",
":",
"Text",
")",
"->",
"Tuple",
"[",
"Text",
",",
"Optional",
"[",
"Text",
"]",
"]",
":",
"items",
"=",
"re",
".",
"split",
"(",
"r'[_\\-]'",
",",
"locale",
".",
"lower",
"(",
")",
",",
"1",
")",
"try",
":",
"return",
"items",
"[",
"0",
"]",
",",
"items",
"[",
"1",
"]",
"except",
"IndexError",
":",
"return",
"items",
"[",
"0",
"]",
",",
"None"
] | Decompose the locale into a normalized tuple.
The first item is the locale (as lowercase letters) while the second item
is either the country as lower case either None if no country was supplied. | [
"Decompose",
"the",
"locale",
"into",
"a",
"normalized",
"tuple",
"."
] | python | train |
tcalmant/ipopo | pelix/ipopo/handlers/provides.py | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/provides.py#L189-L224 | python | train

```python
def manipulate(self, stored_instance, component_instance):
    """
    Manipulates the component instance

    :param stored_instance: The iPOPO component StoredInstance
    :param component_instance: The component instance
    """
    # Store the stored instance
    self._ipopo_instance = stored_instance

    if self.__controller is None:
        # No controller: do nothing
        return

    # Get the current value of the member (True by default)
    controller_value = getattr(component_instance, self.__controller, True)

    # Store the controller value
    stored_instance.set_controller_state(
        self.__controller, controller_value
    )

    # Prepare the methods names
    getter_name = "{0}{1}".format(
        ipopo_constants.IPOPO_CONTROLLER_PREFIX,
        ipopo_constants.IPOPO_GETTER_SUFFIX,
    )
    setter_name = "{0}{1}".format(
        ipopo_constants.IPOPO_CONTROLLER_PREFIX,
        ipopo_constants.IPOPO_SETTER_SUFFIX,
    )

    # Inject the getter and setter at the instance level
    getter, setter = self._field_controller_generator()
    setattr(component_instance, getter_name, getter)
    setattr(component_instance, setter_name, setter)
```
"def",
"manipulate",
"(",
"self",
",",
"stored_instance",
",",
"component_instance",
")",
":",
"# Store the stored instance",
"self",
".",
"_ipopo_instance",
"=",
"stored_instance",
"if",
"self",
".",
"__controller",
"is",
"None",
":",
"# No controller: do nothing",
"return",
"# Get the current value of the member (True by default)",
"controller_value",
"=",
"getattr",
"(",
"component_instance",
",",
"self",
".",
"__controller",
",",
"True",
")",
"# Store the controller value",
"stored_instance",
".",
"set_controller_state",
"(",
"self",
".",
"__controller",
",",
"controller_value",
")",
"# Prepare the methods names",
"getter_name",
"=",
"\"{0}{1}\"",
".",
"format",
"(",
"ipopo_constants",
".",
"IPOPO_CONTROLLER_PREFIX",
",",
"ipopo_constants",
".",
"IPOPO_GETTER_SUFFIX",
",",
")",
"setter_name",
"=",
"\"{0}{1}\"",
".",
"format",
"(",
"ipopo_constants",
".",
"IPOPO_CONTROLLER_PREFIX",
",",
"ipopo_constants",
".",
"IPOPO_SETTER_SUFFIX",
",",
")",
"# Inject the getter and setter at the instance level",
"getter",
",",
"setter",
"=",
"self",
".",
"_field_controller_generator",
"(",
")",
"setattr",
"(",
"component_instance",
",",
"getter_name",
",",
"getter",
")",
"setattr",
"(",
"component_instance",
",",
"setter_name",
",",
"setter",
")"
] | Manipulates the component instance
:param stored_instance: The iPOPO component StoredInstance
:param component_instance: The component instance | [
"Manipulates",
"the",
"component",
"instance"
] | python | train |
shapiromatron/bmds | bmds/models/base.py | https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/models/base.py#L155-L163 | python | train

```python
def execution_duration(self):
    """
    Returns total BMDS execution time, in seconds.
    """
    duration = None
    if self.execution_start and self.execution_end:
        delta = self.execution_end - self.execution_start
        duration = delta.total_seconds()
    return duration
```
"def",
"execution_duration",
"(",
"self",
")",
":",
"duration",
"=",
"None",
"if",
"self",
".",
"execution_start",
"and",
"self",
".",
"execution_end",
":",
"delta",
"=",
"self",
".",
"execution_end",
"-",
"self",
".",
"execution_start",
"duration",
"=",
"delta",
".",
"total_seconds",
"(",
")",
"return",
"duration"
] | Returns total BMDS execution time, in seconds. | [
"Returns",
"total",
"BMDS",
"execution",
"time",
"in",
"seconds",
"."
] | python | train |
Microsoft/nni | examples/trials/ga_squad/train_model.py | https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/ga_squad/train_model.py#L87-L233 | python | train

```python
def build_net(self, is_training):
    """Build the whole neural network for the QA model."""
    cfg = self.cfg
    with tf.device('/cpu:0'):
        word_embed = tf.get_variable(
            name='word_embed', initializer=self.embed, dtype=tf.float32, trainable=False)
        char_embed = tf.get_variable(name='char_embed',
                                     shape=[cfg.char_vcb_size,
                                            cfg.char_embed_dim],
                                     dtype=tf.float32)

    # [query_length, batch_size]
    self.query_word = tf.placeholder(dtype=tf.int32,
                                     shape=[None, None],
                                     name='query_word')
    self.query_mask = tf.placeholder(dtype=tf.float32,
                                     shape=[None, None],
                                     name='query_mask')
    # [batch_size]
    self.query_lengths = tf.placeholder(
        dtype=tf.int32, shape=[None], name='query_lengths')

    # [passage_length, batch_size]
    self.passage_word = tf.placeholder(
        dtype=tf.int32, shape=[None, None], name='passage_word')
    self.passage_mask = tf.placeholder(
        dtype=tf.float32, shape=[None, None], name='passage_mask')
    # [batch_size]
    self.passage_lengths = tf.placeholder(
        dtype=tf.int32, shape=[None], name='passage_lengths')

    if is_training:
        self.answer_begin = tf.placeholder(
            dtype=tf.int32, shape=[None], name='answer_begin')
        self.answer_end = tf.placeholder(
            dtype=tf.int32, shape=[None], name='answer_end')

    self.query_char_ids = tf.placeholder(dtype=tf.int32,
                                         shape=[
                                             self.cfg.max_char_length, None, None],
                                         name='query_char_ids')
    # sequence_length, batch_size
    self.query_char_lengths = tf.placeholder(
        dtype=tf.int32, shape=[None, None], name='query_char_lengths')
    self.passage_char_ids = tf.placeholder(dtype=tf.int32,
                                           shape=[
                                               self.cfg.max_char_length, None, None],
                                           name='passage_char_ids')
    # sequence_length, batch_size
    self.passage_char_lengths = tf.placeholder(dtype=tf.int32,
                                               shape=[None, None],
                                               name='passage_char_lengths')

    query_char_states = self.build_char_states(char_embed=char_embed,
                                               is_training=is_training,
                                               reuse=False,
                                               char_ids=self.query_char_ids,
                                               char_lengths=self.query_char_lengths)
    passage_char_states = self.build_char_states(char_embed=char_embed,
                                                 is_training=is_training,
                                                 reuse=True,
                                                 char_ids=self.passage_char_ids,
                                                 char_lengths=self.passage_char_lengths)

    with tf.variable_scope("encoding") as scope:
        query_states = tf.concat([tf.nn.embedding_lookup(
            word_embed, self.query_word), query_char_states], axis=2)
        scope.reuse_variables()
        passage_states = tf.concat([tf.nn.embedding_lookup(
            word_embed, self.passage_word), passage_char_states], axis=2)
    passage_states = tf.transpose(passage_states, perm=[1, 0, 2])
    query_states = tf.transpose(query_states, perm=[1, 0, 2])
    self.passage_states = passage_states
    self.query_states = query_states

    output, output2 = graph_to_network(passage_states, query_states,
                                       self.passage_lengths, self.query_lengths,
                                       self.graph, self.cfg.dropout,
                                       is_training, num_heads=cfg.num_heads,
                                       rnn_units=cfg.rnn_units)

    passage_att_mask = self.passage_mask
    batch_size_x = tf.shape(self.query_lengths)
    answer_h = tf.zeros(
        tf.concat([batch_size_x, tf.constant([cfg.ptr_dim], dtype=tf.int32)], axis=0))

    answer_context = tf.reduce_mean(output2, axis=1)

    query_init_w = tf.get_variable(
        'query_init_w', shape=[output2.get_shape().as_list()[-1], cfg.ptr_dim])
    self.query_init = query_init_w
    answer_context = tf.matmul(answer_context, query_init_w)

    output = tf.transpose(output, perm=[1, 0, 2])

    with tf.variable_scope('answer_ptr_layer'):
        ptr_att = DotAttention('ptr',
                               hidden_dim=cfg.ptr_dim,
                               is_vanilla=self.cfg.att_is_vanilla,
                               is_identity_transform=self.cfg.att_is_id,
                               need_padding=self.cfg.att_need_padding)
        answer_pre_compute = ptr_att.get_pre_compute(output)
        ptr_gru = XGRUCell(hidden_dim=cfg.ptr_dim)
        begin_prob, begin_logits = ptr_att.get_prob(output, answer_context, passage_att_mask,
                                                    answer_pre_compute, True)
        att_state = ptr_att.get_att(output, begin_prob)
        (_, answer_h) = ptr_gru.call(inputs=att_state, state=answer_h)
        answer_context = answer_h
        end_prob, end_logits = ptr_att.get_prob(output, answer_context,
                                                passage_att_mask, answer_pre_compute,
                                                True)

    self.begin_prob = tf.transpose(begin_prob, perm=[1, 0])
    self.end_prob = tf.transpose(end_prob, perm=[1, 0])
    begin_logits = tf.transpose(begin_logits, perm=[1, 0])
    end_logits = tf.transpose(end_logits, perm=[1, 0])

    if is_training:
        def label_smoothing(inputs, masks, epsilon=0.1):
            """Modify target for label smoothing."""
            epsilon = cfg.labelsmoothing
            num_of_channel = tf.shape(inputs)[-1]  # number of channels
            inputs = tf.cast(inputs, tf.float32)
            return (((1 - epsilon) * inputs) + (epsilon /
                    tf.cast(num_of_channel, tf.float32))) * masks

        cost1 = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(label_smoothing(
                tf.one_hot(self.answer_begin,
                           depth=tf.shape(self.passage_word)[0]),
                tf.transpose(self.passage_mask, perm=[1, 0])), begin_logits))
        cost2 = tf.reduce_mean(
            tf.losses.softmax_cross_entropy(
                label_smoothing(tf.one_hot(self.answer_end,
                                           depth=tf.shape(self.passage_word)[0]),
                                tf.transpose(self.passage_mask, perm=[1, 0])), end_logits))
        reg_ws = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        l2_loss = tf.reduce_sum(reg_ws)
        loss = cost1 + cost2 + l2_loss
        self.loss = loss
        optimizer = tf.train.AdamOptimizer(learning_rate=cfg.learning_rate)
        self.train_op = optimizer.minimize(self.loss)

    return tf.stack([self.begin_prob, self.end_prob])
```
"def",
"build_net",
"(",
"self",
",",
"is_training",
")",
":",
"cfg",
"=",
"self",
".",
"cfg",
"with",
"tf",
".",
"device",
"(",
"'/cpu:0'",
")",
":",
"word_embed",
"=",
"tf",
".",
"get_variable",
"(",
"name",
"=",
"'word_embed'",
",",
"initializer",
"=",
"self",
".",
"embed",
",",
"dtype",
"=",
"tf",
".",
"float32",
",",
"trainable",
"=",
"False",
")",
"char_embed",
"=",
"tf",
".",
"get_variable",
"(",
"name",
"=",
"'char_embed'",
",",
"shape",
"=",
"[",
"cfg",
".",
"char_vcb_size",
",",
"cfg",
".",
"char_embed_dim",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# [query_length, batch_size]",
"self",
".",
"query_word",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'query_word'",
")",
"self",
".",
"query_mask",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'query_mask'",
")",
"# [batch_size]",
"self",
".",
"query_lengths",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
"]",
",",
"name",
"=",
"'query_lengths'",
")",
"# [passage_length, batch_size]",
"self",
".",
"passage_word",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'passage_word'",
")",
"self",
".",
"passage_mask",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'passage_mask'",
")",
"# [batch_size]",
"self",
".",
"passage_lengths",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
"]",
",",
"name",
"=",
"'passage_lengths'",
")",
"if",
"is_training",
":",
"self",
".",
"answer_begin",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
"]",
",",
"name",
"=",
"'answer_begin'",
")",
"self",
".",
"answer_end",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
"]",
",",
"name",
"=",
"'answer_end'",
")",
"self",
".",
"query_char_ids",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"self",
".",
"cfg",
".",
"max_char_length",
",",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'query_char_ids'",
")",
"# sequence_length, batch_size",
"self",
".",
"query_char_lengths",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'query_char_lengths'",
")",
"self",
".",
"passage_char_ids",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"self",
".",
"cfg",
".",
"max_char_length",
",",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'passage_char_ids'",
")",
"# sequence_length, batch_size",
"self",
".",
"passage_char_lengths",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"int32",
",",
"shape",
"=",
"[",
"None",
",",
"None",
"]",
",",
"name",
"=",
"'passage_char_lengths'",
")",
"query_char_states",
"=",
"self",
".",
"build_char_states",
"(",
"char_embed",
"=",
"char_embed",
",",
"is_training",
"=",
"is_training",
",",
"reuse",
"=",
"False",
",",
"char_ids",
"=",
"self",
".",
"query_char_ids",
",",
"char_lengths",
"=",
"self",
".",
"query_char_lengths",
")",
"passage_char_states",
"=",
"self",
".",
"build_char_states",
"(",
"char_embed",
"=",
"char_embed",
",",
"is_training",
"=",
"is_training",
",",
"reuse",
"=",
"True",
",",
"char_ids",
"=",
"self",
".",
"passage_char_ids",
",",
"char_lengths",
"=",
"self",
".",
"passage_char_lengths",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"\"encoding\"",
")",
"as",
"scope",
":",
"query_states",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"word_embed",
",",
"self",
".",
"query_word",
")",
",",
"query_char_states",
"]",
",",
"axis",
"=",
"2",
")",
"scope",
".",
"reuse_variables",
"(",
")",
"passage_states",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"nn",
".",
"embedding_lookup",
"(",
"word_embed",
",",
"self",
".",
"passage_word",
")",
",",
"passage_char_states",
"]",
",",
"axis",
"=",
"2",
")",
"passage_states",
"=",
"tf",
".",
"transpose",
"(",
"passage_states",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"query_states",
"=",
"tf",
".",
"transpose",
"(",
"query_states",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"self",
".",
"passage_states",
"=",
"passage_states",
"self",
".",
"query_states",
"=",
"query_states",
"output",
",",
"output2",
"=",
"graph_to_network",
"(",
"passage_states",
",",
"query_states",
",",
"self",
".",
"passage_lengths",
",",
"self",
".",
"query_lengths",
",",
"self",
".",
"graph",
",",
"self",
".",
"cfg",
".",
"dropout",
",",
"is_training",
",",
"num_heads",
"=",
"cfg",
".",
"num_heads",
",",
"rnn_units",
"=",
"cfg",
".",
"rnn_units",
")",
"passage_att_mask",
"=",
"self",
".",
"passage_mask",
"batch_size_x",
"=",
"tf",
".",
"shape",
"(",
"self",
".",
"query_lengths",
")",
"answer_h",
"=",
"tf",
".",
"zeros",
"(",
"tf",
".",
"concat",
"(",
"[",
"batch_size_x",
",",
"tf",
".",
"constant",
"(",
"[",
"cfg",
".",
"ptr_dim",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
"]",
",",
"axis",
"=",
"0",
")",
")",
"answer_context",
"=",
"tf",
".",
"reduce_mean",
"(",
"output2",
",",
"axis",
"=",
"1",
")",
"query_init_w",
"=",
"tf",
".",
"get_variable",
"(",
"'query_init_w'",
",",
"shape",
"=",
"[",
"output2",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
"]",
",",
"cfg",
".",
"ptr_dim",
"]",
")",
"self",
".",
"query_init",
"=",
"query_init_w",
"answer_context",
"=",
"tf",
".",
"matmul",
"(",
"answer_context",
",",
"query_init_w",
")",
"output",
"=",
"tf",
".",
"transpose",
"(",
"output",
",",
"perm",
"=",
"[",
"1",
",",
"0",
",",
"2",
"]",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"'answer_ptr_layer'",
")",
":",
"ptr_att",
"=",
"DotAttention",
"(",
"'ptr'",
",",
"hidden_dim",
"=",
"cfg",
".",
"ptr_dim",
",",
"is_vanilla",
"=",
"self",
".",
"cfg",
".",
"att_is_vanilla",
",",
"is_identity_transform",
"=",
"self",
".",
"cfg",
".",
"att_is_id",
",",
"need_padding",
"=",
"self",
".",
"cfg",
".",
"att_need_padding",
")",
"answer_pre_compute",
"=",
"ptr_att",
".",
"get_pre_compute",
"(",
"output",
")",
"ptr_gru",
"=",
"XGRUCell",
"(",
"hidden_dim",
"=",
"cfg",
".",
"ptr_dim",
")",
"begin_prob",
",",
"begin_logits",
"=",
"ptr_att",
".",
"get_prob",
"(",
"output",
",",
"answer_context",
",",
"passage_att_mask",
",",
"answer_pre_compute",
",",
"True",
")",
"att_state",
"=",
"ptr_att",
".",
"get_att",
"(",
"output",
",",
"begin_prob",
")",
"(",
"_",
",",
"answer_h",
")",
"=",
"ptr_gru",
".",
"call",
"(",
"inputs",
"=",
"att_state",
",",
"state",
"=",
"answer_h",
")",
"answer_context",
"=",
"answer_h",
"end_prob",
",",
"end_logits",
"=",
"ptr_att",
".",
"get_prob",
"(",
"output",
",",
"answer_context",
",",
"passage_att_mask",
",",
"answer_pre_compute",
",",
"True",
")",
"self",
".",
"begin_prob",
"=",
"tf",
".",
"transpose",
"(",
"begin_prob",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
"self",
".",
"end_prob",
"=",
"tf",
".",
"transpose",
"(",
"end_prob",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
"begin_logits",
"=",
"tf",
".",
"transpose",
"(",
"begin_logits",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
"end_logits",
"=",
"tf",
".",
"transpose",
"(",
"end_logits",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
"if",
"is_training",
":",
"def",
"label_smoothing",
"(",
"inputs",
",",
"masks",
",",
"epsilon",
"=",
"0.1",
")",
":",
"\"\"\"Modify target for label smoothing.\"\"\"",
"epsilon",
"=",
"cfg",
".",
"labelsmoothing",
"num_of_channel",
"=",
"tf",
".",
"shape",
"(",
"inputs",
")",
"[",
"-",
"1",
"]",
"# number of channels",
"inputs",
"=",
"tf",
".",
"cast",
"(",
"inputs",
",",
"tf",
".",
"float32",
")",
"return",
"(",
"(",
"(",
"1",
"-",
"epsilon",
")",
"*",
"inputs",
")",
"+",
"(",
"epsilon",
"/",
"tf",
".",
"cast",
"(",
"num_of_channel",
",",
"tf",
".",
"float32",
")",
")",
")",
"*",
"masks",
"cost1",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"losses",
".",
"softmax_cross_entropy",
"(",
"label_smoothing",
"(",
"tf",
".",
"one_hot",
"(",
"self",
".",
"answer_begin",
",",
"depth",
"=",
"tf",
".",
"shape",
"(",
"self",
".",
"passage_word",
")",
"[",
"0",
"]",
")",
",",
"tf",
".",
"transpose",
"(",
"self",
".",
"passage_mask",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
")",
",",
"begin_logits",
")",
")",
"cost2",
"=",
"tf",
".",
"reduce_mean",
"(",
"tf",
".",
"losses",
".",
"softmax_cross_entropy",
"(",
"label_smoothing",
"(",
"tf",
".",
"one_hot",
"(",
"self",
".",
"answer_end",
",",
"depth",
"=",
"tf",
".",
"shape",
"(",
"self",
".",
"passage_word",
")",
"[",
"0",
"]",
")",
",",
"tf",
".",
"transpose",
"(",
"self",
".",
"passage_mask",
",",
"perm",
"=",
"[",
"1",
",",
"0",
"]",
")",
")",
",",
"end_logits",
")",
")",
"reg_ws",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"REGULARIZATION_LOSSES",
")",
"l2_loss",
"=",
"tf",
".",
"reduce_sum",
"(",
"reg_ws",
")",
"loss",
"=",
"cost1",
"+",
"cost2",
"+",
"l2_loss",
"self",
".",
"loss",
"=",
"loss",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"learning_rate",
"=",
"cfg",
".",
"learning_rate",
")",
"self",
".",
"train_op",
"=",
"optimizer",
".",
"minimize",
"(",
"self",
".",
"loss",
")",
"return",
"tf",
".",
"stack",
"(",
"[",
"self",
".",
"begin_prob",
",",
"self",
".",
"end_prob",
"]",
")"
] | Build the whole neural network for the QA model. | [
"Build",
"the",
"whole",
"neural",
"network",
"for",
"the",
"QA",
"model",
"."
] | python | train |
bachya/pyflunearyou | pyflunearyou/report.py | https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/report.py#L45-L70 | python | train

```python
async def nearest_by_coordinates(
        self, latitude: float, longitude: float) -> dict:
    """Get the nearest report (with local and state info) to a lat/lon."""
    # Since user data is more granular than state or CDC data, find the
    # user report whose coordinates are closest to the provided
    # coordinates:
    nearest_user_report = get_nearest_by_coordinates(
        await self.user_reports(), 'latitude', 'longitude', latitude,
        longitude)

    try:
        # If the user report corresponds to a known state in
        # flunearyou.org's database, we can safely assume that's the
        # correct state:
        nearest_state = next((
            state for state in await self.state_data()
            if state['place_id'] == nearest_user_report['contained_by']))
    except StopIteration:
        # If a place ID doesn't exist (e.g., ZIP Code 98012 doesn't have a
        # place ID), calculate the nearest state by measuring the distance
        # from the provided latitude/longitude to flunearyou.org's
        # latitude/longitude that defines each state:
        nearest_state = get_nearest_by_coordinates(
            await self.state_data(), 'lat', 'lon', latitude, longitude)

    return {'local': nearest_user_report, 'state': nearest_state}
```
"async",
"def",
"nearest_by_coordinates",
"(",
"self",
",",
"latitude",
":",
"float",
",",
"longitude",
":",
"float",
")",
"->",
"dict",
":",
"# Since user data is more granular than state or CDC data, find the",
"# user report whose coordinates are closest to the provided",
"# coordinates:",
"nearest_user_report",
"=",
"get_nearest_by_coordinates",
"(",
"await",
"self",
".",
"user_reports",
"(",
")",
",",
"'latitude'",
",",
"'longitude'",
",",
"latitude",
",",
"longitude",
")",
"try",
":",
"# If the user report corresponds to a known state in",
"# flunearyou.org's database, we can safely assume that's the",
"# correct state:",
"nearest_state",
"=",
"next",
"(",
"(",
"state",
"for",
"state",
"in",
"await",
"self",
".",
"state_data",
"(",
")",
"if",
"state",
"[",
"'place_id'",
"]",
"==",
"nearest_user_report",
"[",
"'contained_by'",
"]",
")",
")",
"except",
"StopIteration",
":",
"# If a place ID doesn't exist (e.g., ZIP Code 98012 doesn't have a",
"# place ID), calculate the nearest state by measuring the distance",
"# from the provided latitude/longitude to flunearyou.org's",
"# latitude/longitude that defines each state:",
"nearest_state",
"=",
"get_nearest_by_coordinates",
"(",
"await",
"self",
".",
"state_data",
"(",
")",
",",
"'lat'",
",",
"'lon'",
",",
"latitude",
",",
"longitude",
")",
"return",
"{",
"'local'",
":",
"nearest_user_report",
",",
"'state'",
":",
"nearest_state",
"}"
] | Get the nearest report (with local and state info) to a lat/lon. | [
"Get",
"the",
"nearest",
"report",
"(",
"with",
"local",
"and",
"state",
"info",
")",
"to",
"a",
"lat",
"/",
"lon",
"."
] | python | train |
ethereum/py-evm | eth/chains/base.py | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L443-L451 | python | train

```python
def from_genesis_header(cls,
                        base_db: BaseAtomicDB,
                        genesis_header: BlockHeader) -> 'BaseChain':
    """
    Initializes the chain from the genesis header.
    """
    chaindb = cls.get_chaindb_class()(base_db)
    chaindb.persist_header(genesis_header)
    return cls(base_db)
```
"def",
"from_genesis_header",
"(",
"cls",
",",
"base_db",
":",
"BaseAtomicDB",
",",
"genesis_header",
":",
"BlockHeader",
")",
"->",
"'BaseChain'",
":",
"chaindb",
"=",
"cls",
".",
"get_chaindb_class",
"(",
")",
"(",
"base_db",
")",
"chaindb",
".",
"persist_header",
"(",
"genesis_header",
")",
"return",
"cls",
"(",
"base_db",
")"
] | Initializes the chain from the genesis header. | [
"Initializes",
"the",
"chain",
"from",
"the",
"genesis",
"header",
"."
] | python | train |
openpermissions/koi | koi/commands.py | https://github.com/openpermissions/koi/blob/d721f8e1dfa8f07ad265d9dec32e8aaf80a9f281/koi/commands.py#L224-L240 | python | train

```python
def run(func):
    """Execute the provided function if there are no subcommands"""
    @defaults.command(help='Run the service')
    @click.pass_context
    def runserver(ctx, *args, **kwargs):
        if (ctx.parent.invoked_subcommand and
                ctx.command.name != ctx.parent.invoked_subcommand):
            return

        # work around the fact that tornado's parse_command_line can't
        # cope with having subcommands / positional arguments.
        sys.argv = [sys.argv[0]] + [a for a in sys.argv if a[0] == '-']

        sys.exit(func())
    return runserver
```
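The `sys.argv` rewrite above is the key trick: it drops positional arguments (such as the subcommand name) so tornado's `parse_command_line` only sees flags. A standalone sketch of just that filtering step, with made-up argv values:

```python
import sys

# Hypothetical command line: a subcommand plus two flags (illustrative only).
sys.argv = ["myservice", "runserver", "--port=8080", "--debug"]
# Keep argv[0] and anything starting with "-", dropping positionals.
sys.argv = [sys.argv[0]] + [a for a in sys.argv if a[0] == "-"]
print(sys.argv)  # ['myservice', '--port=8080', '--debug']
```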
"def",
"run",
"(",
"func",
")",
":",
"@",
"defaults",
".",
"command",
"(",
"help",
"=",
"'Run the service'",
")",
"@",
"click",
".",
"pass_context",
"def",
"runserver",
"(",
"ctx",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"ctx",
".",
"parent",
".",
"invoked_subcommand",
"and",
"ctx",
".",
"command",
".",
"name",
"!=",
"ctx",
".",
"parent",
".",
"invoked_subcommand",
")",
":",
"return",
"# work around the fact that tornado's parse_command_line can't",
"# cope with having subcommands / positional arguments.",
"sys",
".",
"argv",
"=",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
"]",
"+",
"[",
"a",
"for",
"a",
"in",
"sys",
".",
"argv",
"if",
"a",
"[",
"0",
"]",
"==",
"'-'",
"]",
"sys",
".",
"exit",
"(",
"func",
"(",
")",
")",
"return",
"runserver"
] | Execute the provided function if there are no subcommands | [
"Execute",
"the",
"provided",
"function",
"if",
"there",
"are",
"no",
"subcommands"
] | python | train |
chrisspen/burlap | burlap/git.py | https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/git.py#L179-L216 | python | valid

```python
def pull(self, path, use_sudo=False, user=None, force=False):
    """
    Fetch changes from the default remote repository and merge them.

    :param path: Path of the working copy directory. This directory must exist
                 and be a Git working copy with a default remote to pull from.
    :type path: str

    :param use_sudo: If ``True`` execute ``git`` with
                     :func:`fabric.operations.sudo`, else with
                     :func:`fabric.operations.run`.
    :type use_sudo: bool

    :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
                 with the given user. If ``use_sudo is False`` this parameter
                 has no effect.
    :type user: str

    :param force: If ``True``, append the ``--force`` option to the command.
    :type force: bool
    """
    if path is None:
        raise ValueError("Path to the working copy is needed to pull from a remote repository.")

    options = []
    if force:
        options.append('--force')
    options = ' '.join(options)

    cmd = 'git pull %s' % options

    with cd(path):
        if use_sudo and user is None:
            run_as_root(cmd)
        elif use_sudo:
            sudo(cmd, user=user)
        else:
            run(cmd)
```
"def",
"pull",
"(",
"self",
",",
"path",
",",
"use_sudo",
"=",
"False",
",",
"user",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"if",
"path",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Path to the working copy is needed to pull from a remote repository.\"",
")",
"options",
"=",
"[",
"]",
"if",
"force",
":",
"options",
".",
"append",
"(",
"'--force'",
")",
"options",
"=",
"' '",
".",
"join",
"(",
"options",
")",
"cmd",
"=",
"'git pull %s'",
"%",
"options",
"with",
"cd",
"(",
"path",
")",
":",
"if",
"use_sudo",
"and",
"user",
"is",
"None",
":",
"run_as_root",
"(",
"cmd",
")",
"elif",
"use_sudo",
":",
"sudo",
"(",
"cmd",
",",
"user",
"=",
"user",
")",
"else",
":",
"run",
"(",
"cmd",
")"
] | Fetch changes from the default remote repository and merge them.
:param path: Path of the working copy directory. This directory must exist
and be a Git working copy with a default remote to pull from.
:type path: str
:param use_sudo: If ``True`` execute ``git`` with
:func:`fabric.operations.sudo`, else with
:func:`fabric.operations.run`.
:type use_sudo: bool
:param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo`
with the given user. If ``use_sudo is False`` this parameter
has no effect.
:type user: str
:param force: If ``True``, append the ``--force`` option to the command.
:type force: bool | [
"Fetch",
"changes",
"from",
"the",
"default",
"remote",
"repository",
"and",
"merge",
"them",
"."
] | python | valid |
Erotemic/utool | utool/util_setup.py | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_setup.py#L103-L110 | python | train

```python
def setup_chmod(setup_fpath, setup_dir, chmod_patterns):
    """ Gives files matching pattern the same chmod flags as setup.py """
    #st_mode = os.stat(setup_fpath).st_mode
    st_mode = 33277
    for pattern in chmod_patterns:
        for fpath in util_path.glob(setup_dir, pattern, recursive=True):
            print('[setup] chmod fpath=%r' % fpath)
            os.chmod(fpath, st_mode)
```
"def",
"setup_chmod",
"(",
"setup_fpath",
",",
"setup_dir",
",",
"chmod_patterns",
")",
":",
"#st_mode = os.stat(setup_fpath).st_mode",
"st_mode",
"=",
"33277",
"for",
"pattern",
"in",
"chmod_patterns",
":",
"for",
"fpath",
"in",
"util_path",
".",
"glob",
"(",
"setup_dir",
",",
"pattern",
",",
"recursive",
"=",
"True",
")",
":",
"print",
"(",
"'[setup] chmod fpath=%r'",
"%",
"fpath",
")",
"os",
".",
"chmod",
"(",
"fpath",
",",
"st_mode",
")"
] | Gives files matching pattern the same chmod flags as setup.py | [
"Gives",
"files",
"matching",
"pattern",
"the",
"same",
"chmod",
"flags",
"as",
"setup",
".",
"py"
] | python | train |
camsci/meteor-pi | src/pythonModules/meteorpi_db/meteorpi_db/__init__.py | https://github.com/camsci/meteor-pi/blob/7b01527650bd1b2b76d6f364e8122e25b8812c8d/src/pythonModules/meteorpi_db/meteorpi_db/__init__.py#L580-L597 | python | train

```python
def get_observation(self, observation_id):
    """
    Retrieve an existing :class:`meteorpi_model.Observation` by its ID

    :param string observation_id:
        UUID of the observation
    :return:
        A :class:`meteorpi_model.Observation` instance, or None if not found
    """
    search = mp.ObservationSearch(observation_id=observation_id)
    b = search_observations_sql_builder(search)
    sql = b.get_select_sql(columns='l.publicId AS obstory_id, l.name AS obstory_name, '
                                   'o.obsTime, s.name AS obsType, o.publicId, o.uid',
                           skip=0, limit=1, order='o.obsTime DESC')
    obs = list(self.generators.observation_generator(sql=sql, sql_args=b.sql_args))
    if not obs:
        return None
    return obs[0]
```
"def",
"get_observation",
"(",
"self",
",",
"observation_id",
")",
":",
"search",
"=",
"mp",
".",
"ObservationSearch",
"(",
"observation_id",
"=",
"observation_id",
")",
"b",
"=",
"search_observations_sql_builder",
"(",
"search",
")",
"sql",
"=",
"b",
".",
"get_select_sql",
"(",
"columns",
"=",
"'l.publicId AS obstory_id, l.name AS obstory_name, '",
"'o.obsTime, s.name AS obsType, o.publicId, o.uid'",
",",
"skip",
"=",
"0",
",",
"limit",
"=",
"1",
",",
"order",
"=",
"'o.obsTime DESC'",
")",
"obs",
"=",
"list",
"(",
"self",
".",
"generators",
".",
"observation_generator",
"(",
"sql",
"=",
"sql",
",",
"sql_args",
"=",
"b",
".",
"sql_args",
")",
")",
"if",
"not",
"obs",
":",
"return",
"None",
"return",
"obs",
"[",
"0",
"]"
] | Retrieve an existing :class:`meteorpi_model.Observation` by its ID
:param string observation_id:
UUID of the observation
:return:
A :class:`meteorpi_model.Observation` instance, or None if not found | [
"Retrieve",
"an",
"existing",
":",
"class",
":",
"meteorpi_model",
".",
"Observation",
"by",
"its",
"ID"
] | python | train |
klahnakoski/pyLibrary | mo_math/__init__.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/__init__.py#L190-L202 | python | train

```python
def mod(value, mod=1):
    """
    RETURN NON-NEGATIVE MODULO
    RETURN None WHEN GIVEN INVALID ARGUMENTS
    """
    if value == None:
        return None
    elif mod <= 0:
        return None
    elif value < 0:
        return (value % mod + mod) % mod
    else:
        return value % mod
```
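A few illustrative calls; the results follow directly from the branches above:

```python
# Illustrative only.
assert mod(-3, 5) == 2       # negative values wrap into [0, mod)
assert mod(7, 5) == 2
assert mod(3, 0) is None     # non-positive modulus is invalid
assert mod(None, 5) is None  # None input is invalid
```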
"def",
"mod",
"(",
"value",
",",
"mod",
"=",
"1",
")",
":",
"if",
"value",
"==",
"None",
":",
"return",
"None",
"elif",
"mod",
"<=",
"0",
":",
"return",
"None",
"elif",
"value",
"<",
"0",
":",
"return",
"(",
"value",
"%",
"mod",
"+",
"mod",
")",
"%",
"mod",
"else",
":",
"return",
"value",
"%",
"mod"
] | RETURN NON-NEGATIVE MODULO
RETURN None WHEN GIVEN INVALID ARGUMENTS | [
"RETURN",
"NON",
"-",
"NEGATIVE",
"MODULO",
"RETURN",
"None",
"WHEN",
"GIVEN",
"INVALID",
"ARGUMENTS"
] | python | train |
Crunch-io/crunch-cube | src/cr/cube/measures/wishart_pairwise_significance.py | https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/measures/wishart_pairwise_significance.py#L176-L182 | python | train

```python
def _pairwise_chisq(self):
    """Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray.

    Returns a square, symmetric matrix of test statistics for the null
    hypothesis that each vector along *axis* is equal to each other.
    """
    return self._chi_squared(self._proportions, self._margin, self._observed)
```
"def",
"_pairwise_chisq",
"(",
"self",
")",
":",
"return",
"self",
".",
"_chi_squared",
"(",
"self",
".",
"_proportions",
",",
"self",
".",
"_margin",
",",
"self",
".",
"_observed",
")"
] | Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray.
Returns a square, symmetric matrix of test statistics for the null
hypothesis that each vector along *axis* is equal to each other. | [
"Pairwise",
"comparisons",
"(",
"Chi",
"-",
"Square",
")",
"along",
"axis",
"as",
"numpy",
".",
"ndarray",
"."
] | python | train |
google/grr | grr/server/grr_response_server/databases/mysql_users.py | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_users.py#L24-L52 | python | train

```python
def _ResponseToApprovalsWithGrants(response):
  """Converts a generator with approval rows into ApprovalRequest objects."""
  prev_triplet = None
  cur_approval_request = None
  for (approval_id_int, approval_timestamp, approval_request_bytes,
       grantor_username, grant_timestamp) in response:
    cur_triplet = (approval_id_int, approval_timestamp, approval_request_bytes)

    if cur_triplet != prev_triplet:
      prev_triplet = cur_triplet

      if cur_approval_request:
        yield cur_approval_request

      cur_approval_request = mysql_utils.StringToRDFProto(
          rdf_objects.ApprovalRequest, approval_request_bytes)
      cur_approval_request.approval_id = _IntToApprovalID(approval_id_int)
      cur_approval_request.timestamp = mysql_utils.TimestampToRDFDatetime(
          approval_timestamp)

    if grantor_username and grant_timestamp:
      cur_approval_request.grants.append(
          rdf_objects.ApprovalGrant(
              grantor_username=grantor_username,
              timestamp=mysql_utils.TimestampToRDFDatetime(grant_timestamp)))

  if cur_approval_request:
    yield cur_approval_request
```
"def",
"_ResponseToApprovalsWithGrants",
"(",
"response",
")",
":",
"prev_triplet",
"=",
"None",
"cur_approval_request",
"=",
"None",
"for",
"(",
"approval_id_int",
",",
"approval_timestamp",
",",
"approval_request_bytes",
",",
"grantor_username",
",",
"grant_timestamp",
")",
"in",
"response",
":",
"cur_triplet",
"=",
"(",
"approval_id_int",
",",
"approval_timestamp",
",",
"approval_request_bytes",
")",
"if",
"cur_triplet",
"!=",
"prev_triplet",
":",
"prev_triplet",
"=",
"cur_triplet",
"if",
"cur_approval_request",
":",
"yield",
"cur_approval_request",
"cur_approval_request",
"=",
"mysql_utils",
".",
"StringToRDFProto",
"(",
"rdf_objects",
".",
"ApprovalRequest",
",",
"approval_request_bytes",
")",
"cur_approval_request",
".",
"approval_id",
"=",
"_IntToApprovalID",
"(",
"approval_id_int",
")",
"cur_approval_request",
".",
"timestamp",
"=",
"mysql_utils",
".",
"TimestampToRDFDatetime",
"(",
"approval_timestamp",
")",
"if",
"grantor_username",
"and",
"grant_timestamp",
":",
"cur_approval_request",
".",
"grants",
".",
"append",
"(",
"rdf_objects",
".",
"ApprovalGrant",
"(",
"grantor_username",
"=",
"grantor_username",
",",
"timestamp",
"=",
"mysql_utils",
".",
"TimestampToRDFDatetime",
"(",
"grant_timestamp",
")",
")",
")",
"if",
"cur_approval_request",
":",
"yield",
"cur_approval_request"
] | Converts a generator with approval rows into ApprovalRequest objects. | [
"Converts",
"a",
"generator",
"with",
"approval",
"rows",
"into",
"ApprovalRequest",
"objects",
"."
] | python | train |
Capitains/MyCapytain | MyCapytain/resources/prototypes/cts/inventory.py | https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/prototypes/cts/inventory.py#L360-L367 | python | train

```python
def get_description(self, lang=None):
    """ Get the DC description of the object

    :param lang: Lang to retrieve
    :return: Description string representation
    :rtype: Literal
    """
    return self.metadata.get_single(key=RDF_NAMESPACES.CTS.description, lang=lang)
```
"def",
"get_description",
"(",
"self",
",",
"lang",
"=",
"None",
")",
":",
"return",
"self",
".",
"metadata",
".",
"get_single",
"(",
"key",
"=",
"RDF_NAMESPACES",
".",
"CTS",
".",
"description",
",",
"lang",
"=",
"lang",
")"
] | Get the DC description of the object
:param lang: Lang to retrieve
:return: Description string representation
:rtype: Literal | [
"Get",
"the",
"DC",
"description",
"of",
"the",
"object"
] | python | train |
LogicalDash/LiSE | ELiDE/ELiDE/board/pawnspot.py | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/board/pawnspot.py#L91-L116 | python | train

```python
def finalize(self, initial=True):
    """Call this after you've created all the PawnSpot you need and are ready to add them to the board."""
    if getattr(self, '_finalized', False):
        return
    if (
        self.proxy is None or
        not hasattr(self.proxy, 'name')
    ):
        Clock.schedule_once(self.finalize, 0)
        return
    if initial:
        self.name = self.proxy.name
        self.paths = self.proxy.setdefault(
            '_image_paths', self.default_image_paths
        )
        zeroes = [0] * len(self.paths)
        self.offxs = self.proxy.setdefault('_offxs', zeroes)
        self.offys = self.proxy.setdefault('_offys', zeroes)
        self.proxy.connect(self._trigger_pull_from_proxy)
    self.bind(
        paths=self._trigger_push_image_paths,
        offxs=self._trigger_push_offxs,
        offys=self._trigger_push_offys
    )
    self._finalized = True
    self.finalize_children()
```
"def",
"finalize",
"(",
"self",
",",
"initial",
"=",
"True",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'_finalized'",
",",
"False",
")",
":",
"return",
"if",
"(",
"self",
".",
"proxy",
"is",
"None",
"or",
"not",
"hasattr",
"(",
"self",
".",
"proxy",
",",
"'name'",
")",
")",
":",
"Clock",
".",
"schedule_once",
"(",
"self",
".",
"finalize",
",",
"0",
")",
"return",
"if",
"initial",
":",
"self",
".",
"name",
"=",
"self",
".",
"proxy",
".",
"name",
"self",
".",
"paths",
"=",
"self",
".",
"proxy",
".",
"setdefault",
"(",
"'_image_paths'",
",",
"self",
".",
"default_image_paths",
")",
"zeroes",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"self",
".",
"paths",
")",
"self",
".",
"offxs",
"=",
"self",
".",
"proxy",
".",
"setdefault",
"(",
"'_offxs'",
",",
"zeroes",
")",
"self",
".",
"offys",
"=",
"self",
".",
"proxy",
".",
"setdefault",
"(",
"'_offys'",
",",
"zeroes",
")",
"self",
".",
"proxy",
".",
"connect",
"(",
"self",
".",
"_trigger_pull_from_proxy",
")",
"self",
".",
"bind",
"(",
"paths",
"=",
"self",
".",
"_trigger_push_image_paths",
",",
"offxs",
"=",
"self",
".",
"_trigger_push_offxs",
",",
"offys",
"=",
"self",
".",
"_trigger_push_offys",
")",
"self",
".",
"_finalized",
"=",
"True",
"self",
".",
"finalize_children",
"(",
")"
] | Call this after you've created all the PawnSpot you need and are ready to add them to the board. | [
"Call",
"this",
"after",
"you",
"ve",
"created",
"all",
"the",
"PawnSpot",
"you",
"need",
"and",
"are",
"ready",
"to",
"add",
"them",
"to",
"the",
"board",
"."
] | python | train |
theosysbio/means | src/means/simulation/ssa.py | https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/simulation/ssa.py#L215-L234 | python | train

```python
def _gssa(self, initial_conditions, t_max):
    """
    This function is inspired from Yoav Ram's code available at:
    http://nbviewer.ipython.org/github/yoavram/ipython-notebooks/blob/master/GSSA.ipynb

    :param initial_conditions: the initial conditions of the system
    :param t_max: the time when the simulation should stop
    :return:
    """
    # set the initial conditions and t0 = 0.
    species_over_time = [np.array(initial_conditions).astype("int16")]
    t = 0
    time_points = [t]

    while t < t_max and species_over_time[-1].sum() > 0:
        last = species_over_time[-1]
        e, dt = self._draw(last)
        t += dt
        species_over_time.append(last + self.__change[e, :])
        time_points.append(t)

    return time_points, np.array(species_over_time).T
```
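`self._draw` and `self.__change` belong to the surrounding class and are not part of this record. A self-contained sketch of the same Gillespie loop for a made-up birth-death process; the rates and stoichiometry below are illustrative assumptions, not part of the original code:

```python
import numpy as np

rng = np.random.default_rng(0)
change = np.array([[1], [-1]])  # stoichiometry: birth adds one, death removes one

def propensities(x):
    # Made-up rates: constant birth, linear death.
    return np.array([2.0, 0.1 * x[0]])

def draw(x):
    a = propensities(x)
    a0 = a.sum()
    dt = rng.exponential(1.0 / a0)    # waiting time to the next event
    e = rng.choice(len(a), p=a / a0)  # which reaction fires
    return e, dt

t = 0.0
species_over_time = [np.array([10])]
time_points = [t]
while t < 5.0 and species_over_time[-1].sum() > 0:
    e, dt = draw(species_over_time[-1])
    t += dt
    species_over_time.append(species_over_time[-1] + change[e, :])
    time_points.append(t)
```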
"def",
"_gssa",
"(",
"self",
",",
"initial_conditions",
",",
"t_max",
")",
":",
"# set the initial conditions and t0 = 0.",
"species_over_time",
"=",
"[",
"np",
".",
"array",
"(",
"initial_conditions",
")",
".",
"astype",
"(",
"\"int16\"",
")",
"]",
"t",
"=",
"0",
"time_points",
"=",
"[",
"t",
"]",
"while",
"t",
"<",
"t_max",
"and",
"species_over_time",
"[",
"-",
"1",
"]",
".",
"sum",
"(",
")",
">",
"0",
":",
"last",
"=",
"species_over_time",
"[",
"-",
"1",
"]",
"e",
",",
"dt",
"=",
"self",
".",
"_draw",
"(",
"last",
")",
"t",
"+=",
"dt",
"species_over_time",
".",
"append",
"(",
"last",
"+",
"self",
".",
"__change",
"[",
"e",
",",
":",
"]",
")",
"time_points",
".",
"append",
"(",
"t",
")",
"return",
"time_points",
",",
"np",
".",
"array",
"(",
"species_over_time",
")",
".",
"T"
] | This function is inspired from Yoav Ram's code available at:
http://nbviewer.ipython.org/github/yoavram/ipython-notebooks/blob/master/GSSA.ipynb
:param initial_conditions: the initial conditions of the system
:param t_max: the time when the simulation should stop
:return: | [
"This",
"function",
"is",
"inspired",
"from",
"Yoav",
"Ram",
"s",
"code",
"available",
"at",
":",
"http",
":",
"//",
"nbviewer",
".",
"ipython",
".",
"org",
"/",
"github",
"/",
"yoavram",
"/",
"ipython",
"-",
"notebooks",
"/",
"blob",
"/",
"master",
"/",
"GSSA",
".",
"ipynb"
] | python | train |
d0c-s4vage/pfp | pfp/fields.py | https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/fields.py#L1410-L1423 | python | train

```python
def _pfp__parse(self, stream, save_offset=False):
    """Parse the IO stream for this enum

    :stream: An IO stream that can be read from
    :returns: The number of bytes parsed
    """
    res = super(Enum, self)._pfp__parse(stream, save_offset)

    if self._pfp__value in self.enum_vals:
        self.enum_name = self.enum_vals[self._pfp__value]
    else:
        self.enum_name = "?? UNK_ENUM ??"

    return res
```
"def",
"_pfp__parse",
"(",
"self",
",",
"stream",
",",
"save_offset",
"=",
"False",
")",
":",
"res",
"=",
"super",
"(",
"Enum",
",",
"self",
")",
".",
"_pfp__parse",
"(",
"stream",
",",
"save_offset",
")",
"if",
"self",
".",
"_pfp__value",
"in",
"self",
".",
"enum_vals",
":",
"self",
".",
"enum_name",
"=",
"self",
".",
"enum_vals",
"[",
"self",
".",
"_pfp__value",
"]",
"else",
":",
"self",
".",
"enum_name",
"=",
"\"?? UNK_ENUM ??\"",
"return",
"res"
] | Parse the IO stream for this enum
:stream: An IO stream that can be read from
:returns: The number of bytes parsed | [
"Parse",
"the",
"IO",
"stream",
"for",
"this",
"enum"
] | python | train |
ruipgil/TrackToTrip | tracktotrip/smooth.py | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/smooth.py#L47-L58 | python | train

```python
def with_extrapolation(points, noise, n_points):
    """ Smooths a set of points, but it extrapolates some points at the beginning

    Args:
        points (:obj:`list` of :obj:`Point`)
        noise (float): Expected noise, the higher it is the more the path will
            be smoothed.
    Returns:
        :obj:`list` of :obj:`Point`
    """
    n_points = 10
    return kalman_filter(extrapolate_points(points, n_points) + points, noise)[n_points:]
```
"def",
"with_extrapolation",
"(",
"points",
",",
"noise",
",",
"n_points",
")",
":",
"n_points",
"=",
"10",
"return",
"kalman_filter",
"(",
"extrapolate_points",
"(",
"points",
",",
"n_points",
")",
"+",
"points",
",",
"noise",
")",
"[",
"n_points",
":",
"]"
] | Smooths a set of points, but it extrapolates some points at the beginning
Args:
points (:obj:`list` of :obj:`Point`)
noise (float): Expected noise, the higher it is the more the path will
be smoothed.
Returns:
:obj:`list` of :obj:`Point` | [
"Smooths",
"a",
"set",
"of",
"points",
"but",
"it",
"extrapolates",
"some",
"points",
"at",
"the",
"beginning"
] | python | train |
Kozea/wdb | client/wdb/__init__.py | https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L758-L776 | python | train

```python
def receive(self, timeout=None):
    """Receive data through websocket"""
    log.debug('Receiving')
    if not self._socket:
        log.warn('No connection')
        return
    try:
        if timeout:
            rv = self._socket.poll(timeout)
            if not rv:
                log.info('Connection timeouted')
                return 'Quit'
        data = self._socket.recv_bytes()
    except Exception:
        log.error('Connection lost')
        return 'Quit'

    log.debug('Got %s' % data)
    return data.decode('utf-8')
```
"def",
"receive",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"'Receiving'",
")",
"if",
"not",
"self",
".",
"_socket",
":",
"log",
".",
"warn",
"(",
"'No connection'",
")",
"return",
"try",
":",
"if",
"timeout",
":",
"rv",
"=",
"self",
".",
"_socket",
".",
"poll",
"(",
"timeout",
")",
"if",
"not",
"rv",
":",
"log",
".",
"info",
"(",
"'Connection timeouted'",
")",
"return",
"'Quit'",
"data",
"=",
"self",
".",
"_socket",
".",
"recv_bytes",
"(",
")",
"except",
"Exception",
":",
"log",
".",
"error",
"(",
"'Connection lost'",
")",
"return",
"'Quit'",
"log",
".",
"debug",
"(",
"'Got %s'",
"%",
"data",
")",
"return",
"data",
".",
"decode",
"(",
"'utf-8'",
")"
] | Receive data through websocket | [
"Receive",
"data",
"through",
"websocket"
] | python | train |
ambitioninc/rabbitmq-admin | rabbitmq_admin/base.py | https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/base.py#L112-L123 | python | train

```python
def _api_delete(self, url, **kwargs):
    """
    A convenience wrapper for _delete. Adds headers, auth and base url by
    default
    """
    kwargs['url'] = self.url + url
    kwargs['auth'] = self.auth

    headers = deepcopy(self.headers)
    headers.update(kwargs.get('headers', {}))
    kwargs['headers'] = headers

    self._delete(**kwargs)
```
"def",
"_api_delete",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'url'",
"]",
"=",
"self",
".",
"url",
"+",
"url",
"kwargs",
"[",
"'auth'",
"]",
"=",
"self",
".",
"auth",
"headers",
"=",
"deepcopy",
"(",
"self",
".",
"headers",
")",
"headers",
".",
"update",
"(",
"kwargs",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
")",
"kwargs",
"[",
"'headers'",
"]",
"=",
"headers",
"self",
".",
"_delete",
"(",
"*",
"*",
"kwargs",
")"
] | A convenience wrapper for _delete. Adds headers, auth and base url by
default | [
"A",
"convenience",
"wrapper",
"for",
"_delete",
".",
"Adds",
"headers",
"auth",
"and",
"base",
"url",
"by",
"default"
] | python | train |
Accelize/pycosio | pycosio/storage/azure.py | https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/azure.py#L140-L154 | python | train

```python
def _secured_storage_parameters(self):
    """
    Updates storage parameters with unsecure mode.

    Returns:
        dict: Updated storage_parameters.
    """
    parameters = self._storage_parameters or dict()

    # Handles unsecure mode
    if self._unsecure:
        parameters = parameters.copy()
        parameters['protocol'] = 'http'

    return parameters
```
"def",
"_secured_storage_parameters",
"(",
"self",
")",
":",
"parameters",
"=",
"self",
".",
"_storage_parameters",
"or",
"dict",
"(",
")",
"# Handles unsecure mode",
"if",
"self",
".",
"_unsecure",
":",
"parameters",
"=",
"parameters",
".",
"copy",
"(",
")",
"parameters",
"[",
"'protocol'",
"]",
"=",
"'http'",
"return",
"parameters"
] | Updates storage parameters with unsecure mode.
Returns:
dict: Updated storage_parameters. | [
"Updates",
"storage",
"parameters",
"with",
"unsecure",
"mode",
"."
] | python | train |
perrygeo/simanneal | examples/watershed/shapefile.py | https://github.com/perrygeo/simanneal/blob/293bc81b5bc4bf0ba7760a0e4df5ba97fdcf2881/examples/watershed/shapefile.py#L569-L599 | python | train

```python
def __shapefileHeader(self, fileObj, headerType='shp'):
    """Writes the specified header type to the specified file-like object.
    Several of the shapefile formats are so similar that a single generic
    method to read or write them is warranted."""
    f = self.__getFileObj(fileObj)
    f.seek(0)
    # File code, Unused bytes
    f.write(pack(">6i", 9994, 0, 0, 0, 0, 0))
    # File length (Bytes / 2 = 16-bit words)
    if headerType == 'shp':
        f.write(pack(">i", self.__shpFileLength()))
    elif headerType == 'shx':
        f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
    # Version, Shape type
    f.write(pack("<2i", 1000, self.shapeType))
    # The shapefile's bounding box (lower left, upper right)
    if self.shapeType != 0:
        try:
            f.write(pack("<4d", *self.bbox()))
        except error:
            raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
    else:
        f.write(pack("<4d", 0, 0, 0, 0))
    # Elevation
    z = self.zbox()
    # Measure
    m = self.mbox()
    try:
        f.write(pack("<4d", z[0], z[1], m[0], m[1]))
    except error:
        raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
```
"def",
"__shapefileHeader",
"(",
"self",
",",
"fileObj",
",",
"headerType",
"=",
"'shp'",
")",
":",
"f",
"=",
"self",
".",
"__getFileObj",
"(",
"fileObj",
")",
"f",
".",
"seek",
"(",
"0",
")",
"# File code, Unused bytes\r",
"f",
".",
"write",
"(",
"pack",
"(",
"\">6i\"",
",",
"9994",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
")",
"# File length (Bytes / 2 = 16-bit words)\r",
"if",
"headerType",
"==",
"'shp'",
":",
"f",
".",
"write",
"(",
"pack",
"(",
"\">i\"",
",",
"self",
".",
"__shpFileLength",
"(",
")",
")",
")",
"elif",
"headerType",
"==",
"'shx'",
":",
"f",
".",
"write",
"(",
"pack",
"(",
"'>i'",
",",
"(",
"(",
"100",
"+",
"(",
"len",
"(",
"self",
".",
"_shapes",
")",
"*",
"8",
")",
")",
"//",
"2",
")",
")",
")",
"# Version, Shape type\r",
"f",
".",
"write",
"(",
"pack",
"(",
"\"<2i\"",
",",
"1000",
",",
"self",
".",
"shapeType",
")",
")",
"# The shapefile's bounding box (lower left, upper right)\r",
"if",
"self",
".",
"shapeType",
"!=",
"0",
":",
"try",
":",
"f",
".",
"write",
"(",
"pack",
"(",
"\"<4d\"",
",",
"*",
"self",
".",
"bbox",
"(",
")",
")",
")",
"except",
"error",
":",
"raise",
"ShapefileException",
"(",
"\"Failed to write shapefile bounding box. Floats required.\"",
")",
"else",
":",
"f",
".",
"write",
"(",
"pack",
"(",
"\"<4d\"",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
")",
"# Elevation\r",
"z",
"=",
"self",
".",
"zbox",
"(",
")",
"# Measure\r",
"m",
"=",
"self",
".",
"mbox",
"(",
")",
"try",
":",
"f",
".",
"write",
"(",
"pack",
"(",
"\"<4d\"",
",",
"z",
"[",
"0",
"]",
",",
"z",
"[",
"1",
"]",
",",
"m",
"[",
"0",
"]",
",",
"m",
"[",
"1",
"]",
")",
")",
"except",
"error",
":",
"raise",
"ShapefileException",
"(",
"\"Failed to write shapefile elevation and measure values. Floats required.\"",
")"
] | Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted. | [
"Writes",
"the",
"specified",
"header",
"type",
"to",
"the",
"specified",
"file",
"-",
"like",
"object",
".",
"Several",
"of",
"the",
"shapefile",
"formats",
"are",
"so",
"similar",
"that",
"a",
"single",
"generic",
"method",
"to",
"read",
"or",
"write",
"them",
"is",
"warranted",
"."
] | python | train |
explosion/spaCy | spacy/displacy/render.py | https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/displacy/render.py#L172-L197 | python | train

```python
def get_arrowhead(self, direction, x, y, end):
    """Render individual arrow head.

    direction (unicode): Arrow direction, 'left' or 'right'.
    x (int): X-coordinate of arrow start point.
    y (int): Y-coordinate of arrow start and end point.
    end (int): X-coordinate of arrow end point.
    RETURNS (unicode): Definition of the arrow head path ('d' attribute).
    """
    if direction == "left":
        pos1, pos2, pos3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
    else:
        pos1, pos2, pos3 = (
            end,
            end + self.arrow_width - 2,
            end - self.arrow_width + 2,
        )
    arrowhead = (
        pos1,
        y + 2,
        pos2,
        y - self.arrow_width,
        pos3,
        y - self.arrow_width,
    )
    return "M{},{} L{},{} {},{}".format(*arrowhead)
```
"def",
"get_arrowhead",
"(",
"self",
",",
"direction",
",",
"x",
",",
"y",
",",
"end",
")",
":",
"if",
"direction",
"==",
"\"left\"",
":",
"pos1",
",",
"pos2",
",",
"pos3",
"=",
"(",
"x",
",",
"x",
"-",
"self",
".",
"arrow_width",
"+",
"2",
",",
"x",
"+",
"self",
".",
"arrow_width",
"-",
"2",
")",
"else",
":",
"pos1",
",",
"pos2",
",",
"pos3",
"=",
"(",
"end",
",",
"end",
"+",
"self",
".",
"arrow_width",
"-",
"2",
",",
"end",
"-",
"self",
".",
"arrow_width",
"+",
"2",
",",
")",
"arrowhead",
"=",
"(",
"pos1",
",",
"y",
"+",
"2",
",",
"pos2",
",",
"y",
"-",
"self",
".",
"arrow_width",
",",
"pos3",
",",
"y",
"-",
"self",
".",
"arrow_width",
",",
")",
"return",
"\"M{},{} L{},{} {},{}\"",
".",
"format",
"(",
"*",
"arrowhead",
")"
] | Render individual arrow head.
direction (unicode): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (unicode): Definition of the arrow head path ('d' attribute). | [
"Render",
"individual",
"arrow",
"head",
"."
] | python | train |
phoebe-project/phoebe2 | phoebe/backend/mesh.py | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/mesh.py#L395-L406 | python | train

```python
def averages(self):
    """
    Access to the average of the values at the vertices for each triangle.

    If the quantities are defined at centers instead of vertices, this
    will return None. Also see :method:`centers`.

    :return: numpy array or None
    """
    if not self.mesh._compute_at_vertices:
        return None

    return np.mean(self.vertices_per_triangle, axis=1)
```
"def",
"averages",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"mesh",
".",
"_compute_at_vertices",
":",
"return",
"None",
"return",
"np",
".",
"mean",
"(",
"self",
".",
"vertices_per_triangle",
",",
"axis",
"=",
"1",
")"
] | Access to the average of the values at the vertices for each triangle.
If the quantities are defined at centers instead of vertices, this
will return None. Also see :method:`centers`.
:return: numpy array or None | [
"Access",
"to",
"the",
"average",
"of",
"the",
"values",
"at",
"the",
"vertices",
"for",
"each",
"triangle",
".",
"If",
"the",
"quantities",
"are",
"defined",
"at",
"centers",
"instead",
"of",
"vertices",
"this",
"will",
"return",
"None",
".",
"Also",
"see",
":",
"method",
":",
"centers",
"."
] | python | train |
fabaff/python-mystrom | pymystrom/cli.py | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/cli.py#L35-L40 | python | train

```python
def read_config(ip, mac):
    """Read the current configuration of a myStrom device."""
    click.echo("Read configuration from %s" % ip)
    request = requests.get(
        'http://{}/{}/{}/'.format(ip, URI, mac), timeout=TIMEOUT)
    print(request.json())
```
"def",
"read_config",
"(",
"ip",
",",
"mac",
")",
":",
"click",
".",
"echo",
"(",
"\"Read configuration from %s\"",
"%",
"ip",
")",
"request",
"=",
"requests",
".",
"get",
"(",
"'http://{}/{}/{}/'",
".",
"format",
"(",
"ip",
",",
"URI",
",",
"mac",
")",
",",
"timeout",
"=",
"TIMEOUT",
")",
"print",
"(",
"request",
".",
"json",
"(",
")",
")"
] | Read the current configuration of a myStrom device. | [
"Read",
"the",
"current",
"configuration",
"of",
"a",
"myStrom",
"device",
"."
] | python | train |
saltstack/salt | salt/modules/cloud.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cloud.py#L156-L183 | python | train

```python
def get_instance(name, provider=None):
    '''
    Return details on an instance.

    Similar to the cloud action show_instance
    but returns only the instance details.

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.get_instance myinstance

    SLS Example:

    .. code-block:: bash

        {{ salt['cloud.get_instance']('myinstance')['mac_address'] }}
    '''
    data = action(fun='show_instance', names=[name], provider=provider)
    info = salt.utils.data.simple_types_filter(data)
    try:
        # get the first: [alias][driver][vm_name]
        info = next(six.itervalues(next(six.itervalues(next(six.itervalues(info))))))
    except AttributeError:
        return None
    return info
```
"def",
"get_instance",
"(",
"name",
",",
"provider",
"=",
"None",
")",
":",
"data",
"=",
"action",
"(",
"fun",
"=",
"'show_instance'",
",",
"names",
"=",
"[",
"name",
"]",
",",
"provider",
"=",
"provider",
")",
"info",
"=",
"salt",
".",
"utils",
".",
"data",
".",
"simple_types_filter",
"(",
"data",
")",
"try",
":",
"# get the first: [alias][driver][vm_name]",
"info",
"=",
"next",
"(",
"six",
".",
"itervalues",
"(",
"next",
"(",
"six",
".",
"itervalues",
"(",
"next",
"(",
"six",
".",
"itervalues",
"(",
"info",
")",
")",
")",
")",
")",
")",
"except",
"AttributeError",
":",
"return",
"None",
"return",
"info"
] | Return details on an instance.
Similar to the cloud action show_instance
but returns only the instance details.
CLI Example:
.. code-block:: bash
salt minionname cloud.get_instance myinstance
SLS Example:
.. code-block:: bash
{{ salt['cloud.get_instance']('myinstance')['mac_address'] }} | [
"Return",
"details",
"on",
"an",
"instance",
"."
] | python | train |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L730-L750 | python | train

```python
def change_axis(self, axis_num, channel_name):
    """
    TODO: refactor that and set_axes
    what to do with ax?

    axis_num: int
        axis number
    channel_name: str
        new channel to plot on that axis
    """
    current_channels = list(self.current_channels)

    if len(current_channels) == 1:
        if axis_num == 0:
            new_channels = channel_name,
        else:
            new_channels = current_channels[0], channel_name
    else:
        new_channels = list(current_channels)
        new_channels[axis_num] = channel_name

    self.set_axes(new_channels, self.ax)
```
"def",
"change_axis",
"(",
"self",
",",
"axis_num",
",",
"channel_name",
")",
":",
"current_channels",
"=",
"list",
"(",
"self",
".",
"current_channels",
")",
"if",
"len",
"(",
"current_channels",
")",
"==",
"1",
":",
"if",
"axis_num",
"==",
"0",
":",
"new_channels",
"=",
"channel_name",
",",
"else",
":",
"new_channels",
"=",
"current_channels",
"[",
"0",
"]",
",",
"channel_name",
"else",
":",
"new_channels",
"=",
"list",
"(",
"current_channels",
")",
"new_channels",
"[",
"axis_num",
"]",
"=",
"channel_name",
"self",
".",
"set_axes",
"(",
"new_channels",
",",
"self",
".",
"ax",
")"
] | TODO: refactor that and set_axes
what to do with ax?
axis_num: int
axis number
channel_name: str
new channel to plot on that axis | [
"TODO",
":",
"refactor",
"that",
"and",
"set_axes",
"what",
"to",
"do",
"with",
"ax?"
] | python | train |
wavefrontHQ/python-client | wavefront_api_client/api/dashboard_api.py | https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/dashboard_api.py#L1399-L1420 | def set_dashboard_tags(self, id, **kwargs): # noqa: E501
"""Set all tags associated with a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_dashboard_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.set_dashboard_tags_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.set_dashboard_tags_with_http_info(id, **kwargs) # noqa: E501
return data | [
"def",
"set_dashboard_tags",
"(",
"self",
",",
"id",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"set_dashboard_tags_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"set_dashboard_tags_with_http_info",
"(",
"id",
",",
"*",
"*",
"kwargs",
")",
"# noqa: E501",
"return",
"data"
] | Set all tags associated with a specific dashboard # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_dashboard_tags(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param list[str] body:
:return: ResponseContainer
If the method is called asynchronously,
returns the request thread. | [
"Set",
"all",
"tags",
"associated",
"with",
"a",
"specific",
"dashboard",
"#",
"noqa",
":",
"E501"
] | python | train |
buildbot/buildbot | master/buildbot/changes/hgpoller.py | https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/changes/hgpoller.py#L148-L163 | def _initRepository(self):
"""Have mercurial init the workdir as a repository (hg init) if needed.
hg init will also create all needed intermediate directories.
"""
if self._isRepositoryReady():
return defer.succeed(None)
log.msg('hgpoller: initializing working dir from %s' % self.repourl)
d = utils.getProcessOutputAndValue(self.hgbin,
['init', self._absWorkdir()],
env=os.environ)
d.addCallback(self._convertNonZeroToFailure)
d.addErrback(self._stopOnFailure)
d.addCallback(lambda _: log.msg(
"hgpoller: finished initializing working dir %r" % self.workdir))
return d | [
"def",
"_initRepository",
"(",
"self",
")",
":",
"if",
"self",
".",
"_isRepositoryReady",
"(",
")",
":",
"return",
"defer",
".",
"succeed",
"(",
"None",
")",
"log",
".",
"msg",
"(",
"'hgpoller: initializing working dir from %s'",
"%",
"self",
".",
"repourl",
")",
"d",
"=",
"utils",
".",
"getProcessOutputAndValue",
"(",
"self",
".",
"hgbin",
",",
"[",
"'init'",
",",
"self",
".",
"_absWorkdir",
"(",
")",
"]",
",",
"env",
"=",
"os",
".",
"environ",
")",
"d",
".",
"addCallback",
"(",
"self",
".",
"_convertNonZeroToFailure",
")",
"d",
".",
"addErrback",
"(",
"self",
".",
"_stopOnFailure",
")",
"d",
".",
"addCallback",
"(",
"lambda",
"_",
":",
"log",
".",
"msg",
"(",
"\"hgpoller: finished initializing working dir %r\"",
"%",
"self",
".",
"workdir",
")",
")",
"return",
"d"
] | Have mercurial init the workdir as a repository (hg init) if needed.
hg init will also create all needed intermediate directories. | [
"Have",
"mercurial",
"init",
"the",
"workdir",
"as",
"a",
"repository",
"(",
"hg",
"init",
")",
"if",
"needed",
"."
] | python | train |
psss/did | did/plugins/trac.py | https://github.com/psss/did/blob/04e4ee6f1aa14c0cae3ba9f9803871f3f98279cb/did/plugins/trac.py#L119-L124 | def closed(self):
""" True if ticket was closed in given time frame """
for who, what, old, new in self.history():
if what == "status" and new == "closed":
return True
return False | [
"def",
"closed",
"(",
"self",
")",
":",
"for",
"who",
",",
"what",
",",
"old",
",",
"new",
"in",
"self",
".",
"history",
"(",
")",
":",
"if",
"what",
"==",
"\"status\"",
"and",
"new",
"==",
"\"closed\"",
":",
"return",
"True",
"return",
"False"
] | True if ticket was closed in given time frame | [
"True",
"if",
"ticket",
"was",
"closed",
"in",
"given",
"time",
"frame"
] | python | train |
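A minimal runnable sketch of the status scan in the closed record, with history() stubbed out; the tuples are fabricated and only their (who, what, old, new) shape matches the record.

def history():
    yield ('alice', 'priority', 'low', 'high')
    yield ('bob', 'status', 'assigned', 'closed')

def closed():
    # True if any history entry flips the status field to 'closed'
    for who, what, old, new in history():
        if what == 'status' and new == 'closed':
            return True
    return False

print(closed())  # True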
xzased/lvm2py | lvm2py/vg.py | https://github.com/xzased/lvm2py/blob/34ce69304531a474c2fe4a4009ca445a8c103cd6/lvm2py/vg.py#L348-L380 | def remove_pv(self, pv):
"""
Removes a physical volume from the volume group::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
pv = vg.pvscan()[0]
vg.remove_pv(pv)
*Args:*
* pv (obj): A PhysicalVolume instance.
*Raises:*
* HandleError, CommitError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised. Also, when removing the last physical volume, the volume
group is deleted in lvm, leaving the instance with a null handle.
"""
name = pv.name
self.open()
rm = lvm_vg_reduce(self.handle, name)
if rm != 0:
self.close()
raise CommitError("Failed to remove %s." % name)
self._commit()
self.close() | [
"def",
"remove_pv",
"(",
"self",
",",
"pv",
")",
":",
"name",
"=",
"pv",
".",
"name",
"self",
".",
"open",
"(",
")",
"rm",
"=",
"lvm_vg_reduce",
"(",
"self",
".",
"handle",
",",
"name",
")",
"if",
"rm",
"!=",
"0",
":",
"self",
".",
"close",
"(",
")",
"raise",
"CommitError",
"(",
"\"Failed to remove %s.\"",
"%",
"name",
")",
"self",
".",
"_commit",
"(",
")",
"self",
".",
"close",
"(",
")"
] | Removes a physical volume from the volume group::
from lvm2py import *
lvm = LVM()
vg = lvm.get_vg("myvg", "w")
pv = vg.pvscan()[0]
vg.remove_pv(pv)
*Args:*
* pv (obj): A PhysicalVolume instance.
*Raises:*
* HandleError, CommitError
.. note::
The VolumeGroup instance must be in write mode, otherwise CommitError
is raised. Also, when removing the last physical volume, the volume
group is deleted in lvm, leaving the instance with a null handle. | [
"Removes",
"a",
"physical",
"volume",
"from",
"the",
"volume",
"group",
"::"
] | python | train |
hsolbrig/PyShEx | pyshex/shape_expressions_language/p5_4_node_constraints.py | https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p5_4_node_constraints.py#L216-L269 | def _nodeSatisfiesValue(cntxt: Context, n: Node, vsv: ShExJ.valueSetValue) -> bool:
"""
A term matches a valueSetValue if:
* vsv is an objectValue and n = vsv.
* vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
* vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
* vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
* vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).
Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable
.. note:: Mismatch with spec
This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
we're matching an IRI or other type
.. note:: Language issue
The stem range spec shouldn't have the first element in the exclusions
"""
vsv = map_object_literal(vsv)
if isinstance_(vsv, ShExJ.objectValue):
return objectValueMatches(n, vsv)
if isinstance(vsv, ShExJ.Language):
if vsv.languageTag is not None and isinstance(n, Literal) and n.language is not None:
return n.language == vsv.languageTag
else:
return False
if isinstance(vsv, ShExJ.IriStem):
return nodeInIriStem(cntxt, n, vsv.stem)
if isinstance(vsv, ShExJ.IriStemRange):
exclusions = vsv.exclusions if vsv.exclusions is not None else []
return nodeInIriStem(cntxt, n, vsv.stem) and not any(
(uriref_matches_iriref(n, excl) if isinstance(excl, ShExJ.IRIREF) else
uriref_startswith_iriref(n, excl.stem)) for excl in exclusions)
if isinstance(vsv, ShExJ.LiteralStem):
return nodeInLiteralStem(cntxt, n, vsv.stem)
if isinstance(vsv, ShExJ.LiteralStemRange):
exclusions = vsv.exclusions if vsv.exclusions is not None else []
return nodeInLiteralStem(cntxt, n, vsv.stem) and not any(str(n) == excl for excl in exclusions)
if isinstance(vsv, ShExJ.LanguageStem):
return nodeInLanguageStem(cntxt, n, vsv.stem)
if isinstance(vsv, ShExJ.LanguageStemRange):
exclusions = vsv.exclusions if vsv.exclusions is not None else []
return nodeInLanguageStem(cntxt, n, vsv.stem) and not any(str(n) == str(excl) for excl in exclusions)
return False | [
"def",
"_nodeSatisfiesValue",
"(",
"cntxt",
":",
"Context",
",",
"n",
":",
"Node",
",",
"vsv",
":",
"ShExJ",
".",
"valueSetValue",
")",
"->",
"bool",
":",
"vsv",
"=",
"map_object_literal",
"(",
"vsv",
")",
"if",
"isinstance_",
"(",
"vsv",
",",
"ShExJ",
".",
"objectValue",
")",
":",
"return",
"objectValueMatches",
"(",
"n",
",",
"vsv",
")",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"Language",
")",
":",
"if",
"vsv",
".",
"languageTag",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"n",
",",
"Literal",
")",
"and",
"n",
".",
"language",
"is",
"not",
"None",
":",
"return",
"n",
".",
"language",
"==",
"vsv",
".",
"languageTag",
"else",
":",
"return",
"False",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"IriStem",
")",
":",
"return",
"nodeInIriStem",
"(",
"cntxt",
",",
"n",
",",
"vsv",
".",
"stem",
")",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"IriStemRange",
")",
":",
"exclusions",
"=",
"vsv",
".",
"exclusions",
"if",
"vsv",
".",
"exclusions",
"is",
"not",
"None",
"else",
"[",
"]",
"return",
"nodeInIriStem",
"(",
"cntxt",
",",
"n",
",",
"vsv",
".",
"stem",
")",
"and",
"not",
"any",
"(",
"(",
"uriref_matches_iriref",
"(",
"n",
",",
"excl",
")",
"if",
"isinstance",
"(",
"excl",
",",
"ShExJ",
".",
"IRIREF",
")",
"else",
"uriref_startswith_iriref",
"(",
"n",
",",
"excl",
".",
"stem",
")",
")",
"for",
"excl",
"in",
"exclusions",
")",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"LiteralStem",
")",
":",
"return",
"nodeInLiteralStem",
"(",
"cntxt",
",",
"n",
",",
"vsv",
".",
"stem",
")",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"LiteralStemRange",
")",
":",
"exclusions",
"=",
"vsv",
".",
"exclusions",
"if",
"vsv",
".",
"exclusions",
"is",
"not",
"None",
"else",
"[",
"]",
"return",
"nodeInLiteralStem",
"(",
"cntxt",
",",
"n",
",",
"vsv",
".",
"stem",
")",
"and",
"not",
"any",
"(",
"str",
"(",
"n",
")",
"==",
"excl",
"for",
"excl",
"in",
"exclusions",
")",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"LanguageStem",
")",
":",
"return",
"nodeInLanguageStem",
"(",
"cntxt",
",",
"n",
",",
"vsv",
".",
"stem",
")",
"if",
"isinstance",
"(",
"vsv",
",",
"ShExJ",
".",
"LanguageStemRange",
")",
":",
"exclusions",
"=",
"vsv",
".",
"exclusions",
"if",
"vsv",
".",
"exclusions",
"is",
"not",
"None",
"else",
"[",
"]",
"return",
"nodeInLanguageStem",
"(",
"cntxt",
",",
"n",
",",
"vsv",
".",
"stem",
")",
"and",
"not",
"any",
"(",
"str",
"(",
"n",
")",
"==",
"str",
"(",
"excl",
")",
"for",
"excl",
"in",
"exclusions",
")",
"return",
"False"
] | A term matches a valueSetValue if:
* vsv is an objectValue and n = vsv.
* vsv is a Language with langTag lt and n is a language-tagged string with a language tag l and l = lt.
* vsv is a IriStem, LiteralStem or LanguageStem with stem st and nodeIn(n, st).
* vsv is a IriStemRange, LiteralStemRange or LanguageStemRange with stem st and exclusions excls and
nodeIn(n, st) and there is no x in excls such that nodeIn(n, excl).
* vsv is a Wildcard with exclusions excls and there is no x in excls such that nodeIn(n, excl).
Note that ObjectLiteral is *not* typed in ShExJ.jsg, so we identify it by a lack of a 'type' variable
.. note:: Mismatch with spec
This won't work correctly if the stem value is passed in to nodeIn, as there will be no way to know whether
we're matching an IRI or other type
.. note:: Language issue
The stem range spec shouldn't have the first element in the exclusions | [
"A",
"term",
"matches",
"a",
"valueSetValue",
"if",
":",
"*",
"vsv",
"is",
"an",
"objectValue",
"and",
"n",
"=",
"vsv",
".",
"*",
"vsv",
"is",
"a",
"Language",
"with",
"langTag",
"lt",
"and",
"n",
"is",
"a",
"language",
"-",
"tagged",
"string",
"with",
"a",
"language",
"tag",
"l",
"and",
"l",
"=",
"lt",
".",
"*",
"vsv",
"is",
"a",
"IriStem",
"LiteralStem",
"or",
"LanguageStem",
"with",
"stem",
"st",
"and",
"nodeIn",
"(",
"n",
"st",
")",
".",
"*",
"vsv",
"is",
"a",
"IriStemRange",
"LiteralStemRange",
"or",
"LanguageStemRange",
"with",
"stem",
"st",
"and",
"exclusions",
"excls",
"and",
"nodeIn",
"(",
"n",
"st",
")",
"and",
"there",
"is",
"no",
"x",
"in",
"excls",
"such",
"that",
"nodeIn",
"(",
"n",
"excl",
")",
".",
"*",
"vsv",
"is",
"a",
"Wildcard",
"with",
"exclusions",
"excls",
"and",
"there",
"is",
"no",
"x",
"in",
"excls",
"such",
"that",
"nodeIn",
"(",
"n",
"excl",
")",
"."
] | python | train |
ihgazni2/edict | edict/edict.py | https://github.com/ihgazni2/edict/blob/44a08ccc10b196aa3854619b4c51ddb246778a34/edict/edict.py#L109-L129 | def _reorder_via_klist(d,nkl,**kwargs):
'''
d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''}
pobj(d)
nkl = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment']
pobj(_reorder_via_klist(d,nkl))
'''
if('deepcopy' in kwargs):
deepcopy = kwargs['deepcopy']
else:
deepcopy = True
if(deepcopy):
d = copy.deepcopy(d)
else:
pass
nd = {}
lngth = nkl.__len__()
for i in range(0,lngth):
k = nkl[i]
nd[k] = d[k]
return(nd) | [
"def",
"_reorder_via_klist",
"(",
"d",
",",
"nkl",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"'deepcopy'",
"in",
"kwargs",
")",
":",
"deepcopy",
"=",
"kwargs",
"[",
"'deepcopy'",
"]",
"else",
":",
"deepcopy",
"=",
"True",
"if",
"(",
"deepcopy",
")",
":",
"d",
"=",
"copy",
".",
"deepcopy",
"(",
"d",
")",
"else",
":",
"pass",
"nd",
"=",
"{",
"}",
"lngth",
"=",
"nkl",
".",
"__len__",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"lngth",
")",
":",
"k",
"=",
"nkl",
"[",
"i",
"]",
"nd",
"[",
"k",
"]",
"=",
"d",
"[",
"k",
"]",
"return",
"(",
"nd",
")"
] | d = {'scheme': 'http', 'path': '/index.php', 'params': 'params', 'query': 'username=query', 'fragment': 'frag', 'username': '', 'password': '', 'hostname': 'www.baidu.com', 'port': ''}
pobj(d)
nkl = ['scheme', 'username', 'password', 'hostname', 'port', 'path', 'params', 'query', 'fragment']
pobj(_reorder_via_klist(d,nkl)) | [
"d",
"=",
"{",
"scheme",
":",
"http",
"path",
":",
"/",
"index",
".",
"php",
"params",
":",
"params",
"query",
":",
"username",
"=",
"query",
"fragment",
":",
"frag",
"username",
":",
"password",
":",
"hostname",
":",
"www",
".",
"baidu",
".",
"com",
"port",
":",
"}",
"pobj",
"(",
"d",
")",
"nkl",
"=",
"[",
"scheme",
"username",
"password",
"hostname",
"port",
"path",
"params",
"query",
"fragment",
"]",
"pobj",
"(",
"_reorder_via_klist",
"(",
"d",
"nkl",
"))"
] | python | train |
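The _reorder_via_klist record relies on dict insertion order; a compact runnable restatement, assuming CPython 3.7+ where that order is guaranteed:

import copy

def reorder_via_klist(d, nkl, deepcopy=True):
    if deepcopy:
        d = copy.deepcopy(d)
    # raises KeyError if nkl names a key missing from d, like the record
    return {k: d[k] for k in nkl}

d = {'scheme': 'http', 'path': '/index.php', 'hostname': 'www.example.com'}
print(reorder_via_klist(d, ['scheme', 'hostname', 'path']))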
casacore/python-casacore | casacore/images/image.py | https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L586-L612 | def view(self, tempname='/tmp/tempimage'):
"""Display the image using casaviewer.
If the image is not persistent, a copy will be made that the user
has to delete once viewing has finished. The name of the copy can be
given in argument `tempname`. Default is '/tmp/tempimage'.
"""
import os
# Test if casaviewer can be found.
# On OS-X 'which' always returns 0, so use test on top of it.
if os.system('test -x `which casaviewer` > /dev/null 2>&1') == 0:
six.print_("Starting casaviewer in the background ...")
self.unlock()
if self.ispersistent():
os.system('casaviewer ' + self.name() + ' &')
elif len(tempname) > 0:
six.print_(" making a persistent copy in " + tempname)
six.print_(" which should be deleted after the viewer has ended")
self.saveas(tempname)
os.system('casaviewer ' + tempname + ' &')
else:
six.print_("Cannot view because the image is in memory only.")
six.print_("You can browse a persistent copy of the image like:")
six.print_(" t.view('/tmp/tempimage')")
else:
six.print_("casaviewer cannot be found") | [
"def",
"view",
"(",
"self",
",",
"tempname",
"=",
"'/tmp/tempimage'",
")",
":",
"import",
"os",
"# Test if casaviewer can be found.",
"# On OS-X 'which' always returns 0, so use test on top of it.",
"if",
"os",
".",
"system",
"(",
"'test -x `which casaviewer` > /dev/null 2>&1'",
")",
"==",
"0",
":",
"six",
".",
"print_",
"(",
"\"Starting casaviewer in the background ...\"",
")",
"self",
".",
"unlock",
"(",
")",
"if",
"self",
".",
"ispersistent",
"(",
")",
":",
"os",
".",
"system",
"(",
"'casaviewer '",
"+",
"self",
".",
"name",
"(",
")",
"+",
"' &'",
")",
"elif",
"len",
"(",
"tempname",
")",
">",
"0",
":",
"six",
".",
"print_",
"(",
"\" making a persistent copy in \"",
"+",
"tempname",
")",
"six",
".",
"print_",
"(",
"\" which should be deleted after the viewer has ended\"",
")",
"self",
".",
"saveas",
"(",
"tempname",
")",
"os",
".",
"system",
"(",
"'casaviewer '",
"+",
"tempname",
"+",
"' &'",
")",
"else",
":",
"six",
".",
"print_",
"(",
"\"Cannot view because the image is in memory only.\"",
")",
"six",
".",
"print_",
"(",
"\"You can browse a persistent copy of the image like:\"",
")",
"six",
".",
"print_",
"(",
"\" t.view('/tmp/tempimage')\"",
")",
"else",
":",
"six",
".",
"print_",
"(",
"\"casaviewer cannot be found\"",
")"
] | Display the image using casaviewer.
If the image is not persistent, a copy will be made that the user
has to delete once viewing has finished. The name of the copy can be
given in argument `tempname`. Default is '/tmp/tempimage'. | [
"Display",
"the",
"image",
"using",
"casaviewer",
"."
] | python | train |
CiscoUcs/UcsPythonSDK | src/UcsSdk/UcsBase.py | https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L829-L842 | def Expandkey(key, clen):
""" Internal method supporting encryption and decryption functionality. """
import sha
from string import join
from array import array
blocks = (clen + 19) / 20
xkey = []
seed = key
for i in xrange(blocks):
seed = sha.new(key + seed).digest()
xkey.append(seed)
j = join(xkey, '')
return array('L', j) | [
"def",
"Expandkey",
"(",
"key",
",",
"clen",
")",
":",
"import",
"sha",
"from",
"string",
"import",
"join",
"from",
"array",
"import",
"array",
"blocks",
"=",
"(",
"clen",
"+",
"19",
")",
"/",
"20",
"xkey",
"=",
"[",
"]",
"seed",
"=",
"key",
"for",
"i",
"in",
"xrange",
"(",
"blocks",
")",
":",
"seed",
"=",
"sha",
".",
"new",
"(",
"key",
"+",
"seed",
")",
".",
"digest",
"(",
")",
"xkey",
".",
"append",
"(",
"seed",
")",
"j",
"=",
"join",
"(",
"xkey",
",",
"''",
")",
"return",
"array",
"(",
"'L'",
",",
"j",
")"
] | Internal method supporting encryption and decryption functionality. | [
"Internal",
"method",
"supporting",
"encryption",
"and",
"decryption",
"functionality",
"."
] | python | train |
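The Expandkey record is Python 2 only (sha module, string.join, xrange, integer /). A possible Python 3 rendering of the same SHA-1 key-expansion idea, assuming key is bytes; note that array type 'L' is platform-word sized, so the joined digest length must divide evenly, as it does for clen=40 here.

import hashlib
from array import array

def expand_key(key, clen):
    blocks = (clen + 19) // 20          # SHA-1 digests are 20 bytes each
    xkey = []
    seed = key
    for _ in range(blocks):
        seed = hashlib.sha1(key + seed).digest()
        xkey.append(seed)
    return array('L', b''.join(xkey))   # item count depends on word size

print(len(expand_key(b'secret', 40)))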
KnowledgeLinks/rdfframework | rdfframework/search/esmappings.py | https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/search/esmappings.py#L134-L198 | def send_es_mapping(self, es_map, **kwargs):
"""
sends the mapping to elasticsearch
args:
es_map: dictionary of the index mapping
kwargs:
reset_idx: WARNING! If True the current referenced es index
will be deleted, destroying all data in that index in
elasticsearch. If False an incremented index will be
created and data-migration will start from the old to
the new index
"""
log.setLevel(kwargs.get('log_level', self.log_level))
def next_es_index_version(curr_alias):
""" returns the next number for a new index
args:
alias_def: the dictionary returned by es for get alias
"""
try:
alias_def = self.es.indices.get_alias(alias)
except es_except.NotFoundError:
alias_def = {alias + "_v0":{}}
old_idx = list(alias_def)[0]
parts = old_idx.split("_v")
try:
parts[1] = str(int(parts[1]) + 1)
except IndexError:
parts = [old_idx,'1']
return {'old': old_idx, 'new': "_v".join(parts)}
reset_idx= kwargs.get('reset_idx', False)
alias = es_map.pop('index')
idx_names = next_es_index_version(alias)
# Delete the index series if reset_idx was passed
if reset_idx:
log.warning("DELETING Elasticsearch INDEX => %s ******", alias)
self.es.indices.delete(index=alias + "_v*", ignore=[400, 404])
idx_names['new'] = alias + "_v1"
# Create the new index and apply the mapping
self.es.indices.create(index=idx_names['new'],
body=es_map['body'],
update_all_types=True)
# if the index was not deleted transfer documents from old to the
# new index
if not reset_idx and self.es.indices.exists(idx_names['old']):
url = os.path.join(self.es_url,'_reindex').replace('\\','/')
data = {"source":{"index": idx_names['old']},
"dest":{"index": idx_names['new']}}
# Python elasticsearch recommends using a direct call to the
# es 5+ _reindex URL vice using their helper.
result = requests.post(url,
headers={'Content-Type':'application/json'},
data = json.dumps(data))
self.es.indices.delete_alias(index=idx_names['old'],
name=alias,
ignore=[403])
self.es.indices.delete(index=idx_names['old'], ignore=[400, 404])
# add the alias to the new index
self.es.indices.put_alias(index=idx_names['new'], name=alias) | [
"def",
"send_es_mapping",
"(",
"self",
",",
"es_map",
",",
"*",
"*",
"kwargs",
")",
":",
"log",
".",
"setLevel",
"(",
"kwargs",
".",
"get",
"(",
"'log_level'",
",",
"self",
".",
"log_level",
")",
")",
"def",
"next_es_index_version",
"(",
"curr_alias",
")",
":",
"\"\"\" returns the next number for a new index\r\n\r\n args:\r\n alias_def: the dictionary returned by es for get alias\r\n \"\"\"",
"try",
":",
"alias_def",
"=",
"self",
".",
"es",
".",
"indices",
".",
"get_alias",
"(",
"alias",
")",
"except",
"es_except",
".",
"NotFoundError",
":",
"alias_def",
"=",
"{",
"alias",
"+",
"\"_v0\"",
":",
"{",
"}",
"}",
"old_idx",
"=",
"list",
"(",
"alias_def",
")",
"[",
"0",
"]",
"parts",
"=",
"old_idx",
".",
"split",
"(",
"\"_v\"",
")",
"try",
":",
"parts",
"[",
"1",
"]",
"=",
"str",
"(",
"int",
"(",
"parts",
"[",
"1",
"]",
")",
"+",
"1",
")",
"except",
"IndexError",
":",
"parts",
"=",
"[",
"old_idx",
",",
"'1'",
"]",
"return",
"{",
"'old'",
":",
"old_idx",
",",
"'new'",
":",
"\"_v\"",
".",
"join",
"(",
"parts",
")",
"}",
"reset_idx",
"=",
"kwargs",
".",
"get",
"(",
"'reset_idx'",
",",
"False",
")",
"alias",
"=",
"es_map",
".",
"pop",
"(",
"'index'",
")",
"idx_names",
"=",
"next_es_index_version",
"(",
"alias",
")",
"# Delete if the index series if reset_idx was passed\r",
"if",
"reset_idx",
":",
"log",
".",
"warning",
"(",
"\"DELETING Elasticsearch INDEX => %s ******\"",
",",
"alias",
")",
"self",
".",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"alias",
"+",
"\"_v*\"",
",",
"ignore",
"=",
"[",
"400",
",",
"404",
"]",
")",
"idx_names",
"[",
"'new'",
"]",
"=",
"alias",
"+",
"\"_v1\"",
"# Create the new index and apply the mapping\r",
"self",
".",
"es",
".",
"indices",
".",
"create",
"(",
"index",
"=",
"idx_names",
"[",
"'new'",
"]",
",",
"body",
"=",
"es_map",
"[",
"'body'",
"]",
",",
"update_all_types",
"=",
"True",
")",
"# if the index was not deleted transfer documents from old to the\r",
"# new index\r",
"if",
"not",
"reset_idx",
"and",
"self",
".",
"es",
".",
"indices",
".",
"exists",
"(",
"idx_names",
"[",
"'old'",
"]",
")",
":",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"es_url",
",",
"'_reindex'",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"data",
"=",
"{",
"\"source\"",
":",
"{",
"\"index\"",
":",
"idx_names",
"[",
"'old'",
"]",
"}",
",",
"\"dest\"",
":",
"{",
"\"index\"",
":",
"idx_names",
"[",
"'new'",
"]",
"}",
"}",
"# Python elasticsearch recommends using a direct call to the\r",
"# es 5+ _reindex URL vice using their helper.\r",
"result",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/json'",
"}",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
")",
"self",
".",
"es",
".",
"indices",
".",
"delete_alias",
"(",
"index",
"=",
"idx_names",
"[",
"'old'",
"]",
",",
"name",
"=",
"alias",
",",
"ignore",
"=",
"[",
"403",
"]",
")",
"self",
".",
"es",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"idx_names",
"[",
"'old'",
"]",
",",
"ignore",
"=",
"[",
"400",
",",
"404",
"]",
")",
"# add the alias to the new index\r",
"self",
".",
"es",
".",
"indices",
".",
"put_alias",
"(",
"index",
"=",
"idx_names",
"[",
"'new'",
"]",
",",
"name",
"=",
"alias",
")"
] | sends the mapping to elasticsearch
args:
es_map: dictionary of the index mapping
kwargs:
reset_idx: WARNING! If True the current referenced es index
will be deleted, destroying all data in that index in
elasticsearch. If False an incremented index will be
created and data-migration will start from the old to
the new index | [
"sends",
"the",
"mapping",
"to",
"elasticsearch",
"args",
":",
"es_map",
":",
"dictionary",
"of",
"the",
"index",
"mapping",
"kwargs",
":",
"reset_idx",
":",
"WARNING!",
"If",
"True",
"the",
"current",
"referenced",
"es",
"index",
"will",
"be",
"deleted",
"destroying",
"all",
"data",
"in",
"that",
"index",
"in",
"elasticsearch",
".",
"if",
"False",
"an",
"incremented",
"index",
"will",
"be",
"created",
"and",
"data",
"-",
"migration",
"will",
"start",
"from",
"the",
"old",
"to",
"the",
"new",
"index"
] | python | train |
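The '_vN' index-name increment buried inside send_es_mapping, pulled out as a small pure function; the alias handling and Elasticsearch client calls are omitted, and names that themselves contain '_v' would need extra care.

def next_index_version(old_idx):
    parts = old_idx.split('_v')
    try:
        parts[1] = str(int(parts[1]) + 1)
    except IndexError:       # no version suffix yet: start at _v1
        parts = [old_idx, '1']
    return '_v'.join(parts)

print(next_index_version('catalog_v3'))  # catalog_v4
print(next_index_version('catalog'))     # catalog_v1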
openego/eDisGo | edisgo/grid/network.py | https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L43-L75 | def plot_mv_grid_topology(self, technologies=False, **kwargs):
"""
Plots plain MV grid topology and optionally nodes by technology type
(e.g. station or generator).
Parameters
----------
technologies : :obj:`Boolean`
If True plots stations, generators, etc. in the grid in different
colors. If False does not plot any nodes. Default: False.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
"""
if self.network.pypsa is None:
try:
timesteps = self.network.timeseries.timeindex
self.network.pypsa = pypsa_io.to_pypsa(
self.network, mode=None, timesteps=timesteps)
except:
logging.warning(
"pypsa representation of MV grid needed to plot MV "
"grid topology.")
if self.network.pypsa is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
node_color='technology' if technologies is True else None,
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', '')) | [
"def",
"plot_mv_grid_topology",
"(",
"self",
",",
"technologies",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"network",
".",
"pypsa",
"is",
"None",
":",
"try",
":",
"timesteps",
"=",
"self",
".",
"network",
".",
"timeseries",
".",
"timeindex",
"self",
".",
"network",
".",
"pypsa",
"=",
"pypsa_io",
".",
"to_pypsa",
"(",
"self",
".",
"network",
",",
"mode",
"=",
"None",
",",
"timesteps",
"=",
"timesteps",
")",
"except",
":",
"logging",
".",
"warning",
"(",
"\"pypsa representation of MV grid needed to plot MV \"",
"\"grid topology.\"",
")",
"if",
"self",
".",
"network",
".",
"pypsa",
"is",
"not",
"None",
":",
"plots",
".",
"mv_grid_topology",
"(",
"self",
".",
"network",
".",
"pypsa",
",",
"self",
".",
"network",
".",
"config",
",",
"node_color",
"=",
"'technology'",
"if",
"technologies",
"is",
"True",
"else",
"None",
",",
"filename",
"=",
"kwargs",
".",
"get",
"(",
"'filename'",
",",
"None",
")",
",",
"grid_district_geom",
"=",
"kwargs",
".",
"get",
"(",
"'grid_district_geom'",
",",
"True",
")",
",",
"background_map",
"=",
"kwargs",
".",
"get",
"(",
"'background_map'",
",",
"True",
")",
",",
"xlim",
"=",
"kwargs",
".",
"get",
"(",
"'xlim'",
",",
"None",
")",
",",
"ylim",
"=",
"kwargs",
".",
"get",
"(",
"'ylim'",
",",
"None",
")",
",",
"title",
"=",
"kwargs",
".",
"get",
"(",
"'title'",
",",
"''",
")",
")"
] | Plots plain MV grid topology and optionally nodes by technology type
(e.g. station or generator).
Parameters
----------
technologies : :obj:`Boolean`
If True plots stations, generators, etc. in the grid in different
colors. If False does not plot any nodes. Default: False.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | [
"Plots",
"plain",
"MV",
"grid",
"topology",
"and",
"optionally",
"nodes",
"by",
"technology",
"type",
"(",
"e",
".",
"g",
".",
"station",
"or",
"generator",
")",
"."
] | python | train |
the01/python-floscraper | floscraper/models.py | https://github.com/the01/python-floscraper/blob/d578cd3d6381070d9a07dade1e10387ae33e9a65/floscraper/models.py#L115-L131 | def from_dict(d):
"""
Response from dict
:param d: Dict to load
:type d: dict
:return: response
:rtype: Response
"""
if d is None:
return None
return Response(
d.get('html'),
CacheInfo.from_dict(d.get('cache_info')),
d.get('scraped'),
d.get('raw')
) | [
"def",
"from_dict",
"(",
"d",
")",
":",
"if",
"d",
"is",
"None",
":",
"return",
"None",
"return",
"Response",
"(",
"d",
".",
"get",
"(",
"'html'",
")",
",",
"CacheInfo",
".",
"from_dict",
"(",
"d",
".",
"get",
"(",
"'cache_info'",
")",
")",
",",
"d",
".",
"get",
"(",
"'scraped'",
")",
",",
"d",
".",
"get",
"(",
"'raw'",
")",
")"
] | Response from dict
:param d: Dict to load
:type d: dict
:return: response
:rtype: Response | [
"Response",
"from",
"dict"
] | python | train |
pyQode/pyqode.core | pyqode/core/api/code_edit.py | https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/api/code_edit.py#L788-L796 | def zoom_in(self, increment=1):
"""
Zooms in the editor (makes the font bigger).
:param increment: zoom level increment. Default is 1.
"""
self.zoom_level += increment
TextHelper(self).mark_whole_doc_dirty()
self._reset_stylesheet() | [
"def",
"zoom_in",
"(",
"self",
",",
"increment",
"=",
"1",
")",
":",
"self",
".",
"zoom_level",
"+=",
"increment",
"TextHelper",
"(",
"self",
")",
".",
"mark_whole_doc_dirty",
"(",
")",
"self",
".",
"_reset_stylesheet",
"(",
")"
] | Zooms in the editor (makes the font bigger).
:param increment: zoom level increment. Default is 1. | [
"Zooms",
"in",
"the",
"editor",
"(",
"makes",
"the",
"font",
"bigger",
")",
"."
] | python | train |
etcher-be/emiz | emiz/avwx/core.py | https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/core.py#L795-L824 | def get_flight_rules(vis: Number, ceiling: Cloud) -> int:
"""
Returns int based on current flight rules from parsed METAR data
0=VFR, 1=MVFR, 2=IFR, 3=LIFR
Note: Common practice is to report IFR if visibility is unavailable
"""
# Parse visibility
if not vis:
return 2
if vis.repr == 'CAVOK' or vis.repr.startswith('P6'):
vis = 10 # type: ignore
elif vis.repr.startswith('M'):
vis = 0 # type: ignore
# Convert meters to miles
elif len(vis.repr) == 4:
vis = vis.value * 0.000621371 # type: ignore
else:
vis = vis.value # type: ignore
# Parse ceiling
cld = ceiling.altitude if ceiling else 99
# Determine flight rules
if (vis <= 5) or (cld <= 30): # type: ignore
if (vis < 3) or (cld < 10): # type: ignore
if (vis < 1) or (cld < 5): # type: ignore
return 3 # LIFR
return 2 # IFR
return 1 # MVFR
return 0 | [
"def",
"get_flight_rules",
"(",
"vis",
":",
"Number",
",",
"ceiling",
":",
"Cloud",
")",
"->",
"int",
":",
"# Parse visibility",
"if",
"not",
"vis",
":",
"return",
"2",
"if",
"vis",
".",
"repr",
"==",
"'CAVOK'",
"or",
"vis",
".",
"repr",
".",
"startswith",
"(",
"'P6'",
")",
":",
"vis",
"=",
"10",
"# type: ignore",
"elif",
"vis",
".",
"repr",
".",
"startswith",
"(",
"'M'",
")",
":",
"vis",
"=",
"0",
"# type: ignore",
"# Convert meters to miles",
"elif",
"len",
"(",
"vis",
".",
"repr",
")",
"==",
"4",
":",
"vis",
"=",
"vis",
".",
"value",
"*",
"0.000621371",
"# type: ignore",
"else",
":",
"vis",
"=",
"vis",
".",
"value",
"# type: ignore",
"# Parse ceiling",
"cld",
"=",
"ceiling",
".",
"altitude",
"if",
"ceiling",
"else",
"99",
"# Determine flight rules",
"if",
"(",
"vis",
"<=",
"5",
")",
"or",
"(",
"cld",
"<=",
"30",
")",
":",
"# type: ignore",
"if",
"(",
"vis",
"<",
"3",
")",
"or",
"(",
"cld",
"<",
"10",
")",
":",
"# type: ignore",
"if",
"(",
"vis",
"<",
"1",
")",
"or",
"(",
"cld",
"<",
"5",
")",
":",
"# type: ignore",
"return",
"3",
"# LIFR",
"return",
"2",
"# IFR",
"return",
"1",
"# MVFR",
"return",
"0"
] | Returns int based on current flight rules from parsed METAR data
0=VFR, 1=MVFR, 2=IFR, 3=LIFR
Note: Common practice is to report IFR if visibility is unavailable | [
"Returns",
"int",
"based",
"on",
"current",
"flight",
"rules",
"from",
"parsed",
"METAR",
"data"
] | python | train |
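The same VFR/MVFR/IFR/LIFR thresholds as the get_flight_rules record, restated over plain numbers (visibility in statute miles, ceiling in hundreds of feet) so it runs without the avwx Number/Cloud wrappers:

def flight_rules(vis_miles, ceiling_hundreds_ft):
    if vis_miles <= 5 or ceiling_hundreds_ft <= 30:
        if vis_miles < 3 or ceiling_hundreds_ft < 10:
            if vis_miles < 1 or ceiling_hundreds_ft < 5:
                return 3    # LIFR
            return 2        # IFR
        return 1            # MVFR
    return 0                # VFR

print(flight_rules(10, 99))  # 0 (VFR)
print(flight_rules(2, 8))    # 2 (IFR)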
klen/zeta-library | zetalibrary/scss/__init__.py | https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/scss/__init__.py#L2976-L3000 | def _inline_image(image, mime_type=None):
"""
Embeds the contents of a file directly inside your stylesheet, eliminating
the need for another HTTP request. For small files such as images or fonts,
this can be a performance benefit at the cost of a larger generated CSS
file.
"""
file = StringValue(image).value
mime_type = StringValue(mime_type).value or mimetypes.guess_type(file)[0]
path = None
if callable(STATIC_ROOT):
try:
_file, _storage = list(STATIC_ROOT(file))[0]
path = _storage.open(_file)
except:
pass
else:
_path = os.path.join(STATIC_ROOT, file)
if os.path.exists(_path):
path = open(_path, 'rb')
if path:
url = 'data:' + mime_type + ';base64,' + base64.b64encode(path.read())
url = url = '%s%s?_=%s' % (STATIC_URL, file, 'NA')
inline = 'url("%s")' % escape(url)
return StringValue(inline) | [
"def",
"_inline_image",
"(",
"image",
",",
"mime_type",
"=",
"None",
")",
":",
"file",
"=",
"StringValue",
"(",
"image",
")",
".",
"value",
"mime_type",
"=",
"StringValue",
"(",
"mime_type",
")",
".",
"value",
"or",
"mimetypes",
".",
"guess_type",
"(",
"file",
")",
"[",
"0",
"]",
"path",
"=",
"None",
"if",
"callable",
"(",
"STATIC_ROOT",
")",
":",
"try",
":",
"_file",
",",
"_storage",
"=",
"list",
"(",
"STATIC_ROOT",
"(",
"file",
")",
")",
"[",
"0",
"]",
"path",
"=",
"_storage",
".",
"open",
"(",
"_file",
")",
"except",
":",
"pass",
"else",
":",
"_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"STATIC_ROOT",
",",
"file",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"_path",
")",
":",
"path",
"=",
"open",
"(",
"_path",
",",
"'rb'",
")",
"if",
"path",
":",
"url",
"=",
"'data:'",
"+",
"mime_type",
"+",
"';base64,'",
"+",
"base64",
".",
"b64encode",
"(",
"path",
".",
"read",
"(",
")",
")",
"url",
"=",
"url",
"=",
"'%s%s?_=%s'",
"%",
"(",
"STATIC_URL",
",",
"file",
",",
"'NA'",
")",
"inline",
"=",
"'url(\"%s\")'",
"%",
"escape",
"(",
"url",
")",
"return",
"StringValue",
"(",
"inline",
")"
] | Embeds the contents of a file directly inside your stylesheet, eliminating
the need for another HTTP request. For small files such as images or fonts,
this can be a performance benefit at the cost of a larger generated CSS
file. | [
"Embeds",
"the",
"contents",
"of",
"a",
"file",
"directly",
"inside",
"your",
"stylesheet",
"eliminating",
"the",
"need",
"for",
"another",
"HTTP",
"request",
".",
"For",
"small",
"files",
"such",
"images",
"or",
"fonts",
"this",
"can",
"be",
"a",
"performance",
"benefit",
"at",
"the",
"cost",
"of",
"a",
"larger",
"generated",
"CSS",
"file",
"."
] | python | train |
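Note that in the _inline_image record the computed data URI is immediately overwritten by the doubled 'url = url = ...' line, which looks like leftover debug code rather than the intent. A minimal sketch of the data-URI inlining idea itself, assuming a readable local file:

import base64
import mimetypes

def inline_image(path):
    mime = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    with open(path, 'rb') as fh:
        payload = base64.b64encode(fh.read()).decode('ascii')
    return 'url("data:{};base64,{}")'.format(mime, payload)

# inline_image('logo.png')  # hypothetical local file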
brocade/pynos | pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py | https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py#L433-L446 | def port_profile_qos_profile_qos_trust_trust_cos(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
qos_profile = ET.SubElement(port_profile, "qos-profile")
qos = ET.SubElement(qos_profile, "qos")
trust = ET.SubElement(qos, "trust")
trust_cos = ET.SubElement(trust, "trust-cos")
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"port_profile_qos_profile_qos_trust_trust_cos",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"port_profile",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"port-profile\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-port-profile\"",
")",
"name_key",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"name\"",
")",
"name_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'name'",
")",
"qos_profile",
"=",
"ET",
".",
"SubElement",
"(",
"port_profile",
",",
"\"qos-profile\"",
")",
"qos",
"=",
"ET",
".",
"SubElement",
"(",
"qos_profile",
",",
"\"qos\"",
")",
"trust",
"=",
"ET",
".",
"SubElement",
"(",
"qos",
",",
"\"trust\"",
")",
"trust_cos",
"=",
"ET",
".",
"SubElement",
"(",
"trust",
",",
"\"trust-cos\"",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | Auto Generated Code | [
"Auto",
"Generated",
"Code"
] | python | train |
NikolayDachev/jadm | lib/paramiko-1.14.1/paramiko/_winapi.py | https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/_winapi.py#L145-L152 | def read(self, n):
"""
Read n bytes from mapped view.
"""
out = ctypes.create_string_buffer(n)
ctypes.windll.kernel32.RtlMoveMemory(out, self.view + self.pos, n)
self.pos += n
return out.raw | [
"def",
"read",
"(",
"self",
",",
"n",
")",
":",
"out",
"=",
"ctypes",
".",
"create_string_buffer",
"(",
"n",
")",
"ctypes",
".",
"windll",
".",
"kernel32",
".",
"RtlMoveMemory",
"(",
"out",
",",
"self",
".",
"view",
"+",
"self",
".",
"pos",
",",
"n",
")",
"self",
".",
"pos",
"+=",
"n",
"return",
"out",
".",
"raw"
] | Read n bytes from mapped view. | [
"Read",
"n",
"bytes",
"from",
"mapped",
"view",
"."
] | python | train |
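The read record is Windows-only (RtlMoveMemory). The same fixed-size copy can be sketched portably with ctypes.memmove, here against a plain in-memory buffer standing in for the mapped view:

import ctypes

view = ctypes.create_string_buffer(b'hello mapped view')
pos, n = 6, 6
out = ctypes.create_string_buffer(n)
ctypes.memmove(out, ctypes.addressof(view) + pos, n)
print(out.raw)  # b'mapped'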
alephdata/memorious | memorious/logic/http.py | https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/http.py#L162-L185 | def fetch(self):
"""Lazily trigger download of the data when requested."""
if self._file_path is not None:
return self._file_path
temp_path = self.context.work_path
if self._content_hash is not None:
self._file_path = storage.load_file(self._content_hash,
temp_path=temp_path)
return self._file_path
if self.response is not None:
self._file_path = random_filename(temp_path)
content_hash = sha1()
with open(self._file_path, 'wb') as fh:
for chunk in self.response.iter_content(chunk_size=8192):
content_hash.update(chunk)
fh.write(chunk)
self._remove_file = True
chash = content_hash.hexdigest()
self._content_hash = storage.archive_file(self._file_path,
content_hash=chash)
if self.http.cache and self.ok:
self.context.set_tag(self.request_id, self.serialize())
self.retrieved_at = datetime.utcnow().isoformat()
return self._file_path | [
"def",
"fetch",
"(",
"self",
")",
":",
"if",
"self",
".",
"_file_path",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_file_path",
"temp_path",
"=",
"self",
".",
"context",
".",
"work_path",
"if",
"self",
".",
"_content_hash",
"is",
"not",
"None",
":",
"self",
".",
"_file_path",
"=",
"storage",
".",
"load_file",
"(",
"self",
".",
"_content_hash",
",",
"temp_path",
"=",
"temp_path",
")",
"return",
"self",
".",
"_file_path",
"if",
"self",
".",
"response",
"is",
"not",
"None",
":",
"self",
".",
"_file_path",
"=",
"random_filename",
"(",
"temp_path",
")",
"content_hash",
"=",
"sha1",
"(",
")",
"with",
"open",
"(",
"self",
".",
"_file_path",
",",
"'wb'",
")",
"as",
"fh",
":",
"for",
"chunk",
"in",
"self",
".",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"8192",
")",
":",
"content_hash",
".",
"update",
"(",
"chunk",
")",
"fh",
".",
"write",
"(",
"chunk",
")",
"self",
".",
"_remove_file",
"=",
"True",
"chash",
"=",
"content_hash",
".",
"hexdigest",
"(",
")",
"self",
".",
"_content_hash",
"=",
"storage",
".",
"archive_file",
"(",
"self",
".",
"_file_path",
",",
"content_hash",
"=",
"chash",
")",
"if",
"self",
".",
"http",
".",
"cache",
"and",
"self",
".",
"ok",
":",
"self",
".",
"context",
".",
"set_tag",
"(",
"self",
".",
"request_id",
",",
"self",
".",
"serialize",
"(",
")",
")",
"self",
".",
"retrieved_at",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"return",
"self",
".",
"_file_path"
] | Lazily trigger download of the data when requested. | [
"Lazily",
"trigger",
"download",
"of",
"the",
"data",
"when",
"requested",
"."
] | python | train |
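The stream-to-disk-while-hashing core of the fetch record, isolated into a sketch; the requests package and network access are assumed, and the URL is a placeholder.

import hashlib
import tempfile

import requests

def download_with_hash(url):
    digest = hashlib.sha1()
    with tempfile.NamedTemporaryFile(delete=False) as fh:
        resp = requests.get(url, stream=True)
        for chunk in resp.iter_content(chunk_size=8192):
            digest.update(chunk)   # hash and write in one pass
            fh.write(chunk)
        return fh.name, digest.hexdigest()

# path, sha = download_with_hash('https://example.org/data.bin')  # placeholder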
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/text_format.py#L1167-L1180 | def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
the_bytes = self.ConsumeByteString()
try:
return six.text_type(the_bytes, 'utf-8')
except UnicodeDecodeError as e:
raise self._StringParseError(e) | [
"def",
"ConsumeString",
"(",
"self",
")",
":",
"the_bytes",
"=",
"self",
".",
"ConsumeByteString",
"(",
")",
"try",
":",
"return",
"six",
".",
"text_type",
"(",
"the_bytes",
",",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
"as",
"e",
":",
"raise",
"self",
".",
"_StringParseError",
"(",
"e",
")"
] | Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed. | [
"Consumes",
"a",
"string",
"value",
"."
] | python | train |
ZELLMECHANIK-DRESDEN/dclab | dclab/parse_funcs.py | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/parse_funcs.py#L43-L52 | def fintlist(alist):
"""A list of integers"""
outlist = []
if not isinstance(alist, (list, tuple)):
# we have a string (comma-separated integers)
alist = alist.strip().strip("[] ").split(",")
for it in alist:
if it:
outlist.append(fint(it))
return outlist | [
"def",
"fintlist",
"(",
"alist",
")",
":",
"outlist",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"alist",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"# we have a string (comma-separated integers)",
"alist",
"=",
"alist",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"\"[] \"",
")",
".",
"split",
"(",
"\",\"",
")",
"for",
"it",
"in",
"alist",
":",
"if",
"it",
":",
"outlist",
".",
"append",
"(",
"fint",
"(",
"it",
")",
")",
"return",
"outlist"
] | A list of integers | [
"A",
"list",
"of",
"integers"
] | python | train |
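A runnable demo of the fintlist parsing; fint is not shown in the record, so plain int() stands in for it here.

def fintlist(alist):
    if not isinstance(alist, (list, tuple)):
        # comma-separated integers, optionally wrapped in brackets
        alist = alist.strip().strip('[] ').split(',')
    return [int(it) for it in alist if it]

print(fintlist('[1, 2, 3]'))  # [1, 2, 3]
print(fintlist([4, 5]))       # [4, 5]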
apache/incubator-mxnet | python/mxnet/model.py | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L753-L802 | def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1] | [
"def",
"score",
"(",
"self",
",",
"X",
",",
"eval_metric",
"=",
"'acc'",
",",
"num_batch",
"=",
"None",
",",
"batch_end_callback",
"=",
"None",
",",
"reset",
"=",
"True",
")",
":",
"# setup metric",
"if",
"not",
"isinstance",
"(",
"eval_metric",
",",
"metric",
".",
"EvalMetric",
")",
":",
"eval_metric",
"=",
"metric",
".",
"create",
"(",
"eval_metric",
")",
"X",
"=",
"self",
".",
"_init_iter",
"(",
"X",
",",
"None",
",",
"is_train",
"=",
"False",
")",
"if",
"reset",
":",
"X",
".",
"reset",
"(",
")",
"data_shapes",
"=",
"X",
".",
"provide_data",
"data_names",
"=",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"data_shapes",
"]",
"type_dict",
"=",
"dict",
"(",
"(",
"key",
",",
"value",
".",
"dtype",
")",
"for",
"(",
"key",
",",
"value",
")",
"in",
"self",
".",
"arg_params",
".",
"items",
"(",
")",
")",
"for",
"x",
"in",
"X",
".",
"provide_data",
":",
"if",
"isinstance",
"(",
"x",
",",
"DataDesc",
")",
":",
"type_dict",
"[",
"x",
".",
"name",
"]",
"=",
"x",
".",
"dtype",
"else",
":",
"type_dict",
"[",
"x",
"[",
"0",
"]",
"]",
"=",
"mx_real_t",
"self",
".",
"_init_predictor",
"(",
"data_shapes",
",",
"type_dict",
")",
"data_arrays",
"=",
"[",
"self",
".",
"_pred_exec",
".",
"arg_dict",
"[",
"name",
"]",
"for",
"name",
"in",
"data_names",
"]",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"X",
")",
":",
"if",
"num_batch",
"is",
"not",
"None",
"and",
"i",
"==",
"num_batch",
":",
"break",
"_load_data",
"(",
"batch",
",",
"data_arrays",
")",
"self",
".",
"_pred_exec",
".",
"forward",
"(",
"is_train",
"=",
"False",
")",
"eval_metric",
".",
"update",
"(",
"batch",
".",
"label",
",",
"self",
".",
"_pred_exec",
".",
"outputs",
")",
"if",
"batch_end_callback",
"is",
"not",
"None",
":",
"batch_end_params",
"=",
"BatchEndParam",
"(",
"epoch",
"=",
"0",
",",
"nbatch",
"=",
"i",
",",
"eval_metric",
"=",
"eval_metric",
",",
"locals",
"=",
"locals",
"(",
")",
")",
"_multiple_callbacks",
"(",
"batch_end_callback",
",",
"batch_end_params",
")",
"return",
"eval_metric",
".",
"get",
"(",
")",
"[",
"1",
"]"
] | Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score. | [
"Run",
"the",
"model",
"given",
"an",
"input",
"and",
"calculate",
"the",
"score",
"as",
"assessed",
"by",
"an",
"evaluation",
"metric",
"."
] | python | train |
DataBiosphere/toil | src/toil/job.py | https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/job.py#L1364-L1379 | def _runner(self, jobGraph, jobStore, fileStore):
"""
This method actually runs the job, and serialises the next jobs.
:param class jobGraph: Instance of a jobGraph object
:param class jobStore: Instance of the job store
:param toil.fileStore.FileStore fileStore: Instance of a cached or uncached
filestore
:return:
"""
# Make fileStore available as an attribute during run() ...
self._fileStore = fileStore
# ... but also pass it to run() as an argument for backwards compatibility.
returnValues = self._run(jobGraph, fileStore)
# Serialize the new jobs defined by the run method to the jobStore
self._serialiseExistingJob(jobGraph, jobStore, returnValues) | [
"def",
"_runner",
"(",
"self",
",",
"jobGraph",
",",
"jobStore",
",",
"fileStore",
")",
":",
"# Make fileStore available as an attribute during run() ...",
"self",
".",
"_fileStore",
"=",
"fileStore",
"# ... but also pass it to run() as an argument for backwards compatibility.",
"returnValues",
"=",
"self",
".",
"_run",
"(",
"jobGraph",
",",
"fileStore",
")",
"# Serialize the new jobs defined by the run method to the jobStore",
"self",
".",
"_serialiseExistingJob",
"(",
"jobGraph",
",",
"jobStore",
",",
"returnValues",
")"
] | This method actually runs the job, and serialises the next jobs.
:param class jobGraph: Instance of a jobGraph object
:param class jobStore: Instance of the job store
:param toil.fileStore.FileStore fileStore: Instance of a cached or uncached
filestore
:return: | [
"This",
"method",
"actually",
"runs",
"the",
"job",
"and",
"serialises",
"the",
"next",
"jobs",
"."
] | python | train |
gmr/tinman | tinman/application.py | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/application.py#L124-L137 | def _prepare_paths(self):
"""Set the value of {{base}} in paths if the base path is set in the
configuration.
:raises: ValueError
"""
if config.BASE in self.paths:
for path in [path for path in self.paths if path != config.BASE]:
if config.BASE_VARIABLE in self.paths[path]:
self.paths[path] = \
self.paths[path].replace(config.BASE_VARIABLE,
self.paths[config.BASE])
LOGGER.debug('Prepared paths: %r', self.paths) | [
"def",
"_prepare_paths",
"(",
"self",
")",
":",
"if",
"config",
".",
"BASE",
"in",
"self",
".",
"paths",
":",
"for",
"path",
"in",
"[",
"path",
"for",
"path",
"in",
"self",
".",
"paths",
"if",
"path",
"!=",
"config",
".",
"BASE",
"]",
":",
"if",
"config",
".",
"BASE_VARIABLE",
"in",
"self",
".",
"paths",
"[",
"path",
"]",
":",
"self",
".",
"paths",
"[",
"path",
"]",
"=",
"self",
".",
"paths",
"[",
"path",
"]",
".",
"replace",
"(",
"config",
".",
"BASE_VARIABLE",
",",
"self",
".",
"paths",
"[",
"config",
".",
"BASE",
"]",
")",
"LOGGER",
".",
"debug",
"(",
"'Prepared paths: %r'",
",",
"self",
".",
"paths",
")"
] | Set the value of {{base}} in paths if the base path is set in the
configuration.
:raises: ValueError | [
"Set",
"the",
"value",
"of",
"{{",
"base",
"}}",
"in",
"paths",
"if",
"the",
"base",
"path",
"is",
"set",
"in",
"the",
"configuration",
"."
] | python | train |
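The {{base}} substitution from _prepare_paths as a standalone demo; the literal values of config.BASE and config.BASE_VARIABLE are not shown in the record, so 'base' and '{{base}}' below are assumptions.

BASE = 'base'               # assumed value of config.BASE
BASE_VARIABLE = '{{base}}'  # assumed value of config.BASE_VARIABLE

paths = {'base': '/opt/app', 'templates': '{{base}}/templates'}
if BASE in paths:
    for key in [k for k in paths if k != BASE]:
        if BASE_VARIABLE in paths[key]:
            paths[key] = paths[key].replace(BASE_VARIABLE, paths[BASE])

print(paths['templates'])  # /opt/app/templates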
inasafe/inasafe | safe/gui/tools/wizard/step_fc90_analysis.py | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/wizard/step_fc90_analysis.py#L312-L319 | def hide_busy(self):
"""Unlock buttons A helper function to indicate processing is done."""
self.progress_bar.hide()
self.parent.pbnNext.setEnabled(True)
self.parent.pbnBack.setEnabled(True)
self.parent.pbnCancel.setEnabled(True)
self.parent.repaint()
disable_busy_cursor() | [
"def",
"hide_busy",
"(",
"self",
")",
":",
"self",
".",
"progress_bar",
".",
"hide",
"(",
")",
"self",
".",
"parent",
".",
"pbnNext",
".",
"setEnabled",
"(",
"True",
")",
"self",
".",
"parent",
".",
"pbnBack",
".",
"setEnabled",
"(",
"True",
")",
"self",
".",
"parent",
".",
"pbnCancel",
".",
"setEnabled",
"(",
"True",
")",
"self",
".",
"parent",
".",
"repaint",
"(",
")",
"disable_busy_cursor",
"(",
")"
] | Unlock buttons. A helper function to indicate processing is done. | [
"Unlock",
"buttons",
"A",
"helper",
"function",
"to",
"indicate",
"processing",
"is",
"done",
"."
] | python | train |
mitsei/dlkit | dlkit/json_/grading/sessions.py | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L1883-L1905 | def get_grade_entries_by_genus_type(self, grade_entry_genus_type):
"""Gets a ``GradeEntryList`` corresponding to the given grade entry genus ``Type`` which does not include grade entries of genus types derived from the specified ``Type``.
arg: grade_entry_genus_type (osid.type.Type): a grade entry
genus type
return: (osid.grading.GradeEntryList) - the returned
``GradeEntry`` list
raise: NullArgument - ``grade_entry_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('grading',
collection='GradeEntry',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(grade_entry_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.GradeEntryList(result, runtime=self._runtime, proxy=self._proxy) | [
"def",
"get_grade_entries_by_genus_type",
"(",
"self",
",",
"grade_entry_genus_type",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceLookupSession.get_resources_by_genus_type",
"# NOTE: This implementation currently ignores plenary view",
"collection",
"=",
"JSONClientValidated",
"(",
"'grading'",
",",
"collection",
"=",
"'GradeEntry'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"result",
"=",
"collection",
".",
"find",
"(",
"dict",
"(",
"{",
"'genusTypeId'",
":",
"str",
"(",
"grade_entry_genus_type",
")",
"}",
",",
"*",
"*",
"self",
".",
"_view_filter",
"(",
")",
")",
")",
".",
"sort",
"(",
"'_id'",
",",
"DESCENDING",
")",
"return",
"objects",
".",
"GradeEntryList",
"(",
"result",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")"
] | Gets a ``GradeEntryList`` corresponding to the given grade entry genus ``Type`` which does not include grade entries of genus types derived from the specified ``Type``.
arg: grade_entry_genus_type (osid.type.Type): a grade entry
genus type
return: (osid.grading.GradeEntryList) - the returned
``GradeEntry`` list
raise: NullArgument - ``grade_entry_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | [
"Gets",
"a",
"GradeEntryList",
"corresponding",
"to",
"the",
"given",
"grade",
"entry",
"genus",
"Type",
"which",
"does",
"not",
"include",
"grade",
"entries",
"of",
"genus",
"types",
"derived",
"from",
"the",
"specified",
"Type",
"."
] | python | train |
pantsbuild/pants | src/python/pants/backend/jvm/subsystems/jvm_platform.py | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/subsystems/jvm_platform.py#L155-L175 | def parse_java_version(cls, version):
"""Parses the java version (given a string or Revision object).
Handles java version-isms, converting things like '7' -> '1.7' appropriately.
Truncates input versions down to just the major and minor numbers (e.g., 1.6), ignoring extra
versioning information after the second number.
:param version: the input version, given as a string or Revision object.
:return: the parsed and cleaned version, suitable as a javac -source or -target argument.
:rtype: Revision
"""
conversion = {str(i): '1.{}'.format(i) for i in cls.SUPPORTED_CONVERSION_VERSIONS}
if str(version) in conversion:
return Revision.lenient(conversion[str(version)])
if not hasattr(version, 'components'):
version = Revision.lenient(version)
if len(version.components) <= 2:
return version
return Revision(*version.components[:2]) | [
"def",
"parse_java_version",
"(",
"cls",
",",
"version",
")",
":",
"conversion",
"=",
"{",
"str",
"(",
"i",
")",
":",
"'1.{}'",
".",
"format",
"(",
"i",
")",
"for",
"i",
"in",
"cls",
".",
"SUPPORTED_CONVERSION_VERSIONS",
"}",
"if",
"str",
"(",
"version",
")",
"in",
"conversion",
":",
"return",
"Revision",
".",
"lenient",
"(",
"conversion",
"[",
"str",
"(",
"version",
")",
"]",
")",
"if",
"not",
"hasattr",
"(",
"version",
",",
"'components'",
")",
":",
"version",
"=",
"Revision",
".",
"lenient",
"(",
"version",
")",
"if",
"len",
"(",
"version",
".",
"components",
")",
"<=",
"2",
":",
"return",
"version",
"return",
"Revision",
"(",
"*",
"version",
".",
"components",
"[",
":",
"2",
"]",
")"
] | Parses the java version (given a string or Revision object).
Handles java version-isms, converting things like '7' -> '1.7' appropriately.
Truncates input versions down to just the major and minor numbers (e.g., 1.6), ignoring extra
versioning information after the second number.
:param version: the input version, given as a string or Revision object.
:return: the parsed and cleaned version, suitable as a javac -source or -target argument.
:rtype: Revision | [
"Parses",
"the",
"java",
"version",
"(",
"given",
"a",
"string",
"or",
"Revision",
"object",
")",
"."
] | python | train |
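
The `parse_java_version` record above maps shorthand versions ('7' -> '1.7') and truncates anything past major.minor. A minimal standalone re-sketch of that normalization on plain strings, without pants' `Revision` class (the supported range 5-11 is an assumption for illustration only):

def normalize_java_version(version, supported=range(5, 12)):
    # Shorthand like '7' maps to the legacy '1.7' form.
    conversion = {str(i): '1.{}'.format(i) for i in supported}
    if version in conversion:
        return conversion[version]
    # Keep only the major and minor components, e.g. '1.8.0_222' -> '1.8'.
    return '.'.join(version.split('.')[:2])

assert normalize_java_version('7') == '1.7'
assert normalize_java_version('1.8.0_222') == '1.8'
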
google/apitools | apitools/base/py/base_api.py | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/base_api.py#L689-L731 | def _RunMethod(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Call this method with request."""
if upload is not None and download is not None:
# TODO(craigcitro): This just involves refactoring the logic
# below into callbacks that we can pass around; in particular,
# the order should be that the upload gets the initial request,
# and then passes its reply to a download if one exists, and
# then that goes to ProcessResponse and is returned.
raise exceptions.NotYetImplementedError(
'Cannot yet use both upload and download at once')
http_request = self.PrepareHttpRequest(
method_config, request, global_params, upload, upload_config,
download)
# TODO(craigcitro): Make num_retries customizable on Transfer
# objects, and pass in self.__client.num_retries when initializing
# an upload or download.
if download is not None:
download.InitializeDownload(http_request, client=self.client)
return
http_response = None
if upload is not None:
http_response = upload.InitializeUpload(
http_request, client=self.client)
if http_response is None:
http = self.__client.http
if upload and upload.bytes_http:
http = upload.bytes_http
opts = {
'retries': self.__client.num_retries,
'max_retry_wait': self.__client.max_retry_wait,
}
if self.__client.check_response_func:
opts['check_response_func'] = self.__client.check_response_func
if self.__client.retry_func:
opts['retry_func'] = self.__client.retry_func
http_response = http_wrapper.MakeRequest(
http, http_request, **opts)
return self.ProcessHttpResponse(method_config, http_response, request) | [
"def",
"_RunMethod",
"(",
"self",
",",
"method_config",
",",
"request",
",",
"global_params",
"=",
"None",
",",
"upload",
"=",
"None",
",",
"upload_config",
"=",
"None",
",",
"download",
"=",
"None",
")",
":",
"if",
"upload",
"is",
"not",
"None",
"and",
"download",
"is",
"not",
"None",
":",
"# TODO(craigcitro): This just involves refactoring the logic",
"# below into callbacks that we can pass around; in particular,",
"# the order should be that the upload gets the initial request,",
"# and then passes its reply to a download if one exists, and",
"# then that goes to ProcessResponse and is returned.",
"raise",
"exceptions",
".",
"NotYetImplementedError",
"(",
"'Cannot yet use both upload and download at once'",
")",
"http_request",
"=",
"self",
".",
"PrepareHttpRequest",
"(",
"method_config",
",",
"request",
",",
"global_params",
",",
"upload",
",",
"upload_config",
",",
"download",
")",
"# TODO(craigcitro): Make num_retries customizable on Transfer",
"# objects, and pass in self.__client.num_retries when initializing",
"# an upload or download.",
"if",
"download",
"is",
"not",
"None",
":",
"download",
".",
"InitializeDownload",
"(",
"http_request",
",",
"client",
"=",
"self",
".",
"client",
")",
"return",
"http_response",
"=",
"None",
"if",
"upload",
"is",
"not",
"None",
":",
"http_response",
"=",
"upload",
".",
"InitializeUpload",
"(",
"http_request",
",",
"client",
"=",
"self",
".",
"client",
")",
"if",
"http_response",
"is",
"None",
":",
"http",
"=",
"self",
".",
"__client",
".",
"http",
"if",
"upload",
"and",
"upload",
".",
"bytes_http",
":",
"http",
"=",
"upload",
".",
"bytes_http",
"opts",
"=",
"{",
"'retries'",
":",
"self",
".",
"__client",
".",
"num_retries",
",",
"'max_retry_wait'",
":",
"self",
".",
"__client",
".",
"max_retry_wait",
",",
"}",
"if",
"self",
".",
"__client",
".",
"check_response_func",
":",
"opts",
"[",
"'check_response_func'",
"]",
"=",
"self",
".",
"__client",
".",
"check_response_func",
"if",
"self",
".",
"__client",
".",
"retry_func",
":",
"opts",
"[",
"'retry_func'",
"]",
"=",
"self",
".",
"__client",
".",
"retry_func",
"http_response",
"=",
"http_wrapper",
".",
"MakeRequest",
"(",
"http",
",",
"http_request",
",",
"*",
"*",
"opts",
")",
"return",
"self",
".",
"ProcessHttpResponse",
"(",
"method_config",
",",
"http_response",
",",
"request",
")"
] | Call this method with request. | [
"Call",
"this",
"method",
"with",
"request",
"."
] | python | train |
saltstack/salt | salt/utils/extend.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/extend.py#L45-L61 | def _get_template(path, option_key):
'''
Get the contents of a template file and provide it as a module type
:param path: path to the template.yml file
:type path: ``str``
:param option_key: The unique key of this template
:type option_key: ``str``
:returns: Details about the template
:rtype: ``tuple``
'''
with salt.utils.files.fopen(path, 'r') as template_f:
template = deserialize(template_f)
info = (option_key, template.get('description', ''), template)
return info | [
"def",
"_get_template",
"(",
"path",
",",
"option_key",
")",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"path",
",",
"'r'",
")",
"as",
"template_f",
":",
"template",
"=",
"deserialize",
"(",
"template_f",
")",
"info",
"=",
"(",
"option_key",
",",
"template",
".",
"get",
"(",
"'description'",
",",
"''",
")",
",",
"template",
")",
"return",
"info"
] | Get the contents of a template file and provide it as a module type
:param path: path to the template.yml file
:type path: ``str``
:param option_key: The unique key of this template
:type option_key: ``str``
:returns: Details about the template
:rtype: ``tuple`` | [
"Get",
"the",
"contents",
"of",
"a",
"template",
"file",
"and",
"provide",
"it",
"as",
"a",
"module",
"type"
] | python | train |
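
`_get_template` above loads a template.yml and returns a (key, description, template) tuple. A hypothetical standalone equivalent using PyYAML directly in place of salt's `fopen`/`deserialize` wrappers:

import yaml  # assumes PyYAML is installed

def get_template(path, option_key):
    # Deserialize the template file and extract a short description.
    with open(path, 'r') as template_f:
        template = yaml.safe_load(template_f)
    return option_key, template.get('description', ''), template
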
praekeltfoundation/seed-identity-store | identities/views.py | https://github.com/praekeltfoundation/seed-identity-store/blob/194e5756b5a74ebce9798c390de958cf5305b105/identities/views.py#L131-L210 | def get_queryset(self):
"""
This view should return a list of all the Identities
for the supplied query parameters. The query parameters
should be in the form:
{"address_type": "address"}
e.g.
{"msisdn": "+27123"}
{"email": "[email protected]"}
A special query parameter "include_inactive" can also be passed
as False to prevent returning identities for which the addresses
have been set to "inactive"
e.g.
{"include_inactive": False}
"""
query_params = list(self.request.query_params.keys())
# variable that stores criteria to filter identities by
filter_criteria = {}
# variable that stores a list of addresses that should be active
# if the special filter is passed in
exclude_if_address_inactive = []
# Determine from param "include_inactive" whether inactive identities
# should be included in the search results
if "include_inactive" in query_params:
if self.request.query_params["include_inactive"] in [
"False",
"false",
False,
]:
include_inactive = False
else:
include_inactive = True
else:
include_inactive = True # default to True
# Compile a list of criteria to filter the identities by, based on the
# query parameters
for filter in query_params:
if filter in ["include_inactive", "cursor"]:
# Don't add the cursor to the filter_criteria
pass
elif filter.startswith("details__addresses__"):
# Edit the query_param to evaluate the key instead of the value
# and add it to the filter_criteria
filter_criteria[filter + "__has_key"] = self.request.query_params[
filter
]
# Add the address to the list of addresses that should not
# be inactive (tuple e.g ("msisdn", "+27123"))
if include_inactive is False:
exclude_if_address_inactive.append(
(
filter.replace("details__addresses__", ""),
self.request.query_params[filter],
)
)
else:
# Add the normal params to the filter criteria
filter_criteria[filter] = self.request.query_params[filter]
identities = Identity.objects.filter(**filter_criteria)
if include_inactive is False:
# Check through all the identities and exclude ones where the
# addresses are inactive
for identity in identities:
for param in exclude_if_address_inactive:
q_key = identity.details["addresses"][param[0]][param[1]]
if "inactive" in q_key and q_key["inactive"] in [
True,
"True",
"true",
]: # noqa
identities = identities.exclude(id=identity.id)
return identities | [
"def",
"get_queryset",
"(",
"self",
")",
":",
"query_params",
"=",
"list",
"(",
"self",
".",
"request",
".",
"query_params",
".",
"keys",
"(",
")",
")",
"# variable that stores criteria to filter identities by",
"filter_criteria",
"=",
"{",
"}",
"# variable that stores a list of addresses that should be active",
"# if the special filter is passed in",
"exclude_if_address_inactive",
"=",
"[",
"]",
"# Determine from param \"include_inactive\" whether inactive identities",
"# should be included in the search results",
"if",
"\"include_inactive\"",
"in",
"query_params",
":",
"if",
"self",
".",
"request",
".",
"query_params",
"[",
"\"include_inactive\"",
"]",
"in",
"[",
"\"False\"",
",",
"\"false\"",
",",
"False",
",",
"]",
":",
"include_inactive",
"=",
"False",
"else",
":",
"include_inactive",
"=",
"True",
"else",
":",
"include_inactive",
"=",
"True",
"# default to True",
"# Compile a list of criteria to filter the identities by, based on the",
"# query parameters",
"for",
"filter",
"in",
"query_params",
":",
"if",
"filter",
"in",
"[",
"\"include_inactive\"",
",",
"\"cursor\"",
"]",
":",
"# Don't add the cursor to the filter_criteria",
"pass",
"elif",
"filter",
".",
"startswith",
"(",
"\"details__addresses__\"",
")",
":",
"# Edit the query_param to evaluate the key instead of the value",
"# and add it to the filter_criteria",
"filter_criteria",
"[",
"filter",
"+",
"\"__has_key\"",
"]",
"=",
"self",
".",
"request",
".",
"query_params",
"[",
"filter",
"]",
"# Add the address to the list of addresses that should not",
"# be inactive (tuple e.g (\"msisdn\", \"+27123\"))",
"if",
"include_inactive",
"is",
"False",
":",
"exclude_if_address_inactive",
".",
"append",
"(",
"(",
"filter",
".",
"replace",
"(",
"\"details__addresses__\"",
",",
"\"\"",
")",
",",
"self",
".",
"request",
".",
"query_params",
"[",
"filter",
"]",
",",
")",
")",
"else",
":",
"# Add the normal params to the filter criteria",
"filter_criteria",
"[",
"filter",
"]",
"=",
"self",
".",
"request",
".",
"query_params",
"[",
"filter",
"]",
"identities",
"=",
"Identity",
".",
"objects",
".",
"filter",
"(",
"*",
"*",
"filter_criteria",
")",
"if",
"include_inactive",
"is",
"False",
":",
"# Check through all the identities and exclude ones where the",
"# addresses are inactive",
"for",
"identity",
"in",
"identities",
":",
"for",
"param",
"in",
"exclude_if_address_inactive",
":",
"q_key",
"=",
"identity",
".",
"details",
"[",
"\"addresses\"",
"]",
"[",
"param",
"[",
"0",
"]",
"]",
"[",
"param",
"[",
"1",
"]",
"]",
"if",
"\"inactive\"",
"in",
"q_key",
"and",
"q_key",
"[",
"\"inactive\"",
"]",
"in",
"[",
"True",
",",
"\"True\"",
",",
"\"true\"",
",",
"]",
":",
"# noqa",
"identities",
"=",
"identities",
".",
"exclude",
"(",
"id",
"=",
"identity",
".",
"id",
")",
"return",
"identities"
] | This view should return a list of all the Identities
for the supplied query parameters. The query parameters
should be in the form:
{"address_type": "address"}
e.g.
{"msisdn": "+27123"}
{"email": "[email protected]"}
A special query parameter "include_inactive" can also be passed
as False to prevent returning identities for which the addresses
have been set to "inactive"
e.g.
{"include_inactive": False} | [
"This",
"view",
"should",
"return",
"a",
"list",
"of",
"all",
"the",
"Identities",
"for",
"the",
"supplied",
"query",
"parameters",
".",
"The",
"query",
"parameters",
"should",
"be",
"in",
"the",
"form",
":",
"{",
"address_type",
":",
"address",
"}",
"e",
".",
"g",
".",
"{",
"msisdn",
":",
"+",
"27123",
"}",
"{",
"email",
":",
"foo@bar",
".",
"com",
"}"
] | python | train |
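
The address filtering in `get_queryset` above rewrites `details__addresses__*` query parameters into Django `__has_key` lookups, so the address itself is matched as a JSON key rather than a value. A hypothetical illustration of the translation for a request like `?details__addresses__msisdn=%2B27123`:

query_params = {'details__addresses__msisdn': '+27123'}

filter_criteria = {}
for name, value in query_params.items():
    if name.startswith('details__addresses__'):
        # Match the presence of the key '+27123' under details.addresses.msisdn.
        filter_criteria[name + '__has_key'] = value
    else:
        filter_criteria[name] = value

# filter_criteria == {'details__addresses__msisdn__has_key': '+27123'}
# Identity.objects.filter(**filter_criteria) is then evaluated as in the view.
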
resonai/ybt | yabt/caching.py | https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/caching.py#L177-L197 | def copy_artifact(src_path: str, artifact_hash: str, conf: Config):
"""Copy the artifact at `src_path` with hash `artifact_hash` to artifacts
cache dir.
If an artifact already exists at that location, it is assumed to be
identical (since it's based on hash), and the copy is skipped.
TODO: pruning policy to limit cache size.
"""
cache_dir = conf.get_artifacts_cache_dir()
if not isdir(cache_dir):
makedirs(cache_dir)
cached_artifact_path = join(cache_dir, artifact_hash)
if isfile(cached_artifact_path) or isdir(cached_artifact_path):
logger.debug('Skipping copy of existing cached artifact {} -> {}',
src_path, cached_artifact_path)
return
abs_src_path = join(conf.project_root, src_path)
logger.debug('Caching artifact {} under {}',
abs_src_path, cached_artifact_path)
shutil.copy(abs_src_path, cached_artifact_path) | [
"def",
"copy_artifact",
"(",
"src_path",
":",
"str",
",",
"artifact_hash",
":",
"str",
",",
"conf",
":",
"Config",
")",
":",
"cache_dir",
"=",
"conf",
".",
"get_artifacts_cache_dir",
"(",
")",
"if",
"not",
"isdir",
"(",
"cache_dir",
")",
":",
"makedirs",
"(",
"cache_dir",
")",
"cached_artifact_path",
"=",
"join",
"(",
"cache_dir",
",",
"artifact_hash",
")",
"if",
"isfile",
"(",
"cached_artifact_path",
")",
"or",
"isdir",
"(",
"cached_artifact_path",
")",
":",
"logger",
".",
"debug",
"(",
"'Skipping copy of existing cached artifact {} -> {}'",
",",
"src_path",
",",
"cached_artifact_path",
")",
"return",
"abs_src_path",
"=",
"join",
"(",
"conf",
".",
"project_root",
",",
"src_path",
")",
"logger",
".",
"debug",
"(",
"'Caching artifact {} under {}'",
",",
"abs_src_path",
",",
"cached_artifact_path",
")",
"shutil",
".",
"copy",
"(",
"abs_src_path",
",",
"cached_artifact_path",
")"
] | Copy the artifact at `src_path` with hash `artifact_hash` to artifacts
cache dir.
If an artifact already exists at that location, it is assumed to be
identical (since it's based on hash), and the copy is skipped.
TODO: pruning policy to limit cache size. | [
"Copy",
"the",
"artifact",
"at",
"src_path",
"with",
"hash",
"artifact_hash",
"to",
"artifacts",
"cache",
"dir",
"."
] | python | train |
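
`copy_artifact` above is a content-addressed cache write: the destination file name is the artifact's hash, so an existing entry can be assumed identical and skipped. A minimal standalone sketch of the same idea (the SHA-1 scheme and paths here are illustrative, not ybt's):

import hashlib
import os
import shutil

def cache_by_hash(src_path, cache_dir):
    with open(src_path, 'rb') as f:
        digest = hashlib.sha1(f.read()).hexdigest()
    os.makedirs(cache_dir, exist_ok=True)
    dst = os.path.join(cache_dir, digest)
    # Same hash -> assumed identical content, so the copy is skipped.
    if not os.path.exists(dst):
        shutil.copy(src_path, dst)
    return dst
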
mpapi/lazylights | lazylights.py | https://github.com/mpapi/lazylights/blob/536dbd3ce75c28b3545cf66f25fc72589488063f/lazylights.py#L140-L162 | def _retry(event, attempts, delay):
"""
An iterator of pairs of (attempt number, event set), checking whether
`event` is set up to `attempts` number of times, and delaying `delay`
seconds in between.
Terminates as soon as `event` is set, or until `attempts` have been made.
Intended to be used in a loop, as in:
for num, ok in _retry(event_to_wait_for, 10, 1.0):
do_async_thing_that_sets_event()
_log('tried %d time(s) to set event', num)
if not ok:
raise Exception('failed to set event')
"""
event.clear()
attempted = 0
while attempted < attempts and not event.is_set():
yield attempted, event.is_set()
attempted += 1
if event.wait(delay):
break
yield attempted, event.is_set() | [
"def",
"_retry",
"(",
"event",
",",
"attempts",
",",
"delay",
")",
":",
"event",
".",
"clear",
"(",
")",
"attempted",
"=",
"0",
"while",
"attempted",
"<",
"attempts",
"and",
"not",
"event",
".",
"is_set",
"(",
")",
":",
"yield",
"attempted",
",",
"event",
".",
"is_set",
"(",
")",
"if",
"event",
".",
"wait",
"(",
"delay",
")",
":",
"break",
"yield",
"attempted",
",",
"event",
".",
"is_set",
"(",
")"
] | An iterator of pairs of (attempt number, event set), checking whether
`event` is set up to `attempts` number of times, and delaying `delay`
seconds in between.
Terminates as soon as `event` is set, or until `attempts` have been made.
Intended to be used in a loop, as in:
for num, ok in _retry(event_to_wait_for, 10, 1.0):
do_async_thing_that_sets_event()
_log('tried %d time(s) to set event', num)
if not ok:
raise Exception('failed to set event') | [
"An",
"iterator",
"of",
"pairs",
"of",
"(",
"attempt",
"number",
"event",
"set",
")",
"checking",
"whether",
"event",
"is",
"set",
"up",
"to",
"attempts",
"number",
"of",
"times",
"and",
"delaying",
"delay",
"seconds",
"in",
"between",
"."
] | python | train |
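
The `_retry` generator above is meant to drive a loop exactly as its docstring sketches. A hypothetical self-contained run against a `threading.Event` (the import path for `_retry` is assumed):

import threading
# from lazylights import _retry  # assumed import location

done = threading.Event()

def try_async_thing():
    done.set()  # stand-in for work whose completion callback sets the event

for num, ok in _retry(done, attempts=10, delay=0.1):
    try_async_thing()
if not done.is_set():
    raise RuntimeError('event was never set')
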
python-openxml/python-docx | docx/blkcntnr.py | https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/blkcntnr.py#L42-L51 | def add_table(self, rows, cols, width):
"""
Return a table of *width* having *rows* rows and *cols* columns,
newly appended to the content in this container. *width* is evenly
distributed between the table columns.
"""
from .table import Table
tbl = CT_Tbl.new_tbl(rows, cols, width)
self._element._insert_tbl(tbl)
return Table(tbl, self) | [
"def",
"add_table",
"(",
"self",
",",
"rows",
",",
"cols",
",",
"width",
")",
":",
"from",
".",
"table",
"import",
"Table",
"tbl",
"=",
"CT_Tbl",
".",
"new_tbl",
"(",
"rows",
",",
"cols",
",",
"width",
")",
"self",
".",
"_element",
".",
"_insert_tbl",
"(",
"tbl",
")",
"return",
"Table",
"(",
"tbl",
",",
"self",
")"
] | Return a table of *width* having *rows* rows and *cols* columns,
newly appended to the content in this container. *width* is evenly
distributed between the table columns. | [
"Return",
"a",
"table",
"of",
"*",
"width",
"*",
"having",
"*",
"rows",
"*",
"rows",
"and",
"*",
"cols",
"*",
"columns",
"newly",
"appended",
"to",
"the",
"content",
"in",
"this",
"container",
".",
"*",
"width",
"*",
"is",
"evenly",
"distributed",
"between",
"the",
"table",
"columns",
"."
] | python | train |
twilio/twilio-python | twilio/rest/messaging/v1/service/alpha_sender.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/messaging/v1/service/alpha_sender.py#L137-L146 | def get(self, sid):
"""
Constructs a AlphaSenderContext
:param sid: The sid
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext
"""
return AlphaSenderContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | [
"def",
"get",
"(",
"self",
",",
"sid",
")",
":",
"return",
"AlphaSenderContext",
"(",
"self",
".",
"_version",
",",
"service_sid",
"=",
"self",
".",
"_solution",
"[",
"'service_sid'",
"]",
",",
"sid",
"=",
"sid",
",",
")"
] | Constructs a AlphaSenderContext
:param sid: The sid
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderContext | [
"Constructs",
"a",
"AlphaSenderContext"
] | python | train |
denisenkom/pytds | src/pytds/tds.py | https://github.com/denisenkom/pytds/blob/7d875cab29134afdef719406831c1c6a0d7af48a/src/pytds/tds.py#L947-L960 | def _convert_params(self, parameters):
""" Converts a dict of list of parameters into a list of :class:`Column` instances.
:param parameters: Can be a list of parameter values, or a dict of parameter names to values.
:return: A list of :class:`Column` instances.
"""
if isinstance(parameters, dict):
return [self.make_param(name, value)
for name, value in parameters.items()]
else:
params = []
for parameter in parameters:
params.append(self.make_param('', parameter))
return params | [
"def",
"_convert_params",
"(",
"self",
",",
"parameters",
")",
":",
"if",
"isinstance",
"(",
"parameters",
",",
"dict",
")",
":",
"return",
"[",
"self",
".",
"make_param",
"(",
"name",
",",
"value",
")",
"for",
"name",
",",
"value",
"in",
"parameters",
".",
"items",
"(",
")",
"]",
"else",
":",
"params",
"=",
"[",
"]",
"for",
"parameter",
"in",
"parameters",
":",
"params",
".",
"append",
"(",
"self",
".",
"make_param",
"(",
"''",
",",
"parameter",
")",
")",
"return",
"params"
] | Converts a dict of list of parameters into a list of :class:`Column` instances.
:param parameters: Can be a list of parameter values, or a dict of parameter names to values.
:return: A list of :class:`Column` instances. | [
"Converts",
"a",
"dict",
"of",
"list",
"of",
"parameters",
"into",
"a",
"list",
"of",
":",
"class",
":",
"Column",
"instances",
"."
] | python | train |
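
`_convert_params` above normalizes either a dict (named parameters) or a sequence (positional parameters) into one list. The same normalization pattern in isolation, with a stand-in `make_param`:

def convert_params(parameters, make_param=lambda name, value: (name, value)):
    # Dicts carry parameter names; plain sequences get empty names.
    if isinstance(parameters, dict):
        return [make_param(name, value) for name, value in parameters.items()]
    return [make_param('', value) for value in parameters]

assert convert_params({'a': 1}) == [('a', 1)]
assert convert_params([1, 2]) == [('', 1), ('', 2)]
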
sony/nnabla | python/src/nnabla/parameter.py | https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/parameter.py#L149-L167 | def pop_parameter(key):
'''Remove and get parameter by key.
Args:
key(str): Key of parameter.
Returns: ~nnabla.Variable
Parameter if key found, otherwise None.
'''
names = key.split('/')
if len(names) > 1:
with parameter_scope(names[0]):
return pop_parameter('/'.join(names[1:]))
global current_scope
param = current_scope.get(key, None)
if param is not None:
del current_scope[key]
return param | [
"def",
"pop_parameter",
"(",
"key",
")",
":",
"names",
"=",
"key",
".",
"split",
"(",
"'/'",
")",
"if",
"len",
"(",
"names",
")",
">",
"1",
":",
"with",
"parameter_scope",
"(",
"names",
"[",
"0",
"]",
")",
":",
"return",
"pop_parameter",
"(",
"'/'",
".",
"join",
"(",
"names",
"[",
"1",
":",
"]",
")",
")",
"global",
"current_scope",
"param",
"=",
"current_scope",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"param",
"is",
"not",
"None",
":",
"del",
"current_scope",
"[",
"key",
"]",
"return",
"param"
] | Remove and get parameter by key.
Args:
key(str): Key of parameter.
Returns: ~nnabla.Variable
Parameter if key found, otherwise None. | [
"Remove",
"and",
"get",
"parameter",
"by",
"key",
"."
] | python | train |
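
`pop_parameter` above resolves a '/'-separated key by recursing one scope level at a time before deleting the leaf entry. The same recursion on a plain nested dict, stripped of nnabla's scope machinery (a hypothetical stand-in, not the library API):

def pop_nested(scope, key):
    # Walk 'a/b/c' one level at a time, then remove the leaf.
    head, _, rest = key.partition('/')
    if rest:
        return pop_nested(scope[head], rest)
    return scope.pop(head, None)

params = {'net': {'conv1': {'W': 'weights'}}}
assert pop_nested(params, 'net/conv1/W') == 'weights'
assert 'W' not in params['net']['conv1']
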
harlowja/failure | failure/finders.py | https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/finders.py#L100-L110 | def combine_or(matcher, *more_matchers):
"""Combines more than one matcher together (first that matches wins)."""
def combined_matcher(cause):
for sub_matcher in itertools.chain([matcher], more_matchers):
cause_cls = sub_matcher(cause)
if cause_cls is not None:
return cause_cls
return None
return combined_matcher
"def",
"combine_or",
"(",
"matcher",
",",
"*",
"more_matchers",
")",
":",
"def",
"matcher",
"(",
"cause",
")",
":",
"for",
"sub_matcher",
"in",
"itertools",
".",
"chain",
"(",
"[",
"matcher",
"]",
",",
"more_matchers",
")",
":",
"cause_cls",
"=",
"sub_matcher",
"(",
"cause",
")",
"if",
"cause_cls",
"is",
"not",
"None",
":",
"return",
"cause_cls",
"return",
"None",
"return",
"matcher"
] | Combines more than one matcher together (first that matches wins). | [
"Combines",
"more",
"than",
"one",
"matcher",
"together",
"(",
"first",
"that",
"matches",
"wins",
")",
"."
] | python | train |
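
`combine_or` above returns a closure that yields the first non-None classification. A hypothetical usage sketch with two trivial dict-based matchers (assuming `combine_or` is importable):

def match_value_error(cause):
    return ValueError if cause.get('kind') == 'value' else None

def match_type_error(cause):
    return TypeError if cause.get('kind') == 'type' else None

matcher = combine_or(match_value_error, match_type_error)
assert matcher({'kind': 'type'}) is TypeError
assert matcher({'kind': 'other'}) is None
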
deepmind/sonnet | sonnet/python/modules/basic.py | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/basic.py#L868-L874 | def transpose(self, name=None):
"""Returns transpose batch reshape."""
if name is None:
name = self.module_name + "_transpose"
return BatchReshape(shape=lambda: self.input_shape,
preserve_dims=self._preserve_dims,
name=name) | [
"def",
"transpose",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"self",
".",
"module_name",
"+",
"\"_transpose\"",
"return",
"BatchReshape",
"(",
"shape",
"=",
"lambda",
":",
"self",
".",
"input_shape",
",",
"preserve_dims",
"=",
"self",
".",
"_preserve_dims",
",",
"name",
"=",
"name",
")"
] | Returns transpose batch reshape. | [
"Returns",
"transpose",
"batch",
"reshape",
"."
] | python | train |
pypa/pipenv | pipenv/vendor/pipdeptree.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pipdeptree.py#L460-L476 | def conflicting_deps(tree):
"""Returns dependencies which are not present or conflict with the
requirements of other packages.
e.g. will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed
:param tree: the requirements tree (dict)
:returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage
:rtype: dict
"""
conflicting = defaultdict(list)
for p, rs in tree.items():
for req in rs:
if req.is_conflicting():
conflicting[p].append(req)
return conflicting | [
"def",
"conflicting_deps",
"(",
"tree",
")",
":",
"conflicting",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"p",
",",
"rs",
"in",
"tree",
".",
"items",
"(",
")",
":",
"for",
"req",
"in",
"rs",
":",
"if",
"req",
".",
"is_conflicting",
"(",
")",
":",
"conflicting",
"[",
"p",
"]",
".",
"append",
"(",
"req",
")",
"return",
"conflicting"
] | Returns dependencies which are not present or conflict with the
requirements of other packages.
e.g. will warn if pkg1 requires pkg2==2.0 and pkg2==1.0 is installed
:param tree: the requirements tree (dict)
:returns: dict of DistPackage -> list of unsatisfied/unknown ReqPackage
:rtype: dict | [
"Returns",
"dependencies",
"which",
"are",
"not",
"present",
"or",
"conflict",
"with",
"the",
"requirements",
"of",
"other",
"packages",
"."
] | python | train |
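
`conflicting_deps` above just inverts the tree into a map of packages whose requirements report a conflict. A self-contained sketch with stand-in requirement objects (only the shape of the tree matches pipdeptree's real classes):

from collections import defaultdict, namedtuple

Req = namedtuple('Req', 'name conflicting')

def find_conflicts(tree):
    conflicting = defaultdict(list)
    for pkg, reqs in tree.items():
        for req in reqs:
            if req.conflicting:  # stands in for req.is_conflicting()
                conflicting[pkg].append(req)
    return conflicting

tree = {'pkg1': [Req('pkg2==2.0', True)], 'pkg3': [Req('pkg2', False)]}
assert list(find_conflicts(tree)) == ['pkg1']
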
Jammy2211/PyAutoLens | autolens/model/profiles/mass_profiles.py | https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/model/profiles/mass_profiles.py#L1438-L1450 | def deflections_from_grid(self, grid):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
eta = np.multiply(1. / self.scale_radius, self.grid_to_grid_radii(grid=grid))
deflection_r = np.multiply(4. * self.kappa_s * self.scale_radius, self.deflection_func_sph(eta))
return self.grid_to_grid_cartesian(grid, deflection_r) | [
"def",
"deflections_from_grid",
"(",
"self",
",",
"grid",
")",
":",
"eta",
"=",
"np",
".",
"multiply",
"(",
"1.",
"/",
"self",
".",
"scale_radius",
",",
"self",
".",
"grid_to_grid_radii",
"(",
"grid",
"=",
"grid",
")",
")",
"deflection_r",
"=",
"np",
".",
"multiply",
"(",
"4.",
"*",
"self",
".",
"kappa_s",
"*",
"self",
".",
"scale_radius",
",",
"self",
".",
"deflection_func_sph",
"(",
"eta",
")",
")",
"return",
"self",
".",
"grid_to_grid_cartesian",
"(",
"grid",
",",
"deflection_r",
")"
] | Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on. | [
"Calculate",
"the",
"deflection",
"angles",
"at",
"a",
"given",
"set",
"of",
"arc",
"-",
"second",
"gridded",
"coordinates",
"."
] | python | valid |
quantopian/zipline | zipline/data/hdf5_daily_bars.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/hdf5_daily_bars.py#L360-L391 | def compute_asset_lifetimes(frames):
"""
Parameters
----------
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row for
each date and a column for each sid, as passed to write().
Returns
-------
start_date_ixs : np.array[int64]
The index of the first date with non-nan values, for each sid.
end_date_ixs : np.array[int64]
The index of the last date with non-nan values, for each sid.
"""
# Build a 2D array (dates x sids), where an entry is True if all
# fields are nan for the given day and sid.
is_null_matrix = np.logical_and.reduce(
[frames[field].isnull().values for field in FIELDS],
)
if not is_null_matrix.size:
empty = np.array([], dtype='int64')
return empty, empty.copy()
# Offset of the first non-null from the start of the input.
start_date_ixs = is_null_matrix.argmin(axis=0)
# Offset of the last non-null from the **end** of the input.
end_offsets = is_null_matrix[::-1].argmin(axis=0)
# Offset of the last non-null from the start of the input
end_date_ixs = is_null_matrix.shape[0] - end_offsets - 1
return start_date_ixs, end_date_ixs | [
"def",
"compute_asset_lifetimes",
"(",
"frames",
")",
":",
"# Build a 2D array (dates x sids), where an entry is True if all",
"# fields are nan for the given day and sid.",
"is_null_matrix",
"=",
"np",
".",
"logical_and",
".",
"reduce",
"(",
"[",
"frames",
"[",
"field",
"]",
".",
"isnull",
"(",
")",
".",
"values",
"for",
"field",
"in",
"FIELDS",
"]",
",",
")",
"if",
"not",
"is_null_matrix",
".",
"size",
":",
"empty",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'int64'",
")",
"return",
"empty",
",",
"empty",
".",
"copy",
"(",
")",
"# Offset of the first null from the start of the input.",
"start_date_ixs",
"=",
"is_null_matrix",
".",
"argmin",
"(",
"axis",
"=",
"0",
")",
"# Offset of the last null from the **end** of the input.",
"end_offsets",
"=",
"is_null_matrix",
"[",
":",
":",
"-",
"1",
"]",
".",
"argmin",
"(",
"axis",
"=",
"0",
")",
"# Offset of the last null from the start of the input",
"end_date_ixs",
"=",
"is_null_matrix",
".",
"shape",
"[",
"0",
"]",
"-",
"end_offsets",
"-",
"1",
"return",
"start_date_ixs",
",",
"end_date_ixs"
] | Parameters
----------
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row for
each date and a column for each sid, as passed to write().
Returns
-------
start_date_ixs : np.array[int64]
The index of the first date with non-nan values, for each sid.
end_date_ixs : np.array[int64]
The index of the last date with non-nan values, for each sid. | [
"Parameters",
"----------",
"frames",
":",
"dict",
"[",
"str",
"pd",
".",
"DataFrame",
"]",
"A",
"dict",
"mapping",
"each",
"OHLCV",
"field",
"to",
"a",
"dataframe",
"with",
"a",
"row",
"for",
"each",
"date",
"and",
"a",
"column",
"for",
"each",
"sid",
"as",
"passed",
"to",
"write",
"()",
"."
] | python | train |
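
The core trick in `compute_asset_lifetimes` above is `argmin` on a boolean matrix: the first False (first non-null day) minimizes each column, and running the same reduction on the reversed matrix yields the last non-null day. Demonstrated on a toy all-null mask:

import numpy as np

# True where every OHLCV field is NaN for that (day, sid).
is_null = np.array([[True,  True],
                    [False, True],
                    [False, False],
                    [True,  False]])

start_ixs = is_null.argmin(axis=0)                              # first non-null row
end_ixs = is_null.shape[0] - is_null[::-1].argmin(axis=0) - 1   # last non-null row

assert list(start_ixs) == [1, 2]
assert list(end_ixs) == [2, 3]
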
thavel/synolopy | synolopy/cgi.py | https://github.com/thavel/synolopy/blob/fdb23cdde693b13a59af9873f03b2afab35cb50e/synolopy/cgi.py#L80-L86 | def auth_required(self):
"""
If any ancestor requires authentication, this node needs it too.
"""
if self._auth:
return self._auth, self
return self.__parent__.auth_required() | [
"def",
"auth_required",
"(",
"self",
")",
":",
"if",
"self",
".",
"_auth",
":",
"return",
"self",
".",
"_auth",
",",
"self",
"return",
"self",
".",
"__parent__",
".",
"auth_required",
"(",
")"
] | If any ancestor requires authentication, this node needs it too. | [
"If",
"any",
"ancestor",
"required",
"an",
"authentication",
"this",
"node",
"needs",
"it",
"too",
"."
] | python | train |
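
`auth_required` above walks up the node tree until some ancestor carries an auth flag. The same ancestor-walk in a minimal stand-in node class (hypothetical, purely to show the recursion):

class Node:
    def __init__(self, parent=None, auth=None):
        self._auth, self.__parent__ = auth, parent

    def auth_required(self):
        if self._auth:
            return self._auth, self
        return self.__parent__.auth_required()

root = Node(auth='session-token')
leaf = Node(parent=Node(parent=root))
assert leaf.auth_required() == ('session-token', root)
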
Azure/azure-sdk-for-python | azure-servicebus/azure/servicebus/control_client/servicebusservice.py | https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/control_client/servicebusservice.py#L535-L568 | def create_subscription(self, topic_name, subscription_name,
subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription resource
manifest is immutable.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
fail_on_exist:
Specify whether throw exception when subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.body = _get_request_body(
_convert_subscription_to_xml(subscription))
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True | [
"def",
"create_subscription",
"(",
"self",
",",
"topic_name",
",",
"subscription_name",
",",
"subscription",
"=",
"None",
",",
"fail_on_exist",
"=",
"False",
")",
":",
"_validate_not_none",
"(",
"'topic_name'",
",",
"topic_name",
")",
"_validate_not_none",
"(",
"'subscription_name'",
",",
"subscription_name",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'PUT'",
"request",
".",
"host",
"=",
"self",
".",
"_get_host",
"(",
")",
"request",
".",
"path",
"=",
"'/'",
"+",
"_str",
"(",
"topic_name",
")",
"+",
"'/subscriptions/'",
"+",
"_str",
"(",
"subscription_name",
")",
"+",
"''",
"request",
".",
"body",
"=",
"_get_request_body",
"(",
"_convert_subscription_to_xml",
"(",
"subscription",
")",
")",
"request",
".",
"path",
",",
"request",
".",
"query",
"=",
"self",
".",
"_httpclient",
".",
"_update_request_uri_query",
"(",
"request",
")",
"# pylint: disable=protected-access",
"request",
".",
"headers",
"=",
"self",
".",
"_update_service_bus_header",
"(",
"request",
")",
"if",
"not",
"fail_on_exist",
":",
"try",
":",
"self",
".",
"_perform_request",
"(",
"request",
")",
"return",
"True",
"except",
"AzureHttpError",
"as",
"ex",
":",
"_dont_fail_on_exist",
"(",
"ex",
")",
"return",
"False",
"else",
":",
"self",
".",
"_perform_request",
"(",
"request",
")",
"return",
"True"
] | Creates a new subscription. Once created, this subscription resource
manifest is immutable.
topic_name:
Name of the topic.
subscription_name:
Name of the subscription.
fail_on_exist:
Specify whether to throw an exception when the subscription exists.
"Creates",
"a",
"new",
"subscription",
".",
"Once",
"created",
"this",
"subscription",
"resource",
"manifest",
"is",
"immutable",
"."
] | python | test |
limpyd/redis-limpyd | limpyd/fields.py | https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/fields.py#L100-L115 | def _call_command(self, name, *args, **kwargs):
"""
Check if the command to be executed is a modifier, to connect the object.
Then call _traverse_command.
"""
obj = getattr(self, '_instance', self) # _instance if a field, self if an instance
# The object may not be already connected, so if we want to update a
# field, connect it before.
# If the object as no PK yet, let the object create itself
if name in self.available_modifiers and obj._pk and not obj.connected:
obj.connect()
# Give priority to a "_call_{command}" method
meth = getattr(self, '_call_%s' % name, self._traverse_command)
return meth(name, *args, **kwargs) | [
"def",
"_call_command",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"obj",
"=",
"getattr",
"(",
"self",
",",
"'_instance'",
",",
"self",
")",
"# _instance if a field, self if an instance",
"# The object may not be already connected, so if we want to update a",
"# field, connect it before.",
"# If the object as no PK yet, let the object create itself",
"if",
"name",
"in",
"self",
".",
"available_modifiers",
"and",
"obj",
".",
"_pk",
"and",
"not",
"obj",
".",
"connected",
":",
"obj",
".",
"connect",
"(",
")",
"# Give priority to a \"_call_{commmand}\" method",
"meth",
"=",
"getattr",
"(",
"self",
",",
"'_call_%s'",
"%",
"name",
",",
"self",
".",
"_traverse_command",
")",
"return",
"meth",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Check if the command to be executed is a modifier, to connect the object.
Then call _traverse_command. | [
"Check",
"if",
"the",
"command",
"to",
"be",
"executed",
"is",
"a",
"modifier",
"to",
"connect",
"the",
"object",
".",
"Then",
"call",
"_traverse_command",
"."
] | python | train |
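
The dispatch at the end of `_call_command` above is a common pattern: prefer a specialized `_call_<name>` method, fall back to a generic handler. In isolation:

class Dispatcher:
    def _traverse_command(self, name, *args):
        return 'generic:%s' % name

    def _call_get(self, name, *args):
        return 'specialized:%s' % name

    def call(self, name, *args):
        # Prefer a "_call_{command}" override, else the generic path.
        meth = getattr(self, '_call_%s' % name, self._traverse_command)
        return meth(name, *args)

d = Dispatcher()
assert d.call('get') == 'specialized:get'
assert d.call('set') == 'generic:set'
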
StackStorm/pybind | pybind/slxos/v17s_1_02/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L6912-L6935 | def _set_mpls_state(self, v, load=False):
"""
Setter method for mpls_state, mapped from YANG variable /mpls_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_state() directly.
YANG Description: MPLS Operational Information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mpls_state.mpls_state, is_container='container', presence=False, yang_name="mpls-state", rest_name="mpls-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-mpls', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mpls_state.mpls_state, is_container='container', presence=False, yang_name="mpls-state", rest_name="mpls-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-mpls', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=True)""",
})
self.__mpls_state = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_mpls_state",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"mpls_state",
".",
"mpls_state",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"mpls-state\"",
",",
"rest_name",
"=",
"\"mpls-state\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'callpoint'",
":",
"u'mpls-mpls'",
",",
"u'cli-suppress-show-path'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-mpls-operational'",
",",
"defining_module",
"=",
"'brocade-mpls-operational'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"mpls_state must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=mpls_state.mpls_state, is_container='container', presence=False, yang_name=\"mpls-state\", rest_name=\"mpls-state\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-mpls', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__mpls_state",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for mpls_state, mapped from YANG variable /mpls_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_state() directly.
YANG Description: MPLS Operational Information | [
"Setter",
"method",
"for",
"mpls_state",
"mapped",
"from",
"YANG",
"variable",
"/",
"mpls_state",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_mpls_state",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_mpls_state",
"()",
"directly",
"."
] | python | train |
fracpete/python-weka-wrapper3 | python/weka/flow/source.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/source.py#L317-L334 | def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(GetStorageValue, self).fix_config(options)
opt = "storage_name"
if opt not in options:
options[opt] = "unknown"
if opt not in self.help:
self.help[opt] = "The name of the storage value to retrieve (string)."
return options | [
"def",
"fix_config",
"(",
"self",
",",
"options",
")",
":",
"options",
"=",
"super",
"(",
"GetStorageValue",
",",
"self",
")",
".",
"fix_config",
"(",
"options",
")",
"opt",
"=",
"\"storage_name\"",
"if",
"opt",
"not",
"in",
"options",
":",
"options",
"[",
"opt",
"]",
"=",
"\"unknown\"",
"if",
"opt",
"not",
"in",
"self",
".",
"help",
":",
"self",
".",
"help",
"[",
"opt",
"]",
"=",
"\"The name of the storage value to retrieve (string).\"",
"return",
"options"
] | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | [
"Fixes",
"the",
"options",
"if",
"necessary",
".",
"I",
".",
"e",
".",
"it",
"adds",
"all",
"required",
"elements",
"to",
"the",
"dictionary",
"."
] | python | train |
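
`fix_config` above backfills a default value and a help string only when the key is absent; the stdlib `dict.setdefault` expresses the same guard more compactly. A sketch of the pattern:

def fix_config(options, help_strings):
    options.setdefault('storage_name', 'unknown')
    help_strings.setdefault(
        'storage_name',
        'The name of the storage value to retrieve (string).')
    return options
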
newville/wxmplot | examples/tifffile.py | https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/examples/tifffile.py#L874-L989 | def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TIFFtag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(validate[value]
for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
self.strips_per_image = int(math.floor(float(self.image_length +
self.rows_per_strip - 1) / self.rows_per_strip))
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if self.is_stk:
planes = tags['mm_uic2'].count
# consolidate mm_uic tags
self.mm_uic_tags = Record(tags['mm_uic2'].value)
for key in ('mm_uic3', 'mm_uic4', 'mm_uic1'):
if key in tags:
self.mm_uic_tags.update(tags[key].value)
if self.planar_configuration == 'contig':
self._shape = (planes, 1, self.image_length,
self.image_width, self.samples_per_pixel)
self.shape = tuple(self._shape[i] for i in (0, 2, 3, 4))
self.axes = "PYXS"
else:
self._shape = (planes, self.samples_per_pixel,
self.image_length, self.image_width, 1)
self.shape = self._shape[:4]
self.axes = "PSYX"
elif self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
self._shape = (1, 1, self.image_length, self.image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
self.shape = (3, self.image_length, self.image_width)
self.axes = "SYX"
else:
# LSM and FluoView
self.shape = (self.image_length, self.image_width)
self.axes = "YX"
elif self.is_rgb or self.samples_per_pixel > 1:
if self.planar_configuration == 'contig':
self._shape = (1, 1, self.image_length, self.image_width,
self.samples_per_pixel)
self.shape = (self.image_length, self.image_width,
self.samples_per_pixel)
self.axes = "YXS"
else:
self._shape = (1, self.samples_per_pixel, self.image_length,
self.image_width, 1)
self.shape = self._shape[1:-1]
self.axes = "SYX"
if self.is_rgb and 'extra_samples' in self.tags:
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha'):
if self.planar_configuration == 'contig':
self.shape = self.shape[:2] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, self.image_length, self.image_width, 1)
self.shape = self._shape[2:4]
self.axes = "YX"
if not self.compression and not 'strip_byte_counts' in tags:
self.strip_byte_counts = numpy.prod(self.shape) * (
self.bits_per_sample // 8) | [
"def",
"_process_tags",
"(",
"self",
")",
":",
"tags",
"=",
"self",
".",
"tags",
"for",
"code",
",",
"(",
"name",
",",
"default",
",",
"dtype",
",",
"count",
",",
"validate",
")",
"in",
"TIFF_TAGS",
".",
"items",
"(",
")",
":",
"if",
"not",
"(",
"name",
"in",
"tags",
"or",
"default",
"is",
"None",
")",
":",
"tags",
"[",
"name",
"]",
"=",
"TIFFtag",
"(",
"code",
",",
"dtype",
"=",
"dtype",
",",
"count",
"=",
"count",
",",
"value",
"=",
"default",
",",
"name",
"=",
"name",
")",
"if",
"name",
"in",
"tags",
"and",
"validate",
":",
"try",
":",
"if",
"tags",
"[",
"name",
"]",
".",
"count",
"==",
"1",
":",
"setattr",
"(",
"self",
",",
"name",
",",
"validate",
"[",
"tags",
"[",
"name",
"]",
".",
"value",
"]",
")",
"else",
":",
"setattr",
"(",
"self",
",",
"name",
",",
"tuple",
"(",
"validate",
"[",
"value",
"]",
"for",
"value",
"in",
"tags",
"[",
"name",
"]",
".",
"value",
")",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"%s.value (%s) not supported\"",
"%",
"(",
"name",
",",
"tags",
"[",
"name",
"]",
".",
"value",
")",
")",
"tag",
"=",
"tags",
"[",
"'bits_per_sample'",
"]",
"if",
"tag",
".",
"count",
"==",
"1",
":",
"self",
".",
"bits_per_sample",
"=",
"tag",
".",
"value",
"else",
":",
"value",
"=",
"tag",
".",
"value",
"[",
":",
"self",
".",
"samples_per_pixel",
"]",
"if",
"any",
"(",
"(",
"v",
"-",
"value",
"[",
"0",
"]",
"for",
"v",
"in",
"value",
")",
")",
":",
"self",
".",
"bits_per_sample",
"=",
"value",
"else",
":",
"self",
".",
"bits_per_sample",
"=",
"value",
"[",
"0",
"]",
"tag",
"=",
"tags",
"[",
"'sample_format'",
"]",
"if",
"tag",
".",
"count",
"==",
"1",
":",
"self",
".",
"sample_format",
"=",
"TIFF_SAMPLE_FORMATS",
"[",
"tag",
".",
"value",
"]",
"else",
":",
"value",
"=",
"tag",
".",
"value",
"[",
":",
"self",
".",
"samples_per_pixel",
"]",
"if",
"any",
"(",
"(",
"v",
"-",
"value",
"[",
"0",
"]",
"for",
"v",
"in",
"value",
")",
")",
":",
"self",
".",
"sample_format",
"=",
"[",
"TIFF_SAMPLE_FORMATS",
"[",
"v",
"]",
"for",
"v",
"in",
"value",
"]",
"else",
":",
"self",
".",
"sample_format",
"=",
"TIFF_SAMPLE_FORMATS",
"[",
"value",
"[",
"0",
"]",
"]",
"self",
".",
"strips_per_image",
"=",
"int",
"(",
"math",
".",
"floor",
"(",
"float",
"(",
"self",
".",
"image_length",
"+",
"self",
".",
"rows_per_strip",
"-",
"1",
")",
"/",
"self",
".",
"rows_per_strip",
")",
")",
"key",
"=",
"(",
"self",
".",
"sample_format",
",",
"self",
".",
"bits_per_sample",
")",
"self",
".",
"dtype",
"=",
"self",
".",
"_dtype",
"=",
"TIFF_SAMPLE_DTYPES",
".",
"get",
"(",
"key",
",",
"None",
")",
"if",
"self",
".",
"is_stk",
":",
"planes",
"=",
"tags",
"[",
"'mm_uic2'",
"]",
".",
"count",
"# consolidate mm_uci tags",
"self",
".",
"mm_uic_tags",
"=",
"Record",
"(",
"tags",
"[",
"'mm_uic2'",
"]",
".",
"value",
")",
"for",
"key",
"in",
"(",
"'mm_uic3'",
",",
"'mm_uic4'",
",",
"'mm_uic1'",
")",
":",
"if",
"key",
"in",
"tags",
":",
"self",
".",
"mm_uic_tags",
".",
"update",
"(",
"tags",
"[",
"key",
"]",
".",
"value",
")",
"if",
"self",
".",
"planar_configuration",
"==",
"'contig'",
":",
"self",
".",
"_shape",
"=",
"(",
"planes",
",",
"1",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"self",
".",
"samples_per_pixel",
")",
"self",
".",
"shape",
"=",
"tuple",
"(",
"self",
".",
"_shape",
"[",
"i",
"]",
"for",
"i",
"in",
"(",
"0",
",",
"2",
",",
"3",
",",
"4",
")",
")",
"self",
".",
"axes",
"=",
"\"PYXS\"",
"else",
":",
"self",
".",
"_shape",
"=",
"(",
"planes",
",",
"self",
".",
"samples_per_pixel",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"1",
")",
"self",
".",
"shape",
"=",
"self",
".",
"_shape",
"[",
":",
"4",
"]",
"self",
".",
"axes",
"=",
"\"PSYX\"",
"elif",
"self",
".",
"is_palette",
":",
"self",
".",
"dtype",
"=",
"self",
".",
"tags",
"[",
"'color_map'",
"]",
".",
"dtype",
"[",
"1",
"]",
"self",
".",
"color_map",
"=",
"numpy",
".",
"array",
"(",
"self",
".",
"color_map",
",",
"self",
".",
"dtype",
")",
"dmax",
"=",
"self",
".",
"color_map",
".",
"max",
"(",
")",
"if",
"dmax",
"<",
"256",
":",
"self",
".",
"dtype",
"=",
"numpy",
".",
"uint8",
"self",
".",
"color_map",
"=",
"self",
".",
"color_map",
".",
"astype",
"(",
"self",
".",
"dtype",
")",
"#else:",
"# self.dtype = numpy.uint8",
"# self.color_map >>= 8",
"# self.color_map = self.color_map.astype(self.dtype)",
"self",
".",
"color_map",
".",
"shape",
"=",
"(",
"3",
",",
"-",
"1",
")",
"self",
".",
"_shape",
"=",
"(",
"1",
",",
"1",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"1",
")",
"if",
"self",
".",
"color_map",
".",
"shape",
"[",
"1",
"]",
">=",
"2",
"**",
"self",
".",
"bits_per_sample",
":",
"self",
".",
"shape",
"=",
"(",
"3",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
")",
"self",
".",
"axes",
"=",
"\"SYX\"",
"else",
":",
"# LSM and FluoView",
"self",
".",
"shape",
"=",
"(",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
")",
"self",
".",
"axes",
"=",
"\"YX\"",
"elif",
"self",
".",
"is_rgb",
"or",
"self",
".",
"samples_per_pixel",
">",
"1",
":",
"if",
"self",
".",
"planar_configuration",
"==",
"'contig'",
":",
"self",
".",
"_shape",
"=",
"(",
"1",
",",
"1",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"self",
".",
"samples_per_pixel",
")",
"self",
".",
"shape",
"=",
"(",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"self",
".",
"samples_per_pixel",
")",
"self",
".",
"axes",
"=",
"\"YXS\"",
"else",
":",
"self",
".",
"_shape",
"=",
"(",
"1",
",",
"self",
".",
"samples_per_pixel",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"1",
")",
"self",
".",
"shape",
"=",
"self",
".",
"_shape",
"[",
"1",
":",
"-",
"1",
"]",
"self",
".",
"axes",
"=",
"\"SYX\"",
"if",
"self",
".",
"is_rgb",
"and",
"'extra_samples'",
"in",
"self",
".",
"tags",
":",
"extra_samples",
"=",
"self",
".",
"extra_samples",
"if",
"self",
".",
"tags",
"[",
"'extra_samples'",
"]",
".",
"count",
"==",
"1",
":",
"extra_samples",
"=",
"(",
"extra_samples",
",",
")",
"for",
"exs",
"in",
"extra_samples",
":",
"if",
"exs",
"in",
"(",
"'unassalpha'",
",",
"'assocalpha'",
")",
":",
"if",
"self",
".",
"planar_configuration",
"==",
"'contig'",
":",
"self",
".",
"shape",
"=",
"self",
".",
"shape",
"[",
":",
"2",
"]",
"+",
"(",
"4",
",",
")",
"else",
":",
"self",
".",
"shape",
"=",
"(",
"4",
",",
")",
"+",
"self",
".",
"shape",
"[",
"1",
":",
"]",
"break",
"else",
":",
"self",
".",
"_shape",
"=",
"(",
"1",
",",
"1",
",",
"self",
".",
"image_length",
",",
"self",
".",
"image_width",
",",
"1",
")",
"self",
".",
"shape",
"=",
"self",
".",
"_shape",
"[",
"2",
":",
"4",
"]",
"self",
".",
"axes",
"=",
"\"YX\"",
"if",
"not",
"self",
".",
"compression",
"and",
"not",
"'strip_byte_counts'",
"in",
"tags",
":",
"self",
".",
"strip_byte_counts",
"=",
"numpy",
".",
"prod",
"(",
"self",
".",
"shape",
")",
"*",
"(",
"self",
".",
"bits_per_sample",
"//",
"8",
")"
] | Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported. | [
"Validate",
"standard",
"tags",
"and",
"initialize",
"attributes",
"."
] | python | train |
SmokinCaterpillar/pypet | pypet/naturalnaming.py | https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L981-L1071 | def _add_prefix(self, split_names, start_node, group_type_name):
"""Adds the correct sub branch prefix to a given name.
Usually the prefix is the full name of the parent node. In case items are added
directly to the trajectory the prefixes are chosen according to the matching subbranch.
For example, this could be 'parameters' for parameters or 'results.run_00000004' for
results added to the fifth single run.
:param split_names:
List of names of the new node (e.g. ``['mynewgroupA', 'mynewgroupB', 'myresult']``).
:param start_node:
Parent node under which the new node should be added.
:param group_type_name:
Type name of subbranch the item belongs to
(e.g. 'PARAMETER_GROUP', 'RESULT_GROUP' etc).
:return: The name with the added prefix.
"""
root = self._root_instance
# If the start node of our insertion is root or one below root
# we might need to add prefixes.
# In case of derived parameters and results we also need to add prefixes containing the
# subbranch and the current run in case of a single run.
# For instance, a prefix could be 'results.runs.run_00000007'.
prepend = []
if start_node.v_depth < 3 and not group_type_name == GROUP:
if start_node.v_depth == 0:
if group_type_name == DERIVED_PARAMETER_GROUP:
if split_names[0] == 'derived_parameters':
return split_names
else:
prepend += ['derived_parameters']
elif group_type_name == RESULT_GROUP:
if split_names[0] == 'results':
return split_names
else:
prepend += ['results']
elif group_type_name == CONFIG_GROUP:
if split_names[0] == 'config':
return split_names
else:
prepend += ['config']
elif group_type_name == PARAMETER_GROUP:
if split_names[0] == 'parameters':
return split_names[0]
else:
prepend += ['parameters']
else:
raise RuntimeError('Why are you here?')
# Check if we have to add a prefix containing the current run
if root._is_run and root._auto_run_prepend:
dummy = root.f_wildcard('$', -1)
crun = root.f_wildcard('$')
if any(name in root._run_information for name in split_names):
pass
elif any(name == dummy for name in split_names):
pass
elif (group_type_name == RESULT_GROUP or
group_type_name == DERIVED_PARAMETER_GROUP):
if start_node.v_depth == 0:
prepend += ['runs', crun]
elif start_node.v_depth == 1:
if len(split_names) == 1 and split_names[0] == 'runs':
return split_names
else:
prepend += ['runs', crun]
elif start_node.v_depth == 2 and start_node.v_name == 'runs':
prepend += [crun]
if prepend:
split_names = prepend + split_names
return split_names | [
"def",
"_add_prefix",
"(",
"self",
",",
"split_names",
",",
"start_node",
",",
"group_type_name",
")",
":",
"root",
"=",
"self",
".",
"_root_instance",
"# If the start node of our insertion is root or one below root",
"# we might need to add prefixes.",
"# In case of derived parameters and results we also need to add prefixes containing the",
"# subbranch and the current run in case of a single run.",
"# For instance, a prefix could be 'results.runs.run_00000007'.",
"prepend",
"=",
"[",
"]",
"if",
"start_node",
".",
"v_depth",
"<",
"3",
"and",
"not",
"group_type_name",
"==",
"GROUP",
":",
"if",
"start_node",
".",
"v_depth",
"==",
"0",
":",
"if",
"group_type_name",
"==",
"DERIVED_PARAMETER_GROUP",
":",
"if",
"split_names",
"[",
"0",
"]",
"==",
"'derived_parameters'",
":",
"return",
"split_names",
"else",
":",
"prepend",
"+=",
"[",
"'derived_parameters'",
"]",
"elif",
"group_type_name",
"==",
"RESULT_GROUP",
":",
"if",
"split_names",
"[",
"0",
"]",
"==",
"'results'",
":",
"return",
"split_names",
"else",
":",
"prepend",
"+=",
"[",
"'results'",
"]",
"elif",
"group_type_name",
"==",
"CONFIG_GROUP",
":",
"if",
"split_names",
"[",
"0",
"]",
"==",
"'config'",
":",
"return",
"split_names",
"else",
":",
"prepend",
"+=",
"[",
"'config'",
"]",
"elif",
"group_type_name",
"==",
"PARAMETER_GROUP",
":",
"if",
"split_names",
"[",
"0",
"]",
"==",
"'parameters'",
":",
"return",
"split_names",
"[",
"0",
"]",
"else",
":",
"prepend",
"+=",
"[",
"'parameters'",
"]",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Why are you here?'",
")",
"# Check if we have to add a prefix containing the current run",
"if",
"root",
".",
"_is_run",
"and",
"root",
".",
"_auto_run_prepend",
":",
"dummy",
"=",
"root",
".",
"f_wildcard",
"(",
"'$'",
",",
"-",
"1",
")",
"crun",
"=",
"root",
".",
"f_wildcard",
"(",
"'$'",
")",
"if",
"any",
"(",
"name",
"in",
"root",
".",
"_run_information",
"for",
"name",
"in",
"split_names",
")",
":",
"pass",
"elif",
"any",
"(",
"name",
"==",
"dummy",
"for",
"name",
"in",
"split_names",
")",
":",
"pass",
"elif",
"(",
"group_type_name",
"==",
"RESULT_GROUP",
"or",
"group_type_name",
"==",
"DERIVED_PARAMETER_GROUP",
")",
":",
"if",
"start_node",
".",
"v_depth",
"==",
"0",
":",
"prepend",
"+=",
"[",
"'runs'",
",",
"crun",
"]",
"elif",
"start_node",
".",
"v_depth",
"==",
"1",
":",
"if",
"len",
"(",
"split_names",
")",
"==",
"1",
"and",
"split_names",
"[",
"0",
"]",
"==",
"'runs'",
":",
"return",
"split_names",
"else",
":",
"prepend",
"+=",
"[",
"'runs'",
",",
"crun",
"]",
"elif",
"start_node",
".",
"v_depth",
"==",
"2",
"and",
"start_node",
".",
"v_name",
"==",
"'runs'",
":",
"prepend",
"+=",
"[",
"crun",
"]",
"if",
"prepend",
":",
"split_names",
"=",
"prepend",
"+",
"split_names",
"return",
"split_names"
] | Adds the correct sub branch prefix to a given name.
Usually the prefix is the full name of the parent node. In case items are added
directly to the trajectory the prefixes are chosen according to the matching subbranch.
For example, this could be 'parameters' for parameters or 'results.run_00000004' for
results added to the fifth single run.
:param split_names:
List of names of the new node (e.g. ``['mynewgroupA', 'mynewgroupB', 'myresult']``).
:param start_node:
Parent node under which the new node should be added.
:param group_type_name:
Type name of subbranch the item belongs to
(e.g. 'PARAMETER_GROUP', 'RESULT_GROUP' etc).
:return: The name with the added prefix. | [
"Adds",
"the",
"correct",
"sub",
"branch",
"prefix",
"to",
"a",
"given",
"name",
"."
] | python | test |
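A condensed, runnable sketch of the prefixing rule this method applies; GROUP_ROOTS and add_prefix are illustrative names, not part of pypet:

# Map each group type to its root branch and prepend it unless the
# name list already starts with that branch.
GROUP_ROOTS = {
    'PARAMETER_GROUP': 'parameters',
    'CONFIG_GROUP': 'config',
    'RESULT_GROUP': 'results',
    'DERIVED_PARAMETER_GROUP': 'derived_parameters',
}

def add_prefix(split_names, group_type_name):
    root = GROUP_ROOTS[group_type_name]
    if split_names[0] == root:
        return split_names
    return [root] + split_names

assert add_prefix(['x', 'y'], 'RESULT_GROUP') == ['results', 'x', 'y']
assert add_prefix(['results', 'x'], 'RESULT_GROUP') == ['results', 'x']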
takluyver/entrypoints | entrypoints.py | https://github.com/takluyver/entrypoints/blob/d7db29fd6136f86498d8c374f531d4c198d66bf6/entrypoints.py#L90-L106 | def from_string(cls, epstr, name, distro=None):
"""Parse an entry point from the syntax in entry_points.txt
:param str epstr: The entry point string (not including 'name =')
:param str name: The name of this entry point
:param Distribution distro: The distribution in which the entry point was found
:rtype: EntryPoint
:raises BadEntryPoint: if *epstr* can't be parsed as an entry point.
"""
m = entry_point_pattern.match(epstr)
if m:
mod, obj, extras = m.group('modulename', 'objectname', 'extras')
if extras is not None:
extras = re.split(r',\s*', extras)
return cls(name, mod, obj, extras, distro)
else:
raise BadEntryPoint(epstr) | [
"def",
"from_string",
"(",
"cls",
",",
"epstr",
",",
"name",
",",
"distro",
"=",
"None",
")",
":",
"m",
"=",
"entry_point_pattern",
".",
"match",
"(",
"epstr",
")",
"if",
"m",
":",
"mod",
",",
"obj",
",",
"extras",
"=",
"m",
".",
"group",
"(",
"'modulename'",
",",
"'objectname'",
",",
"'extras'",
")",
"if",
"extras",
"is",
"not",
"None",
":",
"extras",
"=",
"re",
".",
"split",
"(",
"r',\\s*'",
",",
"extras",
")",
"return",
"cls",
"(",
"name",
",",
"mod",
",",
"obj",
",",
"extras",
",",
"distro",
")",
"else",
":",
"raise",
"BadEntryPoint",
"(",
"epstr",
")"
] | Parse an entry point from the syntax in entry_points.txt
:param str epstr: The entry point string (not including 'name =')
:param str name: The name of this entry point
:param Distribution distro: The distribution in which the entry point was found
:rtype: EntryPoint
:raises BadEntryPoint: if *epstr* can't be parsed as an entry point. | [
"Parse",
"an",
"entry",
"point",
"from",
"the",
"syntax",
"in",
"entry_points",
".",
"txt"
] | python | test |
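A runnable illustration of the 'module:object [extras]' syntax this method parses; the regex below is an assumed stand-in for the module's precompiled entry_point_pattern, which is not shown in this record:

import re

pattern = re.compile(
    r'(?P<modulename>\w+(\.\w+)*)'          # dotted module path
    r'(:(?P<objectname>\w+(\.\w+)*))?'      # optional ':object' part
    r'\s*(\[(?P<extras>.+)\])?$'            # optional '[extra1, extra2]'
)

m = pattern.match('mypkg.cli:main [color, tests]')
mod, obj, extras = m.group('modulename', 'objectname', 'extras')
extras = re.split(r',\s*', extras) if extras is not None else None
print(mod, obj, extras)   # mypkg.cli main ['color', 'tests']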
spulec/moto | moto/ssm/models.py | https://github.com/spulec/moto/blob/4a286c4bc288933bb023396e2784a6fdbb966bc9/moto/ssm/models.py#L355-L370 | def list_commands(self, **kwargs):
"""
https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_ListCommands.html
"""
commands = self._commands
command_id = kwargs.get('CommandId', None)
if command_id:
commands = [self.get_command_by_id(command_id)]
instance_id = kwargs.get('InstanceId', None)
if instance_id:
commands = self.get_commands_by_instance_id(instance_id)
return {
'Commands': [command.response_object() for command in commands]
} | [
"def",
"list_commands",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"commands",
"=",
"self",
".",
"_commands",
"command_id",
"=",
"kwargs",
".",
"get",
"(",
"'CommandId'",
",",
"None",
")",
"if",
"command_id",
":",
"commands",
"=",
"[",
"self",
".",
"get_command_by_id",
"(",
"command_id",
")",
"]",
"instance_id",
"=",
"kwargs",
".",
"get",
"(",
"'InstanceId'",
",",
"None",
")",
"if",
"instance_id",
":",
"commands",
"=",
"self",
".",
"get_commands_by_instance_id",
"(",
"instance_id",
")",
"return",
"{",
"'Commands'",
":",
"[",
"command",
".",
"response_object",
"(",
")",
"for",
"command",
"in",
"commands",
"]",
"}"
] | https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_ListCommands.html | [
"https",
":",
"//",
"docs",
".",
"aws",
".",
"amazon",
".",
"com",
"/",
"systems",
"-",
"manager",
"/",
"latest",
"/",
"APIReference",
"/",
"API_ListCommands",
".",
"html"
] | python | train |
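The same filtering behaviour reduced to plain dicts so it runs stand-alone; this is only a sketch, since the real backend looks commands up through helper methods and serializes them with response_object():

def list_commands(commands, **kwargs):
    command_id = kwargs.get('CommandId')
    if command_id:
        commands = [c for c in commands if c['CommandId'] == command_id]
    instance_id = kwargs.get('InstanceId')
    if instance_id:
        commands = [c for c in commands if instance_id in c['InstanceIds']]
    return {'Commands': commands}

cmds = [{'CommandId': 'c-1', 'InstanceIds': ['i-1']},
        {'CommandId': 'c-2', 'InstanceIds': ['i-2']}]
print(list_commands(cmds, InstanceId='i-2'))   # only c-2 is returned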
GNS3/gns3-server | gns3server/controller/link.py | https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/controller/link.py#L339-L345 | def stop_capture(self):
"""
Stop capture on the link
"""
self._capturing = False
self._project.controller.notification.emit("link.updated", self.__json__()) | [
"def",
"stop_capture",
"(",
"self",
")",
":",
"self",
".",
"_capturing",
"=",
"False",
"self",
".",
"_project",
".",
"controller",
".",
"notification",
".",
"emit",
"(",
"\"link.updated\"",
",",
"self",
".",
"__json__",
"(",
")",
")"
] | Stop capture on the link | [
"Stop",
"capture",
"on",
"the",
"link"
] | python | train |
SpriteLink/NIPAP | nipap-www/nipapwww/controllers/error.py | https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/error.py#L22-L57 | def document(self):
"""Render the error document"""
request = self._py_object.request
resp = request.environ.get('pylons.original_response')
content = literal(resp.body) or cgi.escape(request.GET.get('message', ''))
page = """<!DOCTYPE html>
<html lang="en">
<head>
<title>NIPAP error</title>
<meta charset="utf-8">
<link rel="stylesheet" href="/nipap.css">
</head>
<body>
<div class="top_menu">
<div class="menu_entry" style="line-height: 0px">
<div class="menu_entry" style="font-size: 10pt; color: #CCCCCC; padding-top: 11px; font-weight: bold;">
NIPAP ERROR
</div>
</div>
</div>
<div class="content_outer">
<div class="content_inner">
<p>%s</p>
<p>Relevant information has been forwarded to the system administrator.</p>
</div>
<div style="height: 500px;"> </div>
</div>
</body>
</html>""" % content
# If the error was raised from the XhrController, return HTML-less response
if type(request.environ['pylons.original_request'].environ.get('pylons.controller')) == XhrController:
return content
else:
return page | [
"def",
"document",
"(",
"self",
")",
":",
"request",
"=",
"self",
".",
"_py_object",
".",
"request",
"resp",
"=",
"request",
".",
"environ",
".",
"get",
"(",
"'pylons.original_response'",
")",
"content",
"=",
"literal",
"(",
"resp",
".",
"body",
")",
"or",
"cgi",
".",
"escape",
"(",
"request",
".",
"GET",
".",
"get",
"(",
"'message'",
",",
"''",
")",
")",
"page",
"=",
"\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <title>NIPAP error</title>\n <meta charset=\"utf-8\">\n <link rel=\"stylesheet\" href=\"/nipap.css\">\n </head>\n <body>\n <div class=\"top_menu\">\n <div class=\"menu_entry\" style=\"line-height: 0px\">\n <div class=\"menu_entry\" style=\"font-size: 10pt; color: #CCCCCC; padding-top: 11px; font-weight: bold;\">\n NIPAP ERROR\n </div>\n </div>\n </div>\n <div class=\"content_outer\">\n <div class=\"content_inner\">\n <p>%s</p>\n <p>Relevant information has been forwarded to the system administrator.</p>\n </div>\n\t\t\t<div style=\"height: 500px;\"> </div>\n </div>\n </body>\n</html>\"\"\"",
"%",
"content",
"# If the error was raised from the XhrController, return HTML-less response",
"if",
"type",
"(",
"request",
".",
"environ",
"[",
"'pylons.original_request'",
"]",
".",
"environ",
".",
"get",
"(",
"'pylons.controller'",
")",
")",
"==",
"XhrController",
":",
"return",
"content",
"else",
":",
"return",
"page"
] | Render the error document | [
"Render",
"the",
"error",
"document"
] | python | train |
googledatalab/pydatalab | datalab/context/_project.py | https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/context/_project.py#L97-L107 | def get_default_id(credentials=None):
""" Get default project id.
Returns: the default project id if there is one, or None.
"""
project_id = _utils.get_project_id()
if project_id is None:
projects, _ = Projects(credentials)._retrieve_projects(None, 2)
if len(projects) == 1:
project_id = projects[0].id
return project_id | [
"def",
"get_default_id",
"(",
"credentials",
"=",
"None",
")",
":",
"project_id",
"=",
"_utils",
".",
"get_project_id",
"(",
")",
"if",
"project_id",
"is",
"None",
":",
"projects",
",",
"_",
"=",
"Projects",
"(",
"credentials",
")",
".",
"_retrieve_projects",
"(",
"None",
",",
"2",
")",
"if",
"len",
"(",
"projects",
")",
"==",
"1",
":",
"project_id",
"=",
"projects",
"[",
"0",
"]",
".",
"id",
"return",
"project_id"
] | Get default project id.
Returns: the default project id if there is one, or None. | [
"Get",
"default",
"project",
"id",
"."
] | python | train |
zibertscrem/hexdi | hexdi/__init__.py | https://github.com/zibertscrem/hexdi/blob/4875598299c53f984f2bb1b37060fd42bb7aba84/hexdi/__init__.py#L68-L76 | def bind_type(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype, lifetime_manager: hexdi.core.ltype):
"""
shortcut for bind_type on root container
:param type_to_bind: type that will be resolved by accessor
:param accessor: accessor for resolving object
:param lifetime_manager: type of lifetime manager for this binding
"""
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime_manager) | [
"def",
"bind_type",
"(",
"type_to_bind",
":",
"hexdi",
".",
"core",
".",
"restype",
",",
"accessor",
":",
"hexdi",
".",
"core",
".",
"clstype",
",",
"lifetime_manager",
":",
"hexdi",
".",
"core",
".",
"ltype",
")",
":",
"hexdi",
".",
"core",
".",
"get_root_container",
"(",
")",
".",
"bind_type",
"(",
"type_to_bind",
",",
"accessor",
",",
"lifetime_manager",
")"
] | shortcut for bind_type on root container
:param type_to_bind: type that will be resolved by accessor
:param accessor: accessor for resolving object
:param lifetime_manager: type of lifetime manager for this binding | [
"shortcut",
"for",
"bind_type",
"on",
"root",
"container"
] | python | train |
spyder-ide/spyder | spyder/plugins/variableexplorer/widgets/arrayeditor.py | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/arrayeditor.py#L573-L578 | def accept_changes(self):
"""Accept changes"""
for (i, j), value in list(self.model.changes.items()):
self.data[i, j] = value
if self.old_data_shape is not None:
self.data.shape = self.old_data_shape | [
"def",
"accept_changes",
"(",
"self",
")",
":",
"for",
"(",
"i",
",",
"j",
")",
",",
"value",
"in",
"list",
"(",
"self",
".",
"model",
".",
"changes",
".",
"items",
"(",
")",
")",
":",
"self",
".",
"data",
"[",
"i",
",",
"j",
"]",
"=",
"value",
"if",
"self",
".",
"old_data_shape",
"is",
"not",
"None",
":",
"self",
".",
"data",
".",
"shape",
"=",
"self",
".",
"old_data_shape"
] | Accept changes | [
"Accept",
"changes"
] | python | train |
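The buffered-edit pattern behind accept_changes(), shown stand-alone with NumPy: edits accumulate in a (row, col) -> value dict, are written back on accept, and a flattened array's original shape is restored:

import numpy as np

data = np.arange(4.0)       # 1D array as handed to the editor
old_shape = data.shape
data.shape = (4, 1)         # 2D view used while editing
changes = {(2, 0): 99.0}    # edits buffered by the model
for (i, j), value in changes.items():
    data[i, j] = value
data.shape = old_shape
print(data)                 # [ 0.  1. 99.  3.]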
numenta/htmresearch | htmresearch/frameworks/location/location_network_creation.py | https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/location/location_network_creation.py#L512-L555 | def learn(self, objects):
"""
Learns all provided objects
:param objects: dict mapping object name to array of sensations, where each
sensation is composed of location and feature SDR for each
column. For example:
{'obj1' : [[[1,1,1],[101,205,523, ..., 1021]],...], ...}
Note: Each column must have the same number of sensations as
the other columns.
:type objects: dict[str, array]
"""
self.setLearning(True)
for objectName, sensationList in objects.iteritems():
self.sendReset()
print "Learning :", objectName
prevLoc = [None] * self.numColumns
numFeatures = len(sensationList[0])
displacement = [0] * self.dimensions
for sensation in xrange(numFeatures):
for col in xrange(self.numColumns):
location = np.array(sensationList[col][sensation][0])
feature = sensationList[col][sensation][1]
# Compute displacement from previous location
if prevLoc[col] is not None:
displacement = location - prevLoc[col]
prevLoc[col] = location
# learn each pattern multiple times
for _ in xrange(self.repeat):
# Sense feature at location
self.motorInput[col].addDataToQueue(displacement)
self.sensorInput[col].addDataToQueue(feature, False, 0)
# Only move to the location on the first sensation.
displacement = [0] * self.dimensions
self.network.run(self.repeat * numFeatures)
# update L2 representations for the object
self.learnedObjects[objectName] = self.getL2Representations() | [
"def",
"learn",
"(",
"self",
",",
"objects",
")",
":",
"self",
".",
"setLearning",
"(",
"True",
")",
"for",
"objectName",
",",
"sensationList",
"in",
"objects",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"sendReset",
"(",
")",
"print",
"\"Learning :\"",
",",
"objectName",
"prevLoc",
"=",
"[",
"None",
"]",
"*",
"self",
".",
"numColumns",
"numFeatures",
"=",
"len",
"(",
"sensationList",
"[",
"0",
"]",
")",
"displacement",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"dimensions",
"for",
"sensation",
"in",
"xrange",
"(",
"numFeatures",
")",
":",
"for",
"col",
"in",
"xrange",
"(",
"self",
".",
"numColumns",
")",
":",
"location",
"=",
"np",
".",
"array",
"(",
"sensationList",
"[",
"col",
"]",
"[",
"sensation",
"]",
"[",
"0",
"]",
")",
"feature",
"=",
"sensationList",
"[",
"col",
"]",
"[",
"sensation",
"]",
"[",
"1",
"]",
"# Compute displacement from previous location",
"if",
"prevLoc",
"[",
"col",
"]",
"is",
"not",
"None",
":",
"displacement",
"=",
"location",
"-",
"prevLoc",
"[",
"col",
"]",
"prevLoc",
"[",
"col",
"]",
"=",
"location",
"# learn each pattern multiple times",
"for",
"_",
"in",
"xrange",
"(",
"self",
".",
"repeat",
")",
":",
"# Sense feature at location",
"self",
".",
"motorInput",
"[",
"col",
"]",
".",
"addDataToQueue",
"(",
"displacement",
")",
"self",
".",
"sensorInput",
"[",
"col",
"]",
".",
"addDataToQueue",
"(",
"feature",
",",
"False",
",",
"0",
")",
"# Only move to the location on the first sensation.",
"displacement",
"=",
"[",
"0",
"]",
"*",
"self",
".",
"dimensions",
"self",
".",
"network",
".",
"run",
"(",
"self",
".",
"repeat",
"*",
"numFeatures",
")",
"# update L2 representations for the object",
"self",
".",
"learnedObjects",
"[",
"objectName",
"]",
"=",
"self",
".",
"getL2Representations",
"(",
")"
] | Learns all provided objects
:param objects: dict mapping object name to array of sensations, where each
sensation is composed of location and feature SDR for each
column. For example:
{'obj1' : [[[1,1,1],[101,205,523, ..., 1021]],...], ...}
Note: Each column must have the same number of sensations as
the other columns.
:type objects: dict[str, array] | [
"Learns",
"all",
"provided",
"objects"
] | python | train |
qiniu/python-sdk | qiniu/auth.py | https://github.com/qiniu/python-sdk/blob/a69fbef4e3e6ea1ebe09f4610a5b18bb2c17de59/qiniu/auth.py#L230-L271 | def token_of_request(
self,
method,
host,
url,
qheaders,
content_type=None,
body=None):
"""
<Method> <PathWithRawQuery>
Host: <Host>
Content-Type: <ContentType>
[<X-Qiniu-*> Headers]
[<Body>] # The <Body> is only signed in when <ContentType> is present and is not application/octet-stream.
"""
parsed_url = urlparse(url)
netloc = parsed_url.netloc
path = parsed_url.path
query = parsed_url.query
if not host:
host = netloc
path_with_query = path
if query != '':
path_with_query = ''.join([path_with_query, '?', query])
data = ''.join(["%s %s" %
(method, path_with_query), "\n", "Host: %s" %
host, "\n"])
if content_type:
data += "Content-Type: %s" % (content_type) + "\n"
data += qheaders
data += "\n"
if content_type and content_type != "application/octet-stream" and body:
data += body.decode(encoding='UTF-8')
return '{0}:{1}'.format(self.__access_key, self.__token(data)) | [
"def",
"token_of_request",
"(",
"self",
",",
"method",
",",
"host",
",",
"url",
",",
"qheaders",
",",
"content_type",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"url",
")",
"netloc",
"=",
"parsed_url",
".",
"netloc",
"path",
"=",
"parsed_url",
".",
"path",
"query",
"=",
"parsed_url",
".",
"query",
"if",
"not",
"host",
":",
"host",
"=",
"netloc",
"path_with_query",
"=",
"path",
"if",
"query",
"!=",
"''",
":",
"path_with_query",
"=",
"''",
".",
"join",
"(",
"[",
"path_with_query",
",",
"'?'",
",",
"query",
"]",
")",
"data",
"=",
"''",
".",
"join",
"(",
"[",
"\"%s %s\"",
"%",
"(",
"method",
",",
"path_with_query",
")",
",",
"\"\\n\"",
",",
"\"Host: %s\"",
"%",
"host",
",",
"\"\\n\"",
"]",
")",
"if",
"content_type",
":",
"data",
"+=",
"\"Content-Type: %s\"",
"%",
"(",
"content_type",
")",
"+",
"\"\\n\"",
"data",
"+=",
"qheaders",
"data",
"+=",
"\"\\n\"",
"if",
"content_type",
"and",
"content_type",
"!=",
"\"application/octet-stream\"",
"and",
"body",
":",
"data",
"+=",
"body",
".",
"decode",
"(",
"encoding",
"=",
"'UTF-8'",
")",
"return",
"'{0}:{1}'",
".",
"format",
"(",
"self",
".",
"__access_key",
",",
"self",
".",
"__token",
"(",
"data",
")",
")"
] | <Method> <PathWithRawQuery>
Host: <Host>
Content-Type: <ContentType>
[<X-Qiniu-*> Headers]
[<Body>] # The <Body> is only signed in when <ContentType> is present and is not application/octet-stream. | [
"<Method",
">",
"<PathWithRawQuery",
">",
"Host",
":",
"<Host",
">",
"Content",
"-",
"Type",
":",
"<ContentType",
">",
"[",
"<X",
"-",
"Qiniu",
"-",
"*",
">",
"Headers",
"]"
] | python | train |
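The private __token helper is not included in this record. A common construction for this style of request signing, and only an assumption here, is HMAC-SHA1 over the canonical string with URL-safe Base64 encoding:

import base64
import hashlib
import hmac

def token(secret_key, data):
    # Sign the canonical request string and encode URL-safely.
    digest = hmac.new(secret_key.encode(), data.encode(), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).decode()

data = 'GET /path?a=1\nHost: example.com\n\n'
print('ACCESS_KEY:' + token('SECRET_KEY', data))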
lrq3000/pyFileFixity | pyFileFixity/lib/sortedcontainers/sortedlist.py | https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sortedlist.py#L1517-L1539 | def update(self, iterable):
"""Update the list by adding all elements from *iterable*."""
_maxes, _lists, _keys = self._maxes, self._lists, self._keys
values = sorted(iterable, key=self._key)
if _maxes:
if len(values) * 4 >= self._len:
values.extend(chain.from_iterable(_lists))
values.sort(key=self._key)
self._clear()
else:
_add = self.add
for val in values:
_add(val)
return
_load, _index = self._load, self._index
_lists.extend(values[pos:(pos + _load)]
for pos in range(0, len(values), _load))
_keys.extend(list(map(self._key, _list)) for _list in _lists)
_maxes.extend(sublist[-1] for sublist in _keys)
self._len = len(values)
del _index[:] | [
"def",
"update",
"(",
"self",
",",
"iterable",
")",
":",
"_maxes",
",",
"_lists",
",",
"_keys",
"=",
"self",
".",
"_maxes",
",",
"self",
".",
"_lists",
",",
"self",
".",
"_keys",
"values",
"=",
"sorted",
"(",
"iterable",
",",
"key",
"=",
"self",
".",
"_key",
")",
"if",
"_maxes",
":",
"if",
"len",
"(",
"values",
")",
"*",
"4",
">=",
"self",
".",
"_len",
":",
"values",
".",
"extend",
"(",
"chain",
".",
"from_iterable",
"(",
"_lists",
")",
")",
"values",
".",
"sort",
"(",
"key",
"=",
"self",
".",
"_key",
")",
"self",
".",
"_clear",
"(",
")",
"else",
":",
"_add",
"=",
"self",
".",
"add",
"for",
"val",
"in",
"values",
":",
"_add",
"(",
"val",
")",
"return",
"_load",
",",
"_index",
"=",
"self",
".",
"_load",
",",
"self",
".",
"_index",
"_lists",
".",
"extend",
"(",
"values",
"[",
"pos",
":",
"(",
"pos",
"+",
"_load",
")",
"]",
"for",
"pos",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"values",
")",
",",
"_load",
")",
")",
"_keys",
".",
"extend",
"(",
"list",
"(",
"map",
"(",
"self",
".",
"_key",
",",
"_list",
")",
")",
"for",
"_list",
"in",
"_lists",
")",
"_maxes",
".",
"extend",
"(",
"sublist",
"[",
"-",
"1",
"]",
"for",
"sublist",
"in",
"_keys",
")",
"self",
".",
"_len",
"=",
"len",
"(",
"values",
")",
"del",
"_index",
"[",
":",
"]"
] | Update the list by adding all elements from *iterable*. | [
"Update",
"the",
"list",
"by",
"adding",
"all",
"elements",
"from",
"*",
"iterable",
"*",
"."
] | python | train |
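Usage through the published sortedcontainers package, assuming it matches this vendored copy; update() picks between a full re-sort and one-by-one adds based on the size heuristic above:

from sortedcontainers import SortedKeyList

skl = SortedKeyList([3, 1, 2], key=lambda x: -x)   # descending order
skl.update([5, 4])
print(list(skl))   # [5, 4, 3, 2, 1]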
merll/docker-map | dockermap/dep.py | https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/dep.py#L159-L181 | def merge_dependency(self, item, resolve_parent, parents):
"""
Merge dependencies of element with further dependencies. First parent dependencies are checked, and then
immediate dependencies of the current element should be added to the list, but without duplicating any entries.
:param item: Item.
:param resolve_parent: Function to resolve parent dependencies.
:type resolve_parent: function
:type parents: collections.Iterable
:return: List of recursively resolved dependencies of this container.
:rtype: list
:raise CircularDependency: If the current element depends on one found deeper in the hierarchy.
"""
dep = []
for parent_key in parents:
if item == parent_key:
raise CircularDependency(item, True)
parent_dep = resolve_parent(parent_key)
if item in parent_dep:
raise CircularDependency(item)
merge_list(dep, parent_dep)
merge_list(dep, parents)
return dep | [
"def",
"merge_dependency",
"(",
"self",
",",
"item",
",",
"resolve_parent",
",",
"parents",
")",
":",
"dep",
"=",
"[",
"]",
"for",
"parent_key",
"in",
"parents",
":",
"if",
"item",
"==",
"parent_key",
":",
"raise",
"CircularDependency",
"(",
"item",
",",
"True",
")",
"parent_dep",
"=",
"resolve_parent",
"(",
"parent_key",
")",
"if",
"item",
"in",
"parent_dep",
":",
"raise",
"CircularDependency",
"(",
"item",
")",
"merge_list",
"(",
"dep",
",",
"parent_dep",
")",
"merge_list",
"(",
"dep",
",",
"parents",
")",
"return",
"dep"
] | Merge dependencies of element with further dependencies. First parent dependencies are checked, and then
immediate dependencies of the current element should be added to the list, but without duplicating any entries.
:param item: Item.
:param resolve_parent: Function to resolve parent dependencies.
:type resolve_parent: function
:type parents: collections.Iterable
:return: List of recursively resolved dependencies of this container.
:rtype: list
:raise CircularDependency: If the current element depends on one found deeper in the hierarchy. | [
"Merge",
"dependencies",
"of",
"element",
"with",
"further",
"dependencies",
".",
"First",
"parent",
"dependencies",
"are",
"checked",
"and",
"then",
"immediate",
"dependencies",
"of",
"the",
"current",
"element",
"should",
"be",
"added",
"to",
"the",
"list",
"but",
"without",
"duplicating",
"any",
"entries",
"."
] | python | train |
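A self-contained sketch of the recursive resolution this method supports; the deps graph, merge_list and resolve() below are illustrative, not part of docker-map:

def merge_list(dest, src):
    # Append items from src that dest does not already contain.
    dest.extend(item for item in src if item not in dest)

deps = {'app': ['db', 'cache'], 'db': ['base'], 'cache': ['base'], 'base': []}

def resolve(item, seen=()):
    if item in seen:
        raise ValueError('circular dependency: %s' % item)
    merged = []
    for parent in deps[item]:
        merge_list(merged, resolve(parent, seen + (item,)))
    merge_list(merged, deps[item])
    return merged

print(resolve('app'))   # ['base', 'db', 'cache']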
maas/python-libmaas | maas/client/utils/diff.py | https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/diff.py#L25-L44 | def calculate_dict_diff(old_params: dict, new_params: dict):
"""Return the parameters based on the difference.
If a parameter exists in `old_params` but not in `new_params` then
parameter will be set to an empty string.
"""
# Ignore all None values as those cannot be saved.
old_params = remove_None(old_params)
new_params = remove_None(new_params)
params_diff = {}
for key, value in old_params.items():
if key in new_params:
if value != new_params[key]:
params_diff[key] = new_params[key]
else:
params_diff[key] = ''
for key, value in new_params.items():
if key not in old_params:
params_diff[key] = value
return params_diff | [
"def",
"calculate_dict_diff",
"(",
"old_params",
":",
"dict",
",",
"new_params",
":",
"dict",
")",
":",
"# Ignore all None values as those cannot be saved.",
"old_params",
"=",
"remove_None",
"(",
"old_params",
")",
"new_params",
"=",
"remove_None",
"(",
"new_params",
")",
"params_diff",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"old_params",
".",
"items",
"(",
")",
":",
"if",
"key",
"in",
"new_params",
":",
"if",
"value",
"!=",
"new_params",
"[",
"key",
"]",
":",
"params_diff",
"[",
"key",
"]",
"=",
"new_params",
"[",
"key",
"]",
"else",
":",
"params_diff",
"[",
"key",
"]",
"=",
"''",
"for",
"key",
",",
"value",
"in",
"new_params",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"old_params",
":",
"params_diff",
"[",
"key",
"]",
"=",
"value",
"return",
"params_diff"
] | Return the parameters based on the difference.
If a parameter exists in `old_params` but not in `new_params` then
parameter will be set to an empty string. | [
"Return",
"the",
"parameters",
"based",
"on",
"the",
"difference",
"."
] | python | train |
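A worked example of the diff semantics, assuming remove_None drops None-valued keys as the inline comment states: unchanged keys are omitted, changed keys keep the new value, and keys missing from new_params are cleared with '':

old = {'name': 'node1', 'zone': 'a', 'tag': 'web', 'note': None}
new = {'name': 'node1', 'zone': 'b', 'power': 'on'}
print(calculate_dict_diff(old, new))
# {'zone': 'b', 'tag': '', 'power': 'on'}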
CQCL/pytket | pytket/cirq/cirq_convert.py | https://github.com/CQCL/pytket/blob/ae68f7402dcb5fb45221832cc6185d267bdd7a71/pytket/cirq/cirq_convert.py#L113-L157 | def tk_to_cirq(tkcirc: Circuit, indexed_qubits: List[QubitId]) -> cirq.Circuit:
"""Converts a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` object to a Cirq :py:class:`Circuit`.
:param tkcirc: The input :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit`
:param indexed_qubits: Map from :math:`\\mathrm{t|ket}\\rangle` qubit indices to Cirq :py:class:`QubitId` s
:return: The Cirq :py:class:`Circuit` corresponding to the input circuit
"""
grid = tkcirc._int_routing_grid()
qubits = _grid_to_qubits(grid)
oplst = []
slices = []
for s in grid:
news = set()
for pair in s:
if pair[0]>-1:
news.add(pair[0])
slices.append(news)
for s in slices:
for v in s:
op = tkcirc._unsigned_to_op(v)
optype = op.get_type()
if optype == OpType.Input or optype == OpType.Output:
continue
try:
gatetype = _ops2cirq_mapping[optype]
except KeyError as error:
raise NotImplementedError("Cannot convert tket Op to cirq gate: " + op.get_name()) from error
n_qubits = op.get_n_inputs()
qids = []
for i in range(n_qubits):
qbit = qubits[(v,i)]
qids.append(indexed_qubits[qbit])
params = op.get_params()
if gatetype in _rotation_types:
cirqop = gatetype(exponent=params[0])(*qids)
elif gatetype == cirq_common.MeasurementGate:
for q in qids:
cirqop = cirq_common.measure(q, key=op.get_desc())
else:
cirqop = gatetype(*qids)
oplst.append(cirqop)
return cirq.Circuit.from_ops(*oplst) | [
"def",
"tk_to_cirq",
"(",
"tkcirc",
":",
"Circuit",
",",
"indexed_qubits",
":",
"List",
"[",
"QubitId",
"]",
")",
"->",
"cirq",
".",
"Circuit",
":",
"grid",
"=",
"tkcirc",
".",
"_int_routing_grid",
"(",
")",
"qubits",
"=",
"_grid_to_qubits",
"(",
"grid",
")",
"oplst",
"=",
"[",
"]",
"slices",
"=",
"[",
"]",
"for",
"s",
"in",
"grid",
":",
"news",
"=",
"set",
"(",
")",
"for",
"pair",
"in",
"s",
":",
"if",
"pair",
"[",
"0",
"]",
">",
"-",
"1",
":",
"news",
".",
"add",
"(",
"pair",
"[",
"0",
"]",
")",
"slices",
".",
"append",
"(",
"news",
")",
"for",
"s",
"in",
"slices",
":",
"for",
"v",
"in",
"s",
":",
"op",
"=",
"tkcirc",
".",
"_unsigned_to_op",
"(",
"v",
")",
"optype",
"=",
"op",
".",
"get_type",
"(",
")",
"if",
"optype",
"==",
"OpType",
".",
"Input",
"or",
"optype",
"==",
"OpType",
".",
"Output",
":",
"continue",
"try",
":",
"gatetype",
"=",
"_ops2cirq_mapping",
"[",
"optype",
"]",
"except",
"KeyError",
"as",
"error",
":",
"raise",
"NotImplementedError",
"(",
"\"Cannot convert tket Op to cirq gate: \"",
"+",
"op",
".",
"get_name",
"(",
")",
")",
"from",
"error",
"n_qubits",
"=",
"op",
".",
"get_n_inputs",
"(",
")",
"qids",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_qubits",
")",
":",
"qbit",
"=",
"qubits",
"[",
"(",
"v",
",",
"i",
")",
"]",
"qids",
".",
"append",
"(",
"indexed_qubits",
"[",
"qbit",
"]",
")",
"params",
"=",
"op",
".",
"get_params",
"(",
")",
"if",
"gatetype",
"in",
"_rotation_types",
":",
"cirqop",
"=",
"gatetype",
"(",
"exponent",
"=",
"params",
"[",
"0",
"]",
")",
"(",
"*",
"qids",
")",
"elif",
"gatetype",
"==",
"cirq_common",
".",
"MeasurementGate",
":",
"for",
"q",
"in",
"qids",
":",
"cirqop",
"=",
"cirq_common",
".",
"measure",
"(",
"q",
",",
"key",
"=",
"op",
".",
"get_desc",
"(",
")",
")",
"else",
":",
"cirqop",
"=",
"gatetype",
"(",
"*",
"qids",
")",
"oplst",
".",
"append",
"(",
"cirqop",
")",
"return",
"cirq",
".",
"Circuit",
".",
"from_ops",
"(",
"*",
"oplst",
")"
] | Converts a :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit` object to a Cirq :py:class:`Circuit`.
:param tkcirc: The input :math:`\\mathrm{t|ket}\\rangle` :py:class:`Circuit`
:param indexed_qubits: Map from :math:`\\mathrm{t|ket}\\rangle` qubit indices to Cirq :py:class:`QubitId` s
:return: The Cirq :py:class:`Circuit` corresponding to the input circuit | [
"Converts",
"a",
":",
"math",
":",
"\\\\",
"mathrm",
"{",
"t|ket",
"}",
"\\\\",
"rangle",
":",
"py",
":",
"class",
":",
"Circuit",
"object",
"to",
"a",
"Cirq",
":",
"py",
":",
"class",
":",
"Circuit",
".",
":",
"param",
"tkcirc",
":",
"The",
"input",
":",
"math",
":",
"\\\\",
"mathrm",
"{",
"t|ket",
"}",
"\\\\",
"rangle",
":",
"py",
":",
"class",
":",
"Circuit",
":",
"param",
"indexed_qubits",
":",
"Map",
"from",
":",
"math",
":",
"\\\\",
"mathrm",
"{",
"t|ket",
"}",
"\\\\",
"rangle",
"qubit",
"indices",
"to",
"Cirq",
":",
"py",
":",
"class",
":",
"QubitId",
"s",
":",
"return",
":",
"The",
"Cirq",
":",
"py",
":",
"class",
":",
"Circuit",
"corresponding",
"to",
"the",
"input",
"circuit"
] | python | train |
hydpy-dev/hydpy | hydpy/auxs/xmltools.py | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/xmltools.py#L1695-L1733 | def get_subsequencesinsertion(cls, subsequences, indent) -> str:
"""Return the insertion string required for the given group of
sequences.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> from hydpy import prepare_model
>>> model = prepare_model('hland_v1')
>>> print(XSDWriter.get_subsequencesinsertion(
... model.sequences.fluxes, 1)) # doctest: +ELLIPSIS
<element name="fluxes"
minOccurs="0">
<complexType>
<sequence>
<element
name="tmean"
minOccurs="0"/>
<element
name="tc"
minOccurs="0"/>
...
<element
name="qt"
minOccurs="0"/>
</sequence>
</complexType>
</element>
"""
blanks = ' ' * (indent*4)
lines = [f'{blanks}<element name="{subsequences.name}"',
f'{blanks} minOccurs="0">',
f'{blanks} <complexType>',
f'{blanks} <sequence>']
for sequence in subsequences:
lines.append(cls.get_sequenceinsertion(sequence, indent + 3))
lines.extend([f'{blanks} </sequence>',
f'{blanks} </complexType>',
f'{blanks}</element>'])
return '\n'.join(lines) | [
"def",
"get_subsequencesinsertion",
"(",
"cls",
",",
"subsequences",
",",
"indent",
")",
"->",
"str",
":",
"blanks",
"=",
"' '",
"*",
"(",
"indent",
"*",
"4",
")",
"lines",
"=",
"[",
"f'{blanks}<element name=\"{subsequences.name}\"'",
",",
"f'{blanks} minOccurs=\"0\">'",
",",
"f'{blanks} <complexType>'",
",",
"f'{blanks} <sequence>'",
"]",
"for",
"sequence",
"in",
"subsequences",
":",
"lines",
".",
"append",
"(",
"cls",
".",
"get_sequenceinsertion",
"(",
"sequence",
",",
"indent",
"+",
"3",
")",
")",
"lines",
".",
"extend",
"(",
"[",
"f'{blanks} </sequence>'",
",",
"f'{blanks} </complexType>'",
",",
"f'{blanks}</element>'",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] | Return the insertion string required for the given group of
sequences.
>>> from hydpy.auxs.xmltools import XSDWriter
>>> from hydpy import prepare_model
>>> model = prepare_model('hland_v1')
>>> print(XSDWriter.get_subsequencesinsertion(
... model.sequences.fluxes, 1)) # doctest: +ELLIPSIS
<element name="fluxes"
minOccurs="0">
<complexType>
<sequence>
<element
name="tmean"
minOccurs="0"/>
<element
name="tc"
minOccurs="0"/>
...
<element
name="qt"
minOccurs="0"/>
</sequence>
</complexType>
</element> | [
"Return",
"the",
"insertion",
"string",
"required",
"for",
"the",
"given",
"group",
"of",
"sequences",
"."
] | python | train |
tensorflow/mesh | examples/mnist.py | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/examples/mnist.py#L207-L236 | def run_mnist():
"""Run MNIST training and eval loop."""
mnist_classifier = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=FLAGS.model_dir)
# Set up training and evaluation input functions.
def train_input_fn():
"""Prepare data for training."""
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(FLAGS.data_dir)
ds_batched = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size)
# Iterate through the dataset a set number (`epochs_between_evals`) of times
# during each training session.
ds = ds_batched.repeat(FLAGS.epochs_between_evals)
return ds
def eval_input_fn():
return dataset.test(FLAGS.data_dir).batch(
FLAGS.batch_size).make_one_shot_iterator().get_next()
# Train and evaluate model.
for _ in range(FLAGS.train_epochs // FLAGS.epochs_between_evals):
mnist_classifier.train(input_fn=train_input_fn, hooks=None)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print("\nEvaluation results:\n\t%s\n" % eval_results) | [
"def",
"run_mnist",
"(",
")",
":",
"mnist_classifier",
"=",
"tf",
".",
"estimator",
".",
"Estimator",
"(",
"model_fn",
"=",
"model_fn",
",",
"model_dir",
"=",
"FLAGS",
".",
"model_dir",
")",
"# Set up training and evaluation input functions.",
"def",
"train_input_fn",
"(",
")",
":",
"\"\"\"Prepare data for training.\"\"\"",
"# When choosing shuffle buffer sizes, larger sizes result in better",
"# randomness, while smaller sizes use less memory. MNIST is a small",
"# enough dataset that we can easily shuffle the full epoch.",
"ds",
"=",
"dataset",
".",
"train",
"(",
"FLAGS",
".",
"data_dir",
")",
"ds_batched",
"=",
"ds",
".",
"cache",
"(",
")",
".",
"shuffle",
"(",
"buffer_size",
"=",
"50000",
")",
".",
"batch",
"(",
"FLAGS",
".",
"batch_size",
")",
"# Iterate through the dataset a set number (`epochs_between_evals`) of times",
"# during each training session.",
"ds",
"=",
"ds_batched",
".",
"repeat",
"(",
"FLAGS",
".",
"epochs_between_evals",
")",
"return",
"ds",
"def",
"eval_input_fn",
"(",
")",
":",
"return",
"dataset",
".",
"test",
"(",
"FLAGS",
".",
"data_dir",
")",
".",
"batch",
"(",
"FLAGS",
".",
"batch_size",
")",
".",
"make_one_shot_iterator",
"(",
")",
".",
"get_next",
"(",
")",
"# Train and evaluate model.",
"for",
"_",
"in",
"range",
"(",
"FLAGS",
".",
"train_epochs",
"//",
"FLAGS",
".",
"epochs_between_evals",
")",
":",
"mnist_classifier",
".",
"train",
"(",
"input_fn",
"=",
"train_input_fn",
",",
"hooks",
"=",
"None",
")",
"eval_results",
"=",
"mnist_classifier",
".",
"evaluate",
"(",
"input_fn",
"=",
"eval_input_fn",
")",
"print",
"(",
"\"\\nEvaluation results:\\n\\t%s\\n\"",
"%",
"eval_results",
")"
] | Run MNIST training and eval loop. | [
"Run",
"MNIST",
"training",
"and",
"eval",
"loop",
"."
] | python | train |
Aperture-py/aperture-lib | aperturelib/watermark.py | https://github.com/Aperture-py/aperture-lib/blob/5c54af216319f297ddf96181a16f088cf1ba23f3/aperturelib/watermark.py#L59-L152 | def watermark_text(image, text, corner=2):
'''Adds a text watermark to an instance of a PIL Image.
The text will be sized so that the height of the text is
roughly 1/20th the height of the base image. The text will
be white with a thin black outline.
Args:
image: An instance of a PIL Image. This is the base image.
text: Text to use as a watermark.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image
'''
# Load Font
FONT_PATH = ''
if resource_exists(__name__, 'resources/fonts/SourceSansPro-Regular.ttf'):
FONT_PATH = resource_filename(
__name__, 'resources/fonts/SourceSansPro-Regular.ttf')
padding = 5
was_P = image.mode == 'P'
was_L = image.mode == 'L'
# Fix PIL palette issue by converting palette images to RGBA
if image.mode not in ['RGB', 'RGBA']:
if image.format in ['JPG', 'JPEG']:
image = image.convert('RGB')
else:
image = image.convert('RGBA')
# Get drawable image
img_draw = ImageDraw.Draw(image)
fontsize = 1 # starting font size
# portion of image height you want text height to be.
# default font size will have a height that is ~1/20
# the height of the base image.
img_fraction = 0.05
# attempt to use Aperture default font. If that fails, use ImageFont default
try:
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
was_over = False
inc = 2
while True:
if font.getsize(text)[1] > img_fraction * image.height:
if not was_over:
was_over = True
inc = -1
else:
if was_over:
break
# iterate until the text size is just larger than the criteria
fontsize += inc
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
fontsize -= 1
font = ImageFont.truetype(font=FONT_PATH, size=fontsize)
except:
# replace with log message
print('Failed to load Aperture font. Using default font instead.')
font = ImageFont.load_default() # Bad because default is suuuuper small
# get position of text
pos = get_pos(corner, image.size, font.getsize(text), padding)
# draw a thin black border
img_draw.text((pos[0] - 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0] + 1, pos[1]), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] - 1), text, font=font, fill='black')
img_draw.text((pos[0], pos[1] + 1), text, font=font, fill='black')
# draw the actual text
img_draw.text(pos, text, font=font, fill='white')
# Remove cached font file
cleanup_resources()
del img_draw
if was_P:
image = image.convert('P', palette=Image.ADAPTIVE, colors=256)
elif was_L:
image = image.convert('L')
return image | [
"def",
"watermark_text",
"(",
"image",
",",
"text",
",",
"corner",
"=",
"2",
")",
":",
"# Load Font",
"FONT_PATH",
"=",
"''",
"if",
"resource_exists",
"(",
"__name__",
",",
"'resources/fonts/SourceSansPro-Regular.ttf'",
")",
":",
"FONT_PATH",
"=",
"resource_filename",
"(",
"__name__",
",",
"'resources/fonts/SourceSansPro-Regular.ttf'",
")",
"padding",
"=",
"5",
"was_P",
"=",
"image",
".",
"mode",
"==",
"'P'",
"was_L",
"=",
"image",
".",
"mode",
"==",
"'L'",
"# Fix PIL palette issue by converting palette images to RGBA",
"if",
"image",
".",
"mode",
"not",
"in",
"[",
"'RGB'",
",",
"'RGBA'",
"]",
":",
"if",
"image",
".",
"format",
"in",
"[",
"'JPG'",
",",
"'JPEG'",
"]",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'RGB'",
")",
"else",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'RGBA'",
")",
"# Get drawable image",
"img_draw",
"=",
"ImageDraw",
".",
"Draw",
"(",
"image",
")",
"fontsize",
"=",
"1",
"# starting font size",
"# portion of image width you want text height to be.",
"# default font size will have a height that is ~1/20",
"# the height of the base image.",
"img_fraction",
"=",
"0.05",
"# attempt to use Aperture default font. If that fails, use ImageFont default",
"try",
":",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"font",
"=",
"FONT_PATH",
",",
"size",
"=",
"fontsize",
")",
"was_over",
"=",
"False",
"inc",
"=",
"2",
"while",
"True",
":",
"if",
"font",
".",
"getsize",
"(",
"text",
")",
"[",
"1",
"]",
">",
"img_fraction",
"*",
"image",
".",
"height",
":",
"if",
"not",
"was_over",
":",
"was_over",
"=",
"True",
"inc",
"=",
"-",
"1",
"else",
":",
"if",
"was_over",
":",
"break",
"# iterate until the text size is just larger than the criteria",
"fontsize",
"+=",
"inc",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"font",
"=",
"FONT_PATH",
",",
"size",
"=",
"fontsize",
")",
"fontsize",
"-=",
"1",
"font",
"=",
"ImageFont",
".",
"truetype",
"(",
"font",
"=",
"FONT_PATH",
",",
"size",
"=",
"fontsize",
")",
"except",
":",
"# replace with log message",
"print",
"(",
"'Failed to load Aperture font. Using default font instead.'",
")",
"font",
"=",
"ImageFont",
".",
"load_default",
"(",
")",
"# Bad because default is suuuuper small",
"# get position of text",
"pos",
"=",
"get_pos",
"(",
"corner",
",",
"image",
".",
"size",
",",
"font",
".",
"getsize",
"(",
"text",
")",
",",
"padding",
")",
"# draw a thin black border",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
"-",
"1",
",",
"pos",
"[",
"1",
"]",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
"+",
"1",
",",
"pos",
"[",
"1",
"]",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
"-",
"1",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"img_draw",
".",
"text",
"(",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
"+",
"1",
")",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'black'",
")",
"# draw the actual text",
"img_draw",
".",
"text",
"(",
"pos",
",",
"text",
",",
"font",
"=",
"font",
",",
"fill",
"=",
"'white'",
")",
"# Remove cached font file",
"cleanup_resources",
"(",
")",
"del",
"img_draw",
"if",
"was_P",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'P'",
",",
"palette",
"=",
"Image",
".",
"ADAPTIVE",
",",
"colors",
"=",
"256",
")",
"elif",
"was_L",
":",
"image",
"=",
"image",
".",
"convert",
"(",
"'L'",
")",
"return",
"image"
] | Adds a text watermark to an instance of a PIL Image.
The text will be sized so that the height of the text is
roughly 1/20th the height of the base image. The text will
be white with a thin black outline.
Args:
image: An instance of a PIL Image. This is the base image.
text: Text to use as a watermark.
corner: An integer between 0 and 3 representing the corner
where the watermark image should be placed on top of the
base image. 0 is top left, 1 is top right, 2 is bottom
right and 3 is bottom left. NOTE: Right now, this is
permanently set to 2 (bottom right) but this can be
changed in the future by either creating a new cmd-line
flag or putting this in the config file.
Returns: The watermarked image | [
"Adds",
"a",
"text",
"watermark",
"to",
"an",
"instance",
"of",
"a",
"PIL",
"Image",
"."
] | python | train |
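get_pos() is imported from elsewhere in the package and not shown in this record; an assumed implementation that anchors a (width, height) box in one of the four corners with padding:

def get_pos(corner, img_size, mark_size, padding):
    # Corners: 0 top left, 1 top right, 2 bottom right, 3 bottom left.
    img_w, img_h = img_size
    mark_w, mark_h = mark_size
    x = padding if corner in (0, 3) else img_w - mark_w - padding
    y = padding if corner in (0, 1) else img_h - mark_h - padding
    return (x, y)

print(get_pos(2, (800, 600), (120, 30), 5))   # bottom right -> (675, 565)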
timmahrt/ProMo | promo/morph_utils/morph_sequence.py | https://github.com/timmahrt/ProMo/blob/99d9f5cc01ff328a62973c5a5da910cc905ae4d5/promo/morph_utils/morph_sequence.py#L79-L95 | def _makeTimingAbsolute(relativeDataList, startTime, endTime):
'''
Maps values from 0 to 1 to the provided start and end time
Input is a list of tuples of the form
[(time1, pitch1), (time2, pitch2), ...]
'''
timingSeq = [row[0] for row in relativeDataList]
valueSeq = [list(row[1:]) for row in relativeDataList]
absTimingSeq = makeSequenceAbsolute(timingSeq, startTime, endTime)
absDataList = [tuple([time, ] + row) for time, row
in zip(absTimingSeq, valueSeq)]
return absDataList | [
"def",
"_makeTimingAbsolute",
"(",
"relativeDataList",
",",
"startTime",
",",
"endTime",
")",
":",
"timingSeq",
"=",
"[",
"row",
"[",
"0",
"]",
"for",
"row",
"in",
"relativeDataList",
"]",
"valueSeq",
"=",
"[",
"list",
"(",
"row",
"[",
"1",
":",
"]",
")",
"for",
"row",
"in",
"relativeDataList",
"]",
"absTimingSeq",
"=",
"makeSequenceAbsolute",
"(",
"timingSeq",
",",
"startTime",
",",
"endTime",
")",
"absDataList",
"=",
"[",
"tuple",
"(",
"[",
"time",
",",
"]",
"+",
"row",
")",
"for",
"time",
",",
"row",
"in",
"zip",
"(",
"absTimingSeq",
",",
"valueSeq",
")",
"]",
"return",
"absDataList"
] | Maps values from 0 to 1 to the provided start and end time
Input is a list of tuples of the form
[(time1, pitch1), (time2, pitch2), ...] | [
"Maps",
"values",
"from",
"0",
"to",
"1",
"to",
"the",
"provided",
"start",
"and",
"end",
"time"
] | python | train |
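makeSequenceAbsolute() is defined elsewhere in the module; the mapping it implies is a linear rescale of 0..1 times onto [startTime, endTime], sketched here under that assumption:

def make_sequence_absolute(times, start, end):
    return [start + t * (end - start) for t in times]

rel = [(0.0, 100), (0.5, 120), (1.0, 90)]
abs_times = make_sequence_absolute([t for t, _ in rel], 2.0, 4.0)
print(list(zip(abs_times, (p for _, p in rel))))
# [(2.0, 100), (3.0, 120), (4.0, 90)]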
vertexproject/synapse | synapse/cortex.py | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L1570-L1580 | async def eval(self, text, opts=None, user=None):
'''
Evaluate a storm query and yield Nodes only.
'''
if user is None:
user = self.auth.getUserByName('root')
await self.boss.promote('storm', user=user, info={'query': text})
async with await self.snap(user=user) as snap:
async for node in snap.eval(text, opts=opts, user=user):
yield node | [
"async",
"def",
"eval",
"(",
"self",
",",
"text",
",",
"opts",
"=",
"None",
",",
"user",
"=",
"None",
")",
":",
"if",
"user",
"is",
"None",
":",
"user",
"=",
"self",
".",
"auth",
".",
"getUserByName",
"(",
"'root'",
")",
"await",
"self",
".",
"boss",
".",
"promote",
"(",
"'storm'",
",",
"user",
"=",
"user",
",",
"info",
"=",
"{",
"'query'",
":",
"text",
"}",
")",
"async",
"with",
"await",
"self",
".",
"snap",
"(",
"user",
"=",
"user",
")",
"as",
"snap",
":",
"async",
"for",
"node",
"in",
"snap",
".",
"eval",
"(",
"text",
",",
"opts",
"=",
"opts",
",",
"user",
"=",
"user",
")",
":",
"yield",
"node"
] | Evaluate a storm query and yield Nodes only. | [
"Evaluate",
"a",
"storm",
"query",
"and",
"yield",
"Nodes",
"only",
"."
] | python | train |
spencerahill/aospy | aospy/region.py | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/region.py#L232-L264 | def mask_var(self, data, lon_cyclic=True, lon_str=LON_STR,
lat_str=LAT_STR):
"""Mask the given data outside this region.
Parameters
----------
data : xarray.DataArray
The array to be regionally masked.
lon_cyclic : bool, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lon_str, lat_str : str, optional
The names of the longitude and latitude dimensions, respectively,
in the data to be masked. Defaults are
``aospy.internal_names.LON_STR`` and
``aospy.internal_names.LAT_STR``, respectively.
Returns
-------
xarray.DataArray
The original array with points outside of the region masked.
"""
# TODO: is this still necessary?
if not lon_cyclic:
if self.west_bound > self.east_bound:
raise ValueError("Longitudes of data to be masked are "
"specified as non-cyclic, but Region's "
"definition requires wraparound longitudes.")
masked = data.where(self._make_mask(data, lon_str=lon_str,
lat_str=lat_str))
return masked | [
"def",
"mask_var",
"(",
"self",
",",
"data",
",",
"lon_cyclic",
"=",
"True",
",",
"lon_str",
"=",
"LON_STR",
",",
"lat_str",
"=",
"LAT_STR",
")",
":",
"# TODO: is this still necessary?",
"if",
"not",
"lon_cyclic",
":",
"if",
"self",
".",
"west_bound",
">",
"self",
".",
"east_bound",
":",
"raise",
"ValueError",
"(",
"\"Longitudes of data to be masked are \"",
"\"specified as non-cyclic, but Region's \"",
"\"definition requires wraparound longitudes.\"",
")",
"masked",
"=",
"data",
".",
"where",
"(",
"self",
".",
"_make_mask",
"(",
"data",
",",
"lon_str",
"=",
"lon_str",
",",
"lat_str",
"=",
"lat_str",
")",
")",
"return",
"masked"
] | Mask the given data outside this region.
Parameters
----------
data : xarray.DataArray
The array to be regionally masked.
lon_cyclic : bool, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lon_str, lat_str : str, optional
The names of the longitude and latitude dimensions, respectively,
in the data to be masked. Defaults are
``aospy.internal_names.LON_STR`` and
``aospy.internal_names.LAT_STR``, respectively.
Returns
-------
xarray.DataArray
The original array with points outside of the region masked. | [
"Mask",
"the",
"given",
"data",
"outside",
"this",
"region",
"."
] | python | train |
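A minimal analogue of the masking step with plain xarray: build a boolean box over the lat/lon coordinates and let DataArray.where() set everything outside it to NaN. Coordinate names and bounds here are illustrative:

import numpy as np
import xarray as xr

data = xr.DataArray(
    np.arange(12.0).reshape(3, 4),
    coords={'lat': [-30.0, 0.0, 30.0], 'lon': [0.0, 90.0, 180.0, 270.0]},
    dims=('lat', 'lon'),
)
mask = ((data.lat >= -15) & (data.lat <= 15) &
        (data.lon >= 45) & (data.lon <= 200))
print(data.where(mask))   # only lat=0 with lon in {90, 180} survives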
linode/linode_api4-python | linode_api4/objects/linode.py | https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/objects/linode.py#L321-L328 | def invalidate(self):
""" Clear out cached properties """
if hasattr(self, '_avail_backups'):
del self._avail_backups
if hasattr(self, '_ips'):
del self._ips
Base.invalidate(self) | [
"def",
"invalidate",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'_avail_backups'",
")",
":",
"del",
"self",
".",
"_avail_backups",
"if",
"hasattr",
"(",
"self",
",",
"'_ips'",
")",
":",
"del",
"self",
".",
"_ips",
"Base",
".",
"invalidate",
"(",
"self",
")"
] | Clear out cached properties | [
"Clear",
"out",
"cached",
"properties"
] | python | train |
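The generic form of the lazy-cache pattern being cleared above, as a stand-alone sketch; Node and _fetch_ips are hypothetical names:

class Node:
    @property
    def ips(self):
        if not hasattr(self, '_ips'):
            self._ips = self._fetch_ips()   # compute once, then reuse
        return self._ips

    def _fetch_ips(self):
        return ['192.0.2.1']

    def invalidate(self):
        if hasattr(self, '_ips'):
            del self._ips                   # next access refetches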
astropy/astropy-helpers | astropy_helpers/version_helpers.py | https://github.com/astropy/astropy-helpers/blob/f5a27d3f84a98ea0eebb85e0cf3e7214c6bc0d09/astropy_helpers/version_helpers.py#L43-L85 | def _version_split(version):
"""
Split a version string into major, minor, and bugfix numbers. If any of
those numbers are missing the default is zero. Any pre/post release
modifiers are ignored.
Examples
========
>>> _version_split('1.2.3')
(1, 2, 3)
>>> _version_split('1.2')
(1, 2, 0)
>>> _version_split('1.2rc1')
(1, 2, 0)
>>> _version_split('1')
(1, 0, 0)
>>> _version_split('')
(0, 0, 0)
"""
parsed_version = pkg_resources.parse_version(version)
if hasattr(parsed_version, 'base_version'):
# New version parsing for setuptools >= 8.0
if parsed_version.base_version:
parts = [int(part)
for part in parsed_version.base_version.split('.')]
else:
parts = []
else:
parts = []
for part in parsed_version:
if part.startswith('*'):
# Ignore any .dev, a, b, rc, etc.
break
parts.append(int(part))
if len(parts) < 3:
parts += [0] * (3 - len(parts))
# In principle a version could have more parts (like 1.2.3.4) but we only
# support <major>.<minor>.<micro>
return tuple(parts[:3]) | [
"def",
"_version_split",
"(",
"version",
")",
":",
"parsed_version",
"=",
"pkg_resources",
".",
"parse_version",
"(",
"version",
")",
"if",
"hasattr",
"(",
"parsed_version",
",",
"'base_version'",
")",
":",
"# New version parsing for setuptools >= 8.0",
"if",
"parsed_version",
".",
"base_version",
":",
"parts",
"=",
"[",
"int",
"(",
"part",
")",
"for",
"part",
"in",
"parsed_version",
".",
"base_version",
".",
"split",
"(",
"'.'",
")",
"]",
"else",
":",
"parts",
"=",
"[",
"]",
"else",
":",
"parts",
"=",
"[",
"]",
"for",
"part",
"in",
"parsed_version",
":",
"if",
"part",
".",
"startswith",
"(",
"'*'",
")",
":",
"# Ignore any .dev, a, b, rc, etc.",
"break",
"parts",
".",
"append",
"(",
"int",
"(",
"part",
")",
")",
"if",
"len",
"(",
"parts",
")",
"<",
"3",
":",
"parts",
"+=",
"[",
"0",
"]",
"*",
"(",
"3",
"-",
"len",
"(",
"parts",
")",
")",
"# In principle a version could have more parts (like 1.2.3.4) but we only",
"# support <major>.<minor>.<micro>",
"return",
"tuple",
"(",
"parts",
"[",
":",
"3",
"]",
")"
] | Split a version string into major, minor, and bugfix numbers. If any of
those numbers are missing the default is zero. Any pre/post release
modifiers are ignored.
Examples
========
>>> _version_split('1.2.3')
(1, 2, 3)
>>> _version_split('1.2')
(1, 2, 0)
>>> _version_split('1.2rc1')
(1, 2, 0)
>>> _version_split('1')
(1, 0, 0)
>>> _version_split('')
(0, 0, 0) | [
"Split",
"a",
"version",
"string",
"into",
"major",
"minor",
"and",
"bugfix",
"numbers",
".",
"If",
"any",
"of",
"those",
"numbers",
"are",
"missing",
"the",
"default",
"is",
"zero",
".",
"Any",
"pre",
"/",
"post",
"release",
"modifiers",
"are",
"ignored",
"."
] | python | train |
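A dependency-free sketch with the same behaviour, assuming only the leading dotted-integer release segment matters; pkg_resources handles many more edge cases (epochs, local versions):

import re

def version_split(version):
    m = re.match(r'\d+(\.\d+)*', version)
    parts = [int(p) for p in m.group(0).split('.')] if m else []
    parts += [0] * (3 - len(parts))
    return tuple(parts[:3])

assert version_split('1.2rc1') == (1, 2, 0)
assert version_split('') == (0, 0, 0)
assert version_split('1.2.3.4') == (1, 2, 3)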
tensorflow/lucid | lucid/misc/io/serialize_array.py | https://github.com/tensorflow/lucid/blob/d1a1e2e4fd4be61b89b8cba20dc425a5ae34576e/lucid/misc/io/serialize_array.py#L126-L161 | def array_to_jsbuffer(array):
"""Serialize 1d NumPy array to JS TypedArray.
Data is serialized to base64-encoded string, which is much faster
and memory-efficient than json list serialization.
Args:
array: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES.
Returns:
JS code that evaluates to a TypedArray as string.
Raises:
TypeError: if array dtype or shape not supported.
"""
if array.ndim != 1:
raise TypeError('Only 1d arrays can be converted JS TypedArray.')
if array.dtype.name not in JS_ARRAY_TYPES:
raise TypeError('Array dtype not supported by JS TypedArray.')
js_type_name = array.dtype.name.capitalize() + 'Array'
data_base64 = base64.b64encode(array.tobytes()).decode('ascii')
code = """
(function() {
const data = atob("%s");
const buf = new Uint8Array(data.length);
for (var i=0; i<data.length; ++i) {
buf[i] = data.charCodeAt(i);
}
var array_type = %s;
if (array_type == Uint8Array) {
return buf;
}
return new array_type(buf.buffer);
})()
""" % (data_base64, js_type_name)
return code | [
"def",
"array_to_jsbuffer",
"(",
"array",
")",
":",
"if",
"array",
".",
"ndim",
"!=",
"1",
":",
"raise",
"TypeError",
"(",
"'Only 1d arrays can be converted JS TypedArray.'",
")",
"if",
"array",
".",
"dtype",
".",
"name",
"not",
"in",
"JS_ARRAY_TYPES",
":",
"raise",
"TypeError",
"(",
"'Array dtype not supported by JS TypedArray.'",
")",
"js_type_name",
"=",
"array",
".",
"dtype",
".",
"name",
".",
"capitalize",
"(",
")",
"+",
"'Array'",
"data_base64",
"=",
"base64",
".",
"b64encode",
"(",
"array",
".",
"tobytes",
"(",
")",
")",
".",
"decode",
"(",
"'ascii'",
")",
"code",
"=",
"\"\"\"\n (function() {\n const data = atob(\"%s\");\n const buf = new Uint8Array(data.length);\n for (var i=0; i<data.length; ++i) {\n buf[i] = data.charCodeAt(i);\n }\n var array_type = %s;\n if (array_type == Uint8Array) {\n return buf;\n }\n return new array_type(buf.buffer);\n })()\n \"\"\"",
"%",
"(",
"data_base64",
",",
"js_type_name",
")",
"return",
"code"
] | Serialize 1d NumPy array to JS TypedArray.
Data is serialized to base64-encoded string, which is much faster
and memory-efficient than json list serialization.
Args:
array: 1d NumPy array, dtype must be one of JS_ARRAY_TYPES.
Returns:
JS code that evaluates to a TypedArray as string.
Raises:
TypeError: if array dtype or shape not supported. | [
"Serialize",
"1d",
"NumPy",
"array",
"to",
"JS",
"TypedArray",
"."
] | python | train |
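A round-trip check of the encoding above: what atob() plus a TypedArray view do on the JS side, reproduced in Python:

import base64
import numpy as np

arr = np.array([1.5, -2.0, 3.25], dtype=np.float32)
data_base64 = base64.b64encode(arr.tobytes()).decode('ascii')
decoded = np.frombuffer(base64.b64decode(data_base64), dtype=np.float32)
assert (decoded == arr).all()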
alefnula/tea | tea/console/format.py | https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/console/format.py#L97-L123 | def print_page(text):
"""Format the text and prints it on stdout.
Text is formatted by adding a ASCII frame around it and coloring the text.
Colors can be added to text using color tags, for example:
My [FG_BLUE]blue[NORMAL] text.
My [BG_BLUE]blue background[NORMAL] text.
"""
color_re = re.compile(r"\[(?P<color>[FB]G_[A-Z_]+|NORMAL)\]")
width = max([len(strip_colors(x)) for x in text.splitlines()])
print("\n" + hbar(width))
for line in text.splitlines():
if line == "[HBAR]":
print(hbar(width))
continue
tail = width - len(strip_colors(line))
sys.stdout.write("| ")
previous = 0
end = len(line)
for match in color_re.finditer(line):
sys.stdout.write(line[previous : match.start()])
set_color(match.groupdict()["color"])
previous = match.end()
sys.stdout.write(line[previous:end])
sys.stdout.write(" " * tail + " |\n")
print(hbar(width)) | [
"def",
"print_page",
"(",
"text",
")",
":",
"color_re",
"=",
"re",
".",
"compile",
"(",
"r\"\\[(?P<color>[FB]G_[A-Z_]+|NORMAL)\\]\"",
")",
"width",
"=",
"max",
"(",
"[",
"len",
"(",
"strip_colors",
"(",
"x",
")",
")",
"for",
"x",
"in",
"text",
".",
"splitlines",
"(",
")",
"]",
")",
"print",
"(",
"\"\\n\"",
"+",
"hbar",
"(",
"width",
")",
")",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
"==",
"\"[HBAR]\"",
":",
"print",
"(",
"hbar",
"(",
"width",
")",
")",
"continue",
"tail",
"=",
"width",
"-",
"len",
"(",
"strip_colors",
"(",
"line",
")",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"| \"",
")",
"previous",
"=",
"0",
"end",
"=",
"len",
"(",
"line",
")",
"for",
"match",
"in",
"color_re",
".",
"finditer",
"(",
"line",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"line",
"[",
"previous",
":",
"match",
".",
"start",
"(",
")",
"]",
")",
"set_color",
"(",
"match",
".",
"groupdict",
"(",
")",
"[",
"\"color\"",
"]",
")",
"previous",
"=",
"match",
".",
"end",
"(",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"line",
"[",
"previous",
":",
"end",
"]",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\" \"",
"*",
"tail",
"+",
"\" |\\n\"",
")",
"print",
"(",
"hbar",
"(",
"width",
")",
")"
] | Format the text and print it on stdout.
Text is formatted by adding a ASCII frame around it and coloring the text.
Colors can be added to text using color tags, for example:
My [FG_BLUE]blue[NORMAL] text.
My [BG_BLUE]blue background[NORMAL] text. | [
"Format",
"the",
"text",
"and",
"prints",
"it",
"on",
"stdout",
".",
"Text",
"is",
"formatted",
"by",
"adding",
"a",
"ASCII",
"frame",
"around",
"it",
"and",
"coloring",
"the",
"text",
".",
"Colors",
"can",
"be",
"added",
"to",
"text",
"using",
"color",
"tags",
"for",
"example",
":",
"My",
"[",
"FG_BLUE",
"]",
"blue",
"[",
"NORMAL",
"]",
"text",
".",
"My",
"[",
"BG_BLUE",
"]",
"blue",
"background",
"[",
"NORMAL",
"]",
"text",
"."
] | python | train |
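strip_colors() is defined elsewhere in the module; the tag grammar used above implies an implementation along these lines:

import re

def strip_colors(text):
    # Drop [FG_*], [BG_*] and [NORMAL] tags so widths can be measured.
    return re.sub(r'\[(?:[FB]G_[A-Z_]+|NORMAL)\]', '', text)

line = 'My [FG_BLUE]blue[NORMAL] text.'
print(strip_colors(line))        # My blue text.
print(len(strip_colors(line)))   # 13 -- the width used to size the frame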
Valuehorizon/valuehorizon-companies | companies/models.py | https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L215-L231 | def get_all_related_companies(self, include_self=False):
"""
Return all parents and subsidiaries of the company
Include the company if include_self = True
"""
parents = self.get_all_parents()
subsidiaries = self.get_all_children()
related_companies = parents | subsidiaries
if include_self is True:
company_qs = Company.objects.filter(id=self.id)
related_companies = related_companies | company_qs
related_companies_ids = [company.id for company in list(set(related_companies))]
related_companies = Company.objects.filter(id__in=related_companies_ids)
return related_companies | [
"def",
"get_all_related_companies",
"(",
"self",
",",
"include_self",
"=",
"False",
")",
":",
"parents",
"=",
"self",
".",
"get_all_parents",
"(",
")",
"subsidiaries",
"=",
"self",
".",
"get_all_children",
"(",
")",
"related_companies",
"=",
"parents",
"|",
"subsidiaries",
"if",
"include_self",
"is",
"True",
":",
"company_qs",
"=",
"Company",
".",
"objects",
".",
"filter",
"(",
"id",
"=",
"self",
".",
"id",
")",
"related_companies",
"=",
"related_companies",
"|",
"company_qs",
"related_companies_ids",
"=",
"[",
"company",
".",
"id",
"for",
"company",
"in",
"list",
"(",
"set",
"(",
"related_companies",
")",
")",
"]",
"related_companies",
"=",
"Company",
".",
"objects",
".",
"filter",
"(",
"id__in",
"=",
"related_companies_ids",
")",
"return",
"related_companies"
] | Return all parents and subsidiaries of the company
Include the company if include_self = True | [
"Return",
"all",
"parents",
"and",
"subsidiaries",
"of",
"the",
"company",
"Include",
"the",
"company",
"if",
"include_self",
"=",
"True"
] | python | train |
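A hedged usage sketch for get_all_related_companies; the lookup of a concrete Company row is illustrative, and only the method call and the include_self keyword come from the code above:

# Assumes at least one Company row exists in the database.
from companies.models import Company

acme = Company.objects.first()
related = acme.get_all_related_companies(include_self=True)
print(related.count())  # parents + subsidiaries + acme itself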
benfred/implicit | setup.py | https://github.com/benfred/implicit/blob/6b16c50d1d514a814f2e5b8cf2a829ff23dbba63/setup.py#L81-L98 | def extract_gcc_binaries():
"""Try to find GCC on OSX for OpenMP support."""
patterns = ['/opt/local/bin/g++-mp-[0-9].[0-9]',
'/opt/local/bin/g++-mp-[0-9]',
'/usr/local/bin/g++-[0-9].[0-9]',
'/usr/local/bin/g++-[0-9]']
if 'darwin' in platform.platform().lower():
gcc_binaries = []
for pattern in patterns:
gcc_binaries += glob.glob(pattern)
gcc_binaries.sort()
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
return gcc
else:
return None
else:
return None | [
"def",
"extract_gcc_binaries",
"(",
")",
":",
"patterns",
"=",
"[",
"'/opt/local/bin/g++-mp-[0-9].[0-9]'",
",",
"'/opt/local/bin/g++-mp-[0-9]'",
",",
"'/usr/local/bin/g++-[0-9].[0-9]'",
",",
"'/usr/local/bin/g++-[0-9]'",
"]",
"if",
"'darwin'",
"in",
"platform",
".",
"platform",
"(",
")",
".",
"lower",
"(",
")",
":",
"gcc_binaries",
"=",
"[",
"]",
"for",
"pattern",
"in",
"patterns",
":",
"gcc_binaries",
"+=",
"glob",
".",
"glob",
"(",
"pattern",
")",
"gcc_binaries",
".",
"sort",
"(",
")",
"if",
"gcc_binaries",
":",
"_",
",",
"gcc",
"=",
"os",
".",
"path",
".",
"split",
"(",
"gcc_binaries",
"[",
"-",
"1",
"]",
")",
"return",
"gcc",
"else",
":",
"return",
"None",
"else",
":",
"return",
"None"
] | Try to find GCC on OSX for OpenMP support. | [
"Try",
"to",
"find",
"GCC",
"on",
"OSX",
"for",
"OpenMP",
"support",
"."
] | python | train |
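A minimal sketch of how a setup.py could consume extract_gcc_binaries; exporting the result through CC/CXX is an assumption, not something the record shows:

# Assumes extract_gcc_binaries (defined above) is in scope within setup.py.
import os

gcc = extract_gcc_binaries()
if gcc is not None:  # a MacPorts/Homebrew GCC build was found on macOS
    os.environ["CC"] = gcc
    os.environ["CXX"] = gcc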
ryanpetrello/cleaver | cleaver/reports/web/bottle.py | https://github.com/ryanpetrello/cleaver/blob/0ef266e1a0603c6d414df4b9926ce2a59844db35/cleaver/reports/web/bottle.py#L581-L618 | def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
if isinstance(app, basestring):
prefix, app = app, prefix
depr('Parameter order of Bottle.mount() changed.') # 0.10
parts = [p for p in prefix.split('/') if p]
if not parts: raise ValueError('Empty path prefix.')
path_depth = len(parts)
options.setdefault('skip', True)
options.setdefault('method', 'ANY')
@self.route('/%s/:#.*#' % '/'.join(parts), **options)
def mountpoint():
try:
request.path_shift(path_depth)
rs = BaseResponse([], 200)
def start_response(status, header):
rs.status = status
for name, value in header: rs.add_header(name, value)
return rs.body.append
rs.body = itertools.chain(rs.body, app(request.environ, start_response))
return HTTPResponse(rs.body, rs.status_code, rs.headers)
finally:
request.path_shift(-path_depth)
if not prefix.endswith('/'):
self.route('/' + '/'.join(parts), callback=mountpoint, **options) | [
"def",
"mount",
"(",
"self",
",",
"prefix",
",",
"app",
",",
"*",
"*",
"options",
")",
":",
"if",
"isinstance",
"(",
"app",
",",
"basestring",
")",
":",
"prefix",
",",
"app",
"=",
"app",
",",
"prefix",
"depr",
"(",
"'Parameter order of Bottle.mount() changed.'",
")",
"# 0.10",
"parts",
"=",
"[",
"p",
"for",
"p",
"in",
"prefix",
".",
"split",
"(",
"'/'",
")",
"if",
"p",
"]",
"if",
"not",
"parts",
":",
"raise",
"ValueError",
"(",
"'Empty path prefix.'",
")",
"path_depth",
"=",
"len",
"(",
"parts",
")",
"options",
".",
"setdefault",
"(",
"'skip'",
",",
"True",
")",
"options",
".",
"setdefault",
"(",
"'method'",
",",
"'ANY'",
")",
"@",
"self",
".",
"route",
"(",
"'/%s/:#.*#'",
"%",
"'/'",
".",
"join",
"(",
"parts",
")",
",",
"*",
"*",
"options",
")",
"def",
"mountpoint",
"(",
")",
":",
"try",
":",
"request",
".",
"path_shift",
"(",
"path_depth",
")",
"rs",
"=",
"BaseResponse",
"(",
"[",
"]",
",",
"200",
")",
"def",
"start_response",
"(",
"status",
",",
"header",
")",
":",
"rs",
".",
"status",
"=",
"status",
"for",
"name",
",",
"value",
"in",
"header",
":",
"rs",
".",
"add_header",
"(",
"name",
",",
"value",
")",
"return",
"rs",
".",
"body",
".",
"append",
"rs",
".",
"body",
"=",
"itertools",
".",
"chain",
"(",
"rs",
".",
"body",
",",
"app",
"(",
"request",
".",
"environ",
",",
"start_response",
")",
")",
"return",
"HTTPResponse",
"(",
"rs",
".",
"body",
",",
"rs",
".",
"status_code",
",",
"rs",
".",
"headers",
")",
"finally",
":",
"request",
".",
"path_shift",
"(",
"-",
"path_depth",
")",
"if",
"not",
"prefix",
".",
"endswith",
"(",
"'/'",
")",
":",
"self",
".",
"route",
"(",
"'/'",
"+",
"'/'",
".",
"join",
"(",
"parts",
")",
",",
"callback",
"=",
"mountpoint",
",",
"*",
"*",
"options",
")"
] | Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call. | [
"Mount",
"an",
"application",
"(",
":",
"class",
":",
"Bottle",
"or",
"plain",
"WSGI",
")",
"to",
"a",
"specific",
"URL",
"prefix",
".",
"Example",
"::"
] | python | train |
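A runnable expansion of the root_app.mount('/admin/', admin_app) example from the docstring, using the upstream bottle package for illustration; the /status route is an invented placeholder:

from bottle import Bottle

root_app = Bottle()
admin_app = Bottle()

@admin_app.route('/status')
def status():
    return 'admin ok'

root_app.mount('/admin/', admin_app)  # now reachable as GET /admin/status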
AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L8077-L8102 | def lstlec(string, n, lenvals, array):
"""
Given a character string and an ordered array of character
strings, find the index of the largest array element less than
or equal to the given string.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlec_c.html
:param string: Upper bound value to search against.
:type string: str
:param n: Number of elements in array.
:type n: int
:param lenvals: String length.
:type lenvals: int
:param array: Array of possible lower bounds.
:type array: list
:return:
index of the last element of array that is
lexically less than or equal to string.
:rtype: int
"""
string = stypes.stringToCharP(string)
array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=n)
n = ctypes.c_int(n)
lenvals = ctypes.c_int(lenvals)
return libspice.lstlec_c(string, n, lenvals, array) | [
"def",
"lstlec",
"(",
"string",
",",
"n",
",",
"lenvals",
",",
"array",
")",
":",
"string",
"=",
"stypes",
".",
"stringToCharP",
"(",
"string",
")",
"array",
"=",
"stypes",
".",
"listToCharArrayPtr",
"(",
"array",
",",
"xLen",
"=",
"lenvals",
",",
"yLen",
"=",
"n",
")",
"n",
"=",
"ctypes",
".",
"c_int",
"(",
"n",
")",
"lenvals",
"=",
"ctypes",
".",
"c_int",
"(",
"lenvals",
")",
"return",
"libspice",
".",
"lstlec_c",
"(",
"string",
",",
"n",
",",
"lenvals",
",",
"array",
")"
] | Given a character string and an ordered array of character
strings, find the index of the largest array element less than
or equal to the given string.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlec_c.html
:param string: Upper bound value to search against.
:type string: str
:param n: Number elements in array.
:type n: int
:param lenvals: String length.
:type lenvals: int
:param array: Array of possible lower bounds.
:type array: list
:return:
index of the last element of array that is
lexically less than or equal to string.
:rtype: int | [
"Given",
"a",
"character",
"string",
"and",
"an",
"ordered",
"array",
"of",
"character",
"strings",
"find",
"the",
"index",
"of",
"the",
"largest",
"array",
"element",
"less",
"than",
"or",
"equal",
"to",
"the",
"given",
"string",
"."
] | python | train |
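A hedged usage sketch for spiceypy.lstlec, mirroring the ordered-array example in the CSPICE documentation linked above (no kernels are required for this routine):

import spiceypy as spice

names = ["BOHR", "EINSTEIN", "FEYNMAN", "GALILEO", "NEWTON"]  # lexically ordered
idx = spice.lstlec("NEWTON", len(names), 10, names)
print(idx)  # 4: index of the last element lexically <= "NEWTON"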