Dataset schema (column, type, length range or class count):

  nwo                 stringlengths   5 .. 106
  sha                 stringlengths   40 .. 40
  path                stringlengths   4 .. 174
  language            stringclasses   1 value
  identifier          stringlengths   1 .. 140
  parameters          stringlengths   0 .. 87.7k
  argument_list       stringclasses   1 value
  return_statement    stringlengths   0 .. 426k
  docstring           stringlengths   0 .. 64.3k
  docstring_summary   stringlengths   0 .. 26.3k
  docstring_tokens    list
  function            stringlengths   18 .. 4.83M
  function_tokens     list
  url                 stringlengths   83 .. 304
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py
python
SBCSGroupProber.__init__
(self)
[]
def __init__(self):
    CharSetGroupProber.__init__(self)
    self._mProbers = [
        SingleByteCharSetProber(Win1251CyrillicModel),
        SingleByteCharSetProber(Koi8rModel),
        SingleByteCharSetProber(Latin5CyrillicModel),
        SingleByteCharSetProber(MacCyrillicModel),
        SingleByteCharSetProber(Ibm866Model),
        SingleByteCharSetProber(Ibm855Model),
        SingleByteCharSetProber(Latin7GreekModel),
        SingleByteCharSetProber(Win1253GreekModel),
        SingleByteCharSetProber(Latin5BulgarianModel),
        SingleByteCharSetProber(Win1251BulgarianModel),
        SingleByteCharSetProber(Latin2HungarianModel),
        SingleByteCharSetProber(Win1250HungarianModel),
        SingleByteCharSetProber(TIS620ThaiModel),
    ]
    hebrewProber = HebrewProber()
    logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, False, hebrewProber)
    visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True, hebrewProber)
    hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
    self._mProbers.extend([hebrewProber, logicalHebrewProber, visualHebrewProber])
    self.reset()
[ "def", "__init__", "(", "self", ")", ":", "CharSetGroupProber", ".", "__init__", "(", "self", ")", "self", ".", "_mProbers", "=", "[", "SingleByteCharSetProber", "(", "Win1251CyrillicModel", ")", ",", "SingleByteCharSetProber", "(", "Koi8rModel", ")", ",", "SingleByteCharSetProber", "(", "Latin5CyrillicModel", ")", ",", "SingleByteCharSetProber", "(", "MacCyrillicModel", ")", ",", "SingleByteCharSetProber", "(", "Ibm866Model", ")", ",", "SingleByteCharSetProber", "(", "Ibm855Model", ")", ",", "SingleByteCharSetProber", "(", "Latin7GreekModel", ")", ",", "SingleByteCharSetProber", "(", "Win1253GreekModel", ")", ",", "SingleByteCharSetProber", "(", "Latin5BulgarianModel", ")", ",", "SingleByteCharSetProber", "(", "Win1251BulgarianModel", ")", ",", "SingleByteCharSetProber", "(", "Latin2HungarianModel", ")", ",", "SingleByteCharSetProber", "(", "Win1250HungarianModel", ")", ",", "SingleByteCharSetProber", "(", "TIS620ThaiModel", ")", ",", "]", "hebrewProber", "=", "HebrewProber", "(", ")", "logicalHebrewProber", "=", "SingleByteCharSetProber", "(", "Win1255HebrewModel", ",", "False", ",", "hebrewProber", ")", "visualHebrewProber", "=", "SingleByteCharSetProber", "(", "Win1255HebrewModel", ",", "True", ",", "hebrewProber", ")", "hebrewProber", ".", "set_model_probers", "(", "logicalHebrewProber", ",", "visualHebrewProber", ")", "self", ".", "_mProbers", ".", "extend", "(", "[", "hebrewProber", ",", "logicalHebrewProber", ",", "visualHebrewProber", "]", ")", "self", ".", "reset", "(", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter5-LogisticRegression/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py#L43-L69
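This prober is normally exercised indirectly through chardet's top-level API; a minimal usage sketch, assuming the standalone `chardet` package rather than the pip-vendored copy above (the exact confidence and guess depend on input length):

# Minimal usage sketch: chardet.detect() routes single-byte encodings
# through SBCSGroupProber internally (standalone `chardet` package assumed).
import chardet

raw = "Привет, мир".encode("windows-1251")
result = chardet.detect(raw)
print(result)  # e.g. {'encoding': 'windows-1251', 'confidence': ...}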
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/google/appengine/tools/bulkloader.py
python
ParseKey
(key_string)
return datastore.Key(encoded=key_string)
Turn a key stored in the database into a Key or None. Args: key_string: The string representation of a Key. Returns: A datastore.Key instance or None
Turn a key stored in the database into a Key or None.
[ "Turn", "a", "key", "stored", "in", "the", "database", "into", "a", "Key", "or", "None", "." ]
def ParseKey(key_string):
  """Turn a key stored in the database into a Key or None.

  Args:
    key_string: The string representation of a Key.

  Returns:
    A datastore.Key instance or None
  """
  if not key_string:
    return None
  if key_string == 'None':
    return None
  return datastore.Key(encoded=key_string)
[ "def", "ParseKey", "(", "key_string", ")", ":", "if", "not", "key_string", ":", "return", "None", "if", "key_string", "==", "'None'", ":", "return", "None", "return", "datastore", ".", "Key", "(", "encoded", "=", "key_string", ")" ]
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/tools/bulkloader.py#L2599-L2612
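The guard pattern here (an empty string and the literal 'None' both map to None) is easy to check in isolation; a self-contained sketch without the App Engine datastore dependency, with a hypothetical stand-in for the real decoder:

# Self-contained analogue of ParseKey's guards; returning the string is a
# stand-in for datastore.Key(encoded=key_string), and the sample key below
# is an arbitrary placeholder.
def parse_optional_key(key_string):
    if not key_string:
        return None
    if key_string == 'None':
        return None
    return key_string  # the real function decodes a datastore.Key here

assert parse_optional_key('') is None
assert parse_optional_key('None') is None
assert parse_optional_key('ag9rZXk') == 'ag9rZXk'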
riptano/ccm
ce612ea71587bf263ed513cb8f8d5dfcf7c8dadb
ccmlib/remote.py
python
SSHClient.execute_python_script
(self, script)
return output
Execute a python script on the remote server :param script: Inline script to convert to a file and execute remotely :return: The output of the script execution
Execute a python script on the remote server
[ "Execute", "a", "python", "script", "on", "the", "remote", "server" ]
def execute_python_script(self, script):
    """
    Execute a python script on the remote server

    :param script: Inline script to convert to a file and execute remotely
    :return: The output of the script execution
    """
    # Create the local file to copy to remote
    file_handle, filename = tempfile.mkstemp()
    temp_file = os.fdopen(file_handle, "wt")
    temp_file.write(script)
    temp_file.close()

    # Put the file into the remote user directory
    self.put(filename, "python_execute.py")
    command = ["python", "python_execute.py"]

    # Execute the python script on the remote system, clean up, and return the output
    output = self.execute(command, False)
    self.remove("python_execute.py")
    os.unlink(filename)

    return output
[ "def", "execute_python_script", "(", "self", ",", "script", ")", ":", "# Create the local file to copy to remote", "file_handle", ",", "filename", "=", "tempfile", ".", "mkstemp", "(", ")", "temp_file", "=", "os", ".", "fdopen", "(", "file_handle", ",", "\"wt\"", ")", "temp_file", ".", "write", "(", "script", ")", "temp_file", ".", "close", "(", ")", "# Put the file into the remote user directory", "self", ".", "put", "(", "filename", ",", "\"python_execute.py\"", ")", "command", "=", "[", "\"python\"", ",", "\"python_execute.py\"", "]", "# Execute the python script on the remote system, clean up, and return the output", "output", "=", "self", ".", "execute", "(", "command", ",", "False", ")", "self", ".", "remove", "(", "\"python_execute.py\"", ")", "os", ".", "unlink", "(", "filename", ")", "return", "output" ]
https://github.com/riptano/ccm/blob/ce612ea71587bf263ed513cb8f8d5dfcf7c8dadb/ccmlib/remote.py#L257-L278
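The local half of this method (mkstemp, write, execute, unlink) can be demonstrated without a remote host; a runnable sketch of the same lifecycle, with a local subprocess standing in for the SSH put/execute/remove calls:

# Same temp-file lifecycle as execute_python_script, run locally.
import os
import subprocess
import sys
import tempfile

file_handle, filename = tempfile.mkstemp(suffix=".py")
with os.fdopen(file_handle, "wt") as temp_file:
    temp_file.write("print('hello')")
output = subprocess.check_output([sys.executable, filename], text=True)
os.unlink(filename)
print(output)  # -> hello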
idanr1986/cuckoo-droid
1350274639473d3d2b0ac740cae133ca53ab7444
analyzer/android/lib/api/androguard/dvm.py
python
FieldAnnotation.get_field_idx
(self)
return self.field_idx
Return the index into the field_ids list for the identity of the field being annotated :rtype: int
Return the index into the field_ids list for the identity of the field being annotated
[ "Return", "the", "index", "into", "the", "field_ids", "list", "for", "the", "identity", "of", "the", "field", "being", "annotated" ]
def get_field_idx(self):
    """
    Return the index into the field_ids list for the identity of the field being annotated

    :rtype: int
    """
    # The original body read `return self.get_field_idx`, which returns the
    # bound method itself rather than an int; almost certainly a typo for the
    # attribute holding the index (assumed here to be `self.field_idx`).
    return self.field_idx
[ "def", "get_field_idx", "(", "self", ")", ":", "return", "self", ".", "field_idx" ]
https://github.com/idanr1986/cuckoo-droid/blob/1350274639473d3d2b0ac740cae133ca53ab7444/analyzer/android/lib/api/androguard/dvm.py#L724-L730
asappresearch/flambe
98f10f859fe9223fd2d1d76d430f77cdbddc0956
flambe/tokenizer/label.py
python
LabelTokenizer.__init__
(self, multilabel_sep: Optional[str] = None)
Initialize the tokenizer. Parameters ---------- multilabel_sep : Optional[str], optional Used to split multi label inputs, if given
Initialize the tokenizer.
[ "Initialize", "the", "tokenizer", "." ]
def __init__(self, multilabel_sep: Optional[str] = None) -> None:
    """Initialize the tokenizer.

    Parameters
    ----------
    multilabel_sep : Optional[str], optional
        Used to split multi label inputs, if given

    """
    self.multilabel_sep = multilabel_sep
[ "def", "__init__", "(", "self", ",", "multilabel_sep", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "self", ".", "multilabel_sep", "=", "multilabel_sep" ]
https://github.com/asappresearch/flambe/blob/98f10f859fe9223fd2d1d76d430f77cdbddc0956/flambe/tokenizer/label.py#L14-L23
jellyfin/jellyfin-kodi
e21e059e000f06890b33e2794a7e57959fdf19a3
jellyfin_kodi/objects/utils.py
python
get_grouped_set
()
return result.get('result', {}).get('value', False)
Get if boxsets should be grouped
Get if boxsets should be grouped
[ "Get", "if", "boxsets", "should", "be", "grouped" ]
def get_grouped_set():

    ''' Get if boxsets should be grouped '''

    result = JSONRPC('Settings.GetSettingValue').execute({'setting': "videolibrary.groupmoviesets"})

    return result.get('result', {}).get('value', False)
[ "def", "get_grouped_set", "(", ")", ":", "result", "=", "JSONRPC", "(", "'Settings.GetSettingValue'", ")", ".", "execute", "(", "{", "'setting'", ":", "\"videolibrary.groupmoviesets\"", "}", ")", "return", "result", ".", "get", "(", "'result'", ",", "{", "}", ")", ".", "get", "(", "'value'", ",", "False", ")" ]
https://github.com/jellyfin/jellyfin-kodi/blob/e21e059e000f06890b33e2794a7e57959fdf19a3/jellyfin_kodi/objects/utils.py#L16-L21
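The JSONRPC helper wraps Kodi's JSON-RPC API; for reference, the equivalent raw request body (wire format assumed from Kodi's documented JSON-RPC v2 interface) looks like this:

# Raw Kodi JSON-RPC payload equivalent to the wrapped call above.
import json

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "Settings.GetSettingValue",
    "params": {"setting": "videolibrary.groupmoviesets"},
}
print(json.dumps(payload))
# A successful response carries {"result": {"value": true/false}}, which the
# chained .get() calls above unpack with False as the fallback.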
brython-dev/brython
9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3
www/src/Lib/html/parser.py
python
HTMLParser.get_starttag_text
(self)
return self.__starttag_text
Return full source of start tag: '<...>'.
Return full source of start tag: '<...>'.
[ "Return", "full", "source", "of", "start", "tag", ":", "<", "...", ">", "." ]
def get_starttag_text(self):
    """Return full source of start tag: '<...>'."""
    return self.__starttag_text
[ "def", "get_starttag_text", "(", "self", ")", ":", "return", "self", ".", "__starttag_text" ]
https://github.com/brython-dev/brython/blob/9cba5fb7f43a9b52fff13e89b403e02a1dfaa5f3/www/src/Lib/html/parser.py#L118-L120
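Usage is simplest from a handler callback, where the method returns the raw source of the tag that just fired; a standard-library example, runnable as-is:

# get_starttag_text() returns the verbatim source of the most recently
# parsed start tag.
from html.parser import HTMLParser

class StartTagEcho(HTMLParser):
    def handle_starttag(self, tag, attrs):
        print(self.get_starttag_text())

StartTagEcho().feed('<a href="https://example.com">link</a>')
# prints: <a href="https://example.com">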
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/Toggl-Time-Tracking/alp/request/requests/packages/oauthlib/oauth1/rfc5849/__init__.py
python
Server.validate_timestamp_and_nonce
(self, client_key, timestamp, nonce, request_token=None, access_token=None)
Validates that the nonce has not been used before. Per `Section 3.3`_ of the spec. "A nonce is a random string, uniquely generated by the client to allow the server to verify that a request has never been made before and helps prevent replay attacks when requests are made over a non-secure channel. The nonce value MUST be unique across all requests with the same timestamp, client credentials, and token combinations." .. _`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
Validates that the nonce has not been used before.
[ "Validates", "that", "the", "nonce", "has", "not", "been", "used", "before", "." ]
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
                                 request_token=None, access_token=None):
    """Validates that the nonce has not been used before.

    Per `Section 3.3`_ of the spec.

    "A nonce is a random string, uniquely generated by the client to allow
    the server to verify that a request has never been made before and
    helps prevent replay attacks when requests are made over a non-secure
    channel. The nonce value MUST be unique across all requests with the
    same timestamp, client credentials, and token combinations."

    .. _`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    raise NotImplementedError("Subclasses must implement this function.")
[ "def", "validate_timestamp_and_nonce", "(", "self", ",", "client_key", ",", "timestamp", ",", "nonce", ",", "request_token", "=", "None", ",", "access_token", "=", "None", ")", ":", "raise", "NotImplementedError", "(", "\"Subclasses must implement this function.\"", ")" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/Toggl-Time-Tracking/alp/request/requests/packages/oauthlib/oauth1/rfc5849/__init__.py#L548-L563
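A concrete subclass has to supply the storage; a minimal sketch, assuming an in-memory set is acceptable and that the validator follows this module's boolean return convention (a hypothetical subclass, not from the oauthlib source above; a real deployment needs a shared, expiring store):

# Minimal sketch of a concrete nonce validator (in-memory store assumed).
class InMemoryNonceServer(Server):
    def __init__(self):
        self._seen = set()

    def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
                                     request_token=None, access_token=None):
        key = (client_key, timestamp, nonce, request_token, access_token)
        if key in self._seen:
            return False  # same nonce/timestamp/credentials seen before: replay
        self._seen.add(key)
        return True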
darksigma/Fundamentals-of-Deep-Learning-Book
fa75f5186834b59de1950838b18803fdcef93101
archive/seq2seq/tmp_seq2seq_model.py
python
Seq2SeqModel.__init__
(self, source_vocab_size, target_vocab_size, buckets, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, use_lstm=False, num_samples=512, forward_only=False, dtype=tf.float32)
Create the model. Args: source_vocab_size: size of the source vocabulary. target_vocab_size: size of the target vocabulary. buckets: a list of pairs (I, O), where I specifies maximum input length that will be processed in that bucket, and O specifies maximum output length. Training instances that have inputs longer than I or outputs longer than O will be pushed to the next bucket and padded accordingly. We assume that the list is sorted, e.g., [(2, 4), (8, 16)]. size: number of units in each layer of the model. num_layers: number of layers in the model. max_gradient_norm: gradients will be clipped to maximally this norm. batch_size: the size of the batches used during training; the model construction is independent of batch_size, so it can be changed after initialization if this is convenient, e.g., for decoding. learning_rate: learning rate to start with. learning_rate_decay_factor: decay learning rate by this much when needed. use_lstm: if true, we use LSTM cells instead of GRU cells. num_samples: number of samples for sampled softmax. forward_only: if set, we do not construct the backward pass in the model.
Create the model.
[ "Create", "the", "model", "." ]
def __init__(self, source_vocab_size, target_vocab_size, buckets, size,
             num_layers, max_gradient_norm, batch_size, learning_rate,
             learning_rate_decay_factor, use_lstm=False,
             num_samples=512, forward_only=False, dtype=tf.float32):
  """Create the model.

  Args:
    source_vocab_size: size of the source vocabulary.
    target_vocab_size: size of the target vocabulary.
    buckets: a list of pairs (I, O), where I specifies maximum input length
      that will be processed in that bucket, and O specifies maximum output
      length. Training instances that have inputs longer than I or outputs
      longer than O will be pushed to the next bucket and padded accordingly.
      We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
    size: number of units in each layer of the model.
    num_layers: number of layers in the model.
    max_gradient_norm: gradients will be clipped to maximally this norm.
    batch_size: the size of the batches used during training;
      the model construction is independent of batch_size, so it can be
      changed after initialization if this is convenient, e.g., for decoding.
    learning_rate: learning rate to start with.
    learning_rate_decay_factor: decay learning rate by this much when needed.
    use_lstm: if true, we use LSTM cells instead of GRU cells.
    num_samples: number of samples for sampled softmax.
    forward_only: if set, we do not construct the backward pass in the model.
  """
  self.source_vocab_size = source_vocab_size
  self.target_vocab_size = target_vocab_size
  self.buckets = buckets
  self.batch_size = batch_size
  self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
  self.learning_rate_decay_op = self.learning_rate.assign(
      self.learning_rate * learning_rate_decay_factor)
  self.global_step = tf.Variable(0, trainable=False)

  # If we use sampled softmax, we need an output projection.
  output_projection = None
  softmax_loss_function = None
  # Sampled softmax only makes sense if we sample less than vocabulary size.
  if num_samples > 0 and num_samples < self.target_vocab_size:
    with tf.device("/cpu:0"):
      w = tf.get_variable("proj_w", [size, self.target_vocab_size])
      w_t = tf.transpose(w)
      b = tf.get_variable("proj_b", [self.target_vocab_size])
    output_projection = (w, b)

    def sampled_loss(inputs, labels):
      with tf.device("/cpu:0"):
        labels = tf.reshape(labels, [-1, 1])
        return tf.nn.sampled_softmax_loss(w_t, b, inputs, labels, num_samples,
                                          self.target_vocab_size)
    softmax_loss_function = sampled_loss

  # Create the internal multi-layer cell for our RNN.
  single_cell = tf.nn.rnn_cell.GRUCell(size)
  if use_lstm:
    single_cell = tf.nn.rnn_cell.BasicLSTMCell(size)
  cell = single_cell
  if num_layers > 1:
    cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * num_layers)

  # The seq2seq function: we use embedding for the input and attention.
  def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
    return seq2seq.embedding_attention_seq2seq(
        encoder_inputs, decoder_inputs, cell,
        num_encoder_symbols=source_vocab_size,
        num_decoder_symbols=target_vocab_size,
        embedding_size=size,
        output_projection=output_projection,
        feed_previous=do_decode)

  # Feeds for inputs.
  self.encoder_inputs = []
  self.decoder_inputs = []
  self.target_weights = []
  for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
    self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                              name="encoder{0}".format(i)))
  for i in xrange(buckets[-1][1] + 1):
    self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None],
                                              name="decoder{0}".format(i)))
    self.target_weights.append(tf.placeholder(tf.float32, shape=[None],
                                              name="weight{0}".format(i)))

  # Our targets are decoder inputs shifted by one.
  targets = [self.decoder_inputs[i + 1]
             for i in xrange(len(self.decoder_inputs) - 1)]

  # Training outputs and losses.
  if forward_only:
    self.outputs, self.losses = seq2seq.model_with_buckets(
        self.encoder_inputs, self.decoder_inputs, targets,
        self.target_weights, buckets,
        lambda x, y: seq2seq_f(x, y, True),
        softmax_loss_function=softmax_loss_function)
    # If we use output projection, we need to project outputs for decoding.
    if output_projection is not None:
      for b in xrange(len(buckets)):
        self.outputs[b] = [
            tf.matmul(output, output_projection[0]) + output_projection[1]
            for output in self.outputs[b]
        ]
  else:
    self.outputs, self.losses = seq2seq.model_with_buckets(
        self.encoder_inputs, self.decoder_inputs, targets,
        self.target_weights, buckets,
        lambda x, y: seq2seq_f(x, y, False),
        softmax_loss_function=softmax_loss_function)

  # Gradients and SGD update operation for training the model.
  params = tf.trainable_variables()
  if not forward_only:
    self.gradient_norms = []
    self.updates = []
    opt = tf.train.GradientDescentOptimizer(self.learning_rate)
    for b in xrange(len(buckets)):
      gradients = tf.gradients(self.losses[b], params)
      clipped_gradients, norm = tf.clip_by_global_norm(gradients,
                                                       max_gradient_norm)
      self.gradient_norms.append(norm)
      self.updates.append(opt.apply_gradients(
          zip(clipped_gradients, params), global_step=self.global_step))

  self.saver = tf.train.Saver(tf.all_variables())
[ "def", "__init__", "(", "self", ",", "source_vocab_size", ",", "target_vocab_size", ",", "buckets", ",", "size", ",", "num_layers", ",", "max_gradient_norm", ",", "batch_size", ",", "learning_rate", ",", "learning_rate_decay_factor", ",", "use_lstm", "=", "False", ",", "num_samples", "=", "512", ",", "forward_only", "=", "False", ",", "dtype", "=", "tf", ".", "float32", ")", ":", "self", ".", "source_vocab_size", "=", "source_vocab_size", "self", ".", "target_vocab_size", "=", "target_vocab_size", "self", ".", "buckets", "=", "buckets", "self", ".", "batch_size", "=", "batch_size", "self", ".", "learning_rate", "=", "tf", ".", "Variable", "(", "float", "(", "learning_rate", ")", ",", "trainable", "=", "False", ")", "self", ".", "learning_rate_decay_op", "=", "self", ".", "learning_rate", ".", "assign", "(", "self", ".", "learning_rate", "*", "learning_rate_decay_factor", ")", "self", ".", "global_step", "=", "tf", ".", "Variable", "(", "0", ",", "trainable", "=", "False", ")", "# If we use sampled softmax, we need an output projection.", "output_projection", "=", "None", "softmax_loss_function", "=", "None", "# Sampled softmax only makes sense if we sample less than vocabulary size.", "if", "num_samples", ">", "0", "and", "num_samples", "<", "self", ".", "target_vocab_size", ":", "with", "tf", ".", "device", "(", "\"/cpu:0\"", ")", ":", "w", "=", "tf", ".", "get_variable", "(", "\"proj_w\"", ",", "[", "size", ",", "self", ".", "target_vocab_size", "]", ")", "w_t", "=", "tf", ".", "transpose", "(", "w", ")", "b", "=", "tf", ".", "get_variable", "(", "\"proj_b\"", ",", "[", "self", ".", "target_vocab_size", "]", ")", "output_projection", "=", "(", "w", ",", "b", ")", "def", "sampled_loss", "(", "inputs", ",", "labels", ")", ":", "with", "tf", ".", "device", "(", "\"/cpu:0\"", ")", ":", "labels", "=", "tf", ".", "reshape", "(", "labels", ",", "[", "-", "1", ",", "1", "]", ")", "return", "tf", ".", "nn", ".", "sampled_softmax_loss", "(", "w_t", ",", "b", ",", "inputs", ",", "labels", ",", "num_samples", ",", "self", ".", "target_vocab_size", ")", "softmax_loss_function", "=", "sampled_loss", "# Create the internal multi-layer cell for our RNN.", "single_cell", "=", "tf", ".", "nn", ".", "rnn_cell", ".", "GRUCell", "(", "size", ")", "if", "use_lstm", ":", "single_cell", "=", "tf", ".", "nn", ".", "rnn_cell", ".", "BasicLSTMCell", "(", "size", ")", "cell", "=", "single_cell", "if", "num_layers", ">", "1", ":", "cell", "=", "tf", ".", "nn", ".", "rnn_cell", ".", "MultiRNNCell", "(", "[", "single_cell", "]", "*", "num_layers", ")", "# The seq2seq function: we use embedding for the input and attention.", "def", "seq2seq_f", "(", "encoder_inputs", ",", "decoder_inputs", ",", "do_decode", ")", ":", "return", "seq2seq", ".", "embedding_attention_seq2seq", "(", "encoder_inputs", ",", "decoder_inputs", ",", "cell", ",", "num_encoder_symbols", "=", "source_vocab_size", ",", "num_decoder_symbols", "=", "target_vocab_size", ",", "embedding_size", "=", "size", ",", "output_projection", "=", "output_projection", ",", "feed_previous", "=", "do_decode", ")", "# Feeds for inputs.", "self", ".", "encoder_inputs", "=", "[", "]", "self", ".", "decoder_inputs", "=", "[", "]", "self", ".", "target_weights", "=", "[", "]", "for", "i", "in", "xrange", "(", "buckets", "[", "-", "1", "]", "[", "0", "]", ")", ":", "# Last bucket is the biggest one.", "self", ".", "encoder_inputs", ".", "append", "(", "tf", ".", "placeholder", "(", "tf", ".", "int32", ",", "shape", "=", "[", "None", "]", ",", "name", "=", 
"\"encoder{0}\"", ".", "format", "(", "i", ")", ")", ")", "for", "i", "in", "xrange", "(", "buckets", "[", "-", "1", "]", "[", "1", "]", "+", "1", ")", ":", "self", ".", "decoder_inputs", ".", "append", "(", "tf", ".", "placeholder", "(", "tf", ".", "int32", ",", "shape", "=", "[", "None", "]", ",", "name", "=", "\"decoder{0}\"", ".", "format", "(", "i", ")", ")", ")", "self", ".", "target_weights", ".", "append", "(", "tf", ".", "placeholder", "(", "tf", ".", "float32", ",", "shape", "=", "[", "None", "]", ",", "name", "=", "\"weight{0}\"", ".", "format", "(", "i", ")", ")", ")", "# Our targets are decoder inputs shifted by one.", "targets", "=", "[", "self", ".", "decoder_inputs", "[", "i", "+", "1", "]", "for", "i", "in", "xrange", "(", "len", "(", "self", ".", "decoder_inputs", ")", "-", "1", ")", "]", "# Training outputs and losses.", "if", "forward_only", ":", "self", ".", "outputs", ",", "self", ".", "losses", "=", "seq2seq", ".", "model_with_buckets", "(", "self", ".", "encoder_inputs", ",", "self", ".", "decoder_inputs", ",", "targets", ",", "self", ".", "target_weights", ",", "buckets", ",", "lambda", "x", ",", "y", ":", "seq2seq_f", "(", "x", ",", "y", ",", "True", ")", ",", "softmax_loss_function", "=", "softmax_loss_function", ")", "# If we use output projection, we need to project outputs for decoding.", "if", "output_projection", "is", "not", "None", ":", "for", "b", "in", "xrange", "(", "len", "(", "buckets", ")", ")", ":", "self", ".", "outputs", "[", "b", "]", "=", "[", "tf", ".", "matmul", "(", "output", ",", "output_projection", "[", "0", "]", ")", "+", "output_projection", "[", "1", "]", "for", "output", "in", "self", ".", "outputs", "[", "b", "]", "]", "else", ":", "self", ".", "outputs", ",", "self", ".", "losses", "=", "seq2seq", ".", "model_with_buckets", "(", "self", ".", "encoder_inputs", ",", "self", ".", "decoder_inputs", ",", "targets", ",", "self", ".", "target_weights", ",", "buckets", ",", "lambda", "x", ",", "y", ":", "seq2seq_f", "(", "x", ",", "y", ",", "False", ")", ",", "softmax_loss_function", "=", "softmax_loss_function", ")", "# Gradients and SGD update operation for training the model.", "params", "=", "tf", ".", "trainable_variables", "(", ")", "if", "not", "forward_only", ":", "self", ".", "gradient_norms", "=", "[", "]", "self", ".", "updates", "=", "[", "]", "opt", "=", "tf", ".", "train", ".", "GradientDescentOptimizer", "(", "self", ".", "learning_rate", ")", "for", "b", "in", "xrange", "(", "len", "(", "buckets", ")", ")", ":", "gradients", "=", "tf", ".", "gradients", "(", "self", ".", "losses", "[", "b", "]", ",", "params", ")", "clipped_gradients", ",", "norm", "=", "tf", ".", "clip_by_global_norm", "(", "gradients", ",", "max_gradient_norm", ")", "self", ".", "gradient_norms", ".", "append", "(", "norm", ")", "self", ".", "updates", ".", "append", "(", "opt", ".", "apply_gradients", "(", "zip", "(", "clipped_gradients", ",", "params", ")", ",", "global_step", "=", "self", ".", "global_step", ")", ")", "self", ".", "saver", "=", "tf", ".", "train", ".", "Saver", "(", "tf", ".", "all_variables", "(", ")", ")" ]
https://github.com/darksigma/Fundamentals-of-Deep-Learning-Book/blob/fa75f5186834b59de1950838b18803fdcef93101/archive/seq2seq/tmp_seq2seq_model.py#L47-L169
gramps-project/gramps
04d4651a43eb210192f40a9f8c2bad8ee8fa3753
gramps/gui/widgets/undoablebuffer.py
python
UndoableBuffer.redo
(self)
redo inserts or deletions redone actions are moved to undo stack
redo inserts or deletions
[ "redo", "inserts", "or", "deletions" ]
def redo(self):
    """redo inserts or deletions

    redone actions are moved to undo stack"""
    if not self.redo_stack:
        return
    self.begin_not_undoable_action()
    self.undo_in_progress = True
    redo_action = self.redo_stack.pop()
    self.undo_stack.append(redo_action)
    if isinstance(redo_action, self.insertclass):
        self._redo_insert(redo_action)
    elif isinstance(redo_action, self.deleteclass):
        self._redo_delete(redo_action)
    else:
        self._handle_redo(redo_action)
    self.end_not_undoable_action()
    self.undo_in_progress = False
[ "def", "redo", "(", "self", ")", ":", "if", "not", "self", ".", "redo_stack", ":", "return", "self", ".", "begin_not_undoable_action", "(", ")", "self", ".", "undo_in_progress", "=", "True", "redo_action", "=", "self", ".", "redo_stack", ".", "pop", "(", ")", "self", ".", "undo_stack", ".", "append", "(", "redo_action", ")", "if", "isinstance", "(", "redo_action", ",", "self", ".", "insertclass", ")", ":", "self", ".", "_redo_insert", "(", "redo_action", ")", "elif", "isinstance", "(", "redo_action", ",", "self", ".", "deleteclass", ")", ":", "self", ".", "_redo_delete", "(", "redo_action", ")", "else", ":", "self", ".", "_handle_redo", "(", "redo_action", ")", "self", ".", "end_not_undoable_action", "(", ")", "self", ".", "undo_in_progress", "=", "False" ]
https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/gui/widgets/undoablebuffer.py#L272-L289
DSE-MSU/DeepRobust
2bcde200a5969dae32cddece66206a52c87c43e8
deeprobust/image/defense/AWP.py
python
pgd_AWP.__init__
(self, model, proxy, proxy_optim, gamma)
[]
def __init__(self, model, proxy, proxy_optim, gamma):
    super(pgd_AWP, self).__init__()
    self.model = model
    self.proxy = proxy
    self.proxy_optim = proxy_optim
    self.gamma = gamma
[ "def", "__init__", "(", "self", ",", "model", ",", "proxy", ",", "proxy_optim", ",", "gamma", ")", ":", "super", "(", "pgd_AWP", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "model", "=", "model", "self", ".", "proxy", "=", "proxy", "self", ".", "proxy_optim", "=", "proxy_optim", "self", ".", "gamma", "=", "gamma" ]
https://github.com/DSE-MSU/DeepRobust/blob/2bcde200a5969dae32cddece66206a52c87c43e8/deeprobust/image/defense/AWP.py#L45-L50
microsoft/nni
31f11f51249660930824e888af0d4e022823285c
nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py
python
MovementPruner._wrap_modules
(self, layer: LayerInfo, config: Dict)
return wrapper
Create a wrapper module to replace the original one. Different from the parent function, use `PrunerScoredModuleWrapper` instead of `PrunerModuleWrapper`. Parameters ---------- layer The layer to instrument the mask. config The configuration for generating the mask.
Create a wrapper module to replace the original one. Different from the parent function, use `PrunerScoredModuleWrapper` instead of `PrunerModuleWrapper`.
[ "Create", "a", "wrapper", "module", "to", "replace", "the", "original", "one", ".", "Different", "from", "the", "parent", "function", "use", "PrunerScoredModuleWrapper", "instead", "of", "PrunerModuleWrapper", "." ]
def _wrap_modules(self, layer: LayerInfo, config: Dict):
    """
    Create a wrapper module to replace the original one.
    Different from the parent function, use `PrunerScoredModuleWrapper` instead of `PrunerModuleWrapper`.

    Parameters
    ----------
    layer
        The layer to instrument the mask.
    config
        The configuration for generating the mask.
    """
    _logger.debug("Module detected to compress : %s.", layer.name)
    wrapper = PrunerScoredModuleWrapper(layer.module, layer.name, config, self)
    assert hasattr(layer.module, 'weight'), "module %s does not have 'weight' attribute" % layer.name
    # move newly registered buffers to the same device of weight
    wrapper.to(layer.module.weight.device)
    return wrapper
[ "def", "_wrap_modules", "(", "self", ",", "layer", ":", "LayerInfo", ",", "config", ":", "Dict", ")", ":", "_logger", ".", "debug", "(", "\"Module detected to compress : %s.\"", ",", "layer", ".", "name", ")", "wrapper", "=", "PrunerScoredModuleWrapper", "(", "layer", ".", "module", ",", "layer", ".", "name", ",", "config", ",", "self", ")", "assert", "hasattr", "(", "layer", ".", "module", ",", "'weight'", ")", ",", "\"module %s does not have 'weight' attribute\"", "%", "layer", ".", "name", "# move newly registered buffers to the same device of weight", "wrapper", ".", "to", "(", "layer", ".", "module", ".", "weight", ".", "device", ")", "return", "wrapper" ]
https://github.com/microsoft/nni/blob/31f11f51249660930824e888af0d4e022823285c/nni/algorithms/compression/v2/pytorch/pruning/movement_pruner.py#L254-L271
plaid/plaid-python
8c60fca608e426f3ff30da8857775946d29e122c
plaid/model/account_base.py
python
AccountBase.__init__
(self, account_id, balances, mask, name, official_name, type, subtype, *args, **kwargs)
AccountBase - a model defined in OpenAPI Args: account_id (str): Plaid’s unique identifier for the account. This value will not change unless Plaid can't reconcile the account with the data returned by the financial institution. This may occur, for example, when the name of the account changes. If this happens a new `account_id` will be assigned to the account. The `account_id` can also change if the `access_token` is deleted and the same credentials that were used to generate that `access_token` are used to generate a new `access_token` on a later date. In that case, the new `account_id` will be different from the old `account_id`. If an account with a specific `account_id` disappears instead of changing, the account is likely closed. Closed accounts are not returned by the Plaid API. Like all Plaid identifiers, the `account_id` is case sensitive. balances (AccountBalance): mask (str, none_type): The last 2-4 alphanumeric characters of an account's official account number. Note that the mask may be non-unique between an Item's accounts, and it may also not match the mask that the bank displays to the user. name (str): The name of the account, either assigned by the user or by the financial institution itself official_name (str, none_type): The official name of the account as given by the financial institution type (AccountType): subtype (AccountSubtype): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) verification_status (str): The current verification status of an Auth Item initiated through Automated or Manual micro-deposits. Returned for Auth Items only. `pending_automatic_verification`: The Item is pending automatic verification `pending_manual_verification`: The Item is pending manual micro-deposit verification. Items remain in this state until the user successfully verifies the two amounts. `automatically_verified`: The Item has successfully been automatically verified `manually_verified`: The Item has successfully been manually verified `verification_expired`: Plaid was unable to automatically verify the deposit within 7 calendar days and will no longer attempt to validate the Item. Users may retry by submitting their information again through Link. 
`verification_failed`: The Item failed manual micro-deposit verification because the user exhausted all 3 verification attempts. Users may retry by submitting their information again through Link. . [optional] # noqa: E501
AccountBase - a model defined in OpenAPI
[ "AccountBase", "-", "a", "model", "defined", "in", "OpenAPI" ]
def __init__(self, account_id, balances, mask, name, official_name, type, subtype, *args, **kwargs):  # noqa: E501
    """AccountBase - a model defined in OpenAPI

    Args:
        account_id (str): Plaid’s unique identifier for the account. This value will
            not change unless Plaid can't reconcile the account with the data returned
            by the financial institution. This may occur, for example, when the name of
            the account changes. If this happens a new `account_id` will be assigned to
            the account. The `account_id` can also change if the `access_token` is
            deleted and the same credentials that were used to generate that
            `access_token` are used to generate a new `access_token` on a later date.
            In that case, the new `account_id` will be different from the old
            `account_id`. If an account with a specific `account_id` disappears instead
            of changing, the account is likely closed. Closed accounts are not returned
            by the Plaid API. Like all Plaid identifiers, the `account_id` is case
            sensitive.
        balances (AccountBalance):
        mask (str, none_type): The last 2-4 alphanumeric characters of an account's
            official account number. Note that the mask may be non-unique between an
            Item's accounts, and it may also not match the mask that the bank displays
            to the user.
        name (str): The name of the account, either assigned by the user or by the
            financial institution itself
        official_name (str, none_type): The official name of the account as given by
            the financial institution
        type (AccountType):
        subtype (AccountSubtype):

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
            will be type checked and a TypeError will be raised if the wrong
            type is input. Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to drill
            down to the model in received_data when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input
            data are serialized names, as specified in the OpenAPI document.
            False if the variable names in the input data are pythonic names,
            e.g. snake case (default)
        _configuration (Configuration): the instance to use when deserializing
            a file_type parameter. If passed, type conversion is attempted
            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of classes that
            we have traveled through so that if we see that class again we
            will not use its discriminator again. When traveling through a
            discriminator, the composed schema that is traveled through is
            added to this set. For example if Animal has a discriminator
            petType and we pass in "Dog", and the class Dog allOf includes
            Animal, we move through Animal once using the discriminator, and
            pick Dog. Then in Dog, we will make an instance of the Animal
            class but this time we won't travel through its discriminator
            because we passed in _visited_composed_classes = (Animal,)
        verification_status (str): The current verification status of an Auth Item
            initiated through Automated or Manual micro-deposits. Returned for Auth
            Items only. `pending_automatic_verification`: The Item is pending automatic
            verification `pending_manual_verification`: The Item is pending manual
            micro-deposit verification. Items remain in this state until the user
            successfully verifies the two amounts. `automatically_verified`: The Item
            has successfully been automatically verified `manually_verified`: The Item
            has successfully been manually verified `verification_expired`: Plaid was
            unable to automatically verify the deposit within 7 calendar days and will
            no longer attempt to validate the Item. Users may retry by submitting their
            information again through Link. `verification_failed`: The Item failed
            manual micro-deposit verification because the user exhausted all 3
            verification attempts. Users may retry by submitting their information
            again through Link. [optional]  # noqa: E501
    """

    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', False)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

    if args:
        raise ApiTypeError(
            "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                args,
                self.__class__.__name__,
            ),
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )

    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

    self.account_id = account_id
    self.balances = balances
    self.mask = mask
    self.name = name
    self.official_name = official_name
    self.type = type
    self.subtype = subtype
    for var_name, var_value in kwargs.items():
        if var_name not in self.attribute_map and \
                self._configuration is not None and \
                self._configuration.discard_unknown_keys and \
                self.additional_properties_type is None:
            # discard variable.
            continue
        setattr(self, var_name, var_value)
[ "def", "__init__", "(", "self", ",", "account_id", ",", "balances", ",", "mask", ",", "name", ",", "official_name", ",", "type", ",", "subtype", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "_check_type", "=", "kwargs", ".", "pop", "(", "'_check_type'", ",", "True", ")", "_spec_property_naming", "=", "kwargs", ".", "pop", "(", "'_spec_property_naming'", ",", "False", ")", "_path_to_item", "=", "kwargs", ".", "pop", "(", "'_path_to_item'", ",", "(", ")", ")", "_configuration", "=", "kwargs", ".", "pop", "(", "'_configuration'", ",", "None", ")", "_visited_composed_classes", "=", "kwargs", ".", "pop", "(", "'_visited_composed_classes'", ",", "(", ")", ")", "if", "args", ":", "raise", "ApiTypeError", "(", "\"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\"", "%", "(", "args", ",", "self", ".", "__class__", ".", "__name__", ",", ")", ",", "path_to_item", "=", "_path_to_item", ",", "valid_classes", "=", "(", "self", ".", "__class__", ",", ")", ",", ")", "self", ".", "_data_store", "=", "{", "}", "self", ".", "_check_type", "=", "_check_type", "self", ".", "_spec_property_naming", "=", "_spec_property_naming", "self", ".", "_path_to_item", "=", "_path_to_item", "self", ".", "_configuration", "=", "_configuration", "self", ".", "_visited_composed_classes", "=", "_visited_composed_classes", "+", "(", "self", ".", "__class__", ",", ")", "self", ".", "account_id", "=", "account_id", "self", ".", "balances", "=", "balances", "self", ".", "mask", "=", "mask", "self", ".", "name", "=", "name", "self", ".", "official_name", "=", "official_name", "self", ".", "type", "=", "type", "self", ".", "subtype", "=", "subtype", "for", "var_name", ",", "var_value", "in", "kwargs", ".", "items", "(", ")", ":", "if", "var_name", "not", "in", "self", ".", "attribute_map", "and", "self", ".", "_configuration", "is", "not", "None", "and", "self", ".", "_configuration", ".", "discard_unknown_keys", "and", "self", ".", "additional_properties_type", "is", "None", ":", "# discard variable.", "continue", "setattr", "(", "self", ",", "var_name", ",", "var_value", ")" ]
https://github.com/plaid/plaid-python/blob/8c60fca608e426f3ff30da8857775946d29e122c/plaid/model/account_base.py#L135-L218
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/odict/odict.py
python
Keys.__setitem__
(self, index, name)
You cannot assign to keys, but you can do slice assignment to re-order them. You can only do slice assignment if the new set of keys is a reordering of the original set.
You cannot assign to keys, but you can do slice assignment to re-order them.
[ "You", "cannot", "assign", "to", "keys", "but", "you", "can", "do", "slice", "assignment", "to", "re", "-", "order", "them", "." ]
def __setitem__(self, index, name):
    """
    You cannot assign to keys, but you can do slice assignment to re-order
    them.

    You can only do slice assignment if the new set of keys is a reordering
    of the original set.
    """
    if isinstance(index, types.SliceType):
        # FIXME: efficiency?
        # check length is the same
        indexes = range(len(self._main._sequence))[index]
        if len(indexes) != len(name):
            raise ValueError('attempt to assign sequence of size %s '
                'to slice of size %s' % (len(name), len(indexes)))
        # check they are the same keys
        # FIXME: Use set
        old_keys = self._main._sequence[index]
        new_keys = list(name)
        old_keys.sort()
        new_keys.sort()
        if old_keys != new_keys:
            raise KeyError('Keylist is not the same as current keylist.')
        orig_vals = [self._main[k] for k in name]
        del self._main[index]
        vals = zip(indexes, name, orig_vals)
        vals.sort()
        for i, k, v in vals:
            if self._main.strict and k in self._main:
                raise ValueError('slice assignment must be from '
                    'unique keys')
            self._main.insert(i, k, v)
    else:
        raise ValueError('Cannot assign to keys')
[ "def", "__setitem__", "(", "self", ",", "index", ",", "name", ")", ":", "if", "isinstance", "(", "index", ",", "types", ".", "SliceType", ")", ":", "# FIXME: efficiency?", "# check length is the same", "indexes", "=", "range", "(", "len", "(", "self", ".", "_main", ".", "_sequence", ")", ")", "[", "index", "]", "if", "len", "(", "indexes", ")", "!=", "len", "(", "name", ")", ":", "raise", "ValueError", "(", "'attempt to assign sequence of size %s '", "'to slice of size %s'", "%", "(", "len", "(", "name", ")", ",", "len", "(", "indexes", ")", ")", ")", "# check they are the same keys", "# FIXME: Use set", "old_keys", "=", "self", ".", "_main", ".", "_sequence", "[", "index", "]", "new_keys", "=", "list", "(", "name", ")", "old_keys", ".", "sort", "(", ")", "new_keys", ".", "sort", "(", ")", "if", "old_keys", "!=", "new_keys", ":", "raise", "KeyError", "(", "'Keylist is not the same as current keylist.'", ")", "orig_vals", "=", "[", "self", ".", "_main", "[", "k", "]", "for", "k", "in", "name", "]", "del", "self", ".", "_main", "[", "index", "]", "vals", "=", "zip", "(", "indexes", ",", "name", ",", "orig_vals", ")", "vals", ".", "sort", "(", ")", "for", "i", ",", "k", ",", "v", "in", "vals", ":", "if", "self", ".", "_main", ".", "strict", "and", "k", "in", "self", ".", "_main", ":", "raise", "ValueError", "(", "'slice assignment must be from '", "'unique keys'", ")", "self", ".", "_main", ".", "insert", "(", "i", ",", "k", ",", "v", ")", "else", ":", "raise", "ValueError", "(", "'Cannot assign to keys'", ")" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit - MAC OSX/tools/sqli/thirdparty/odict/odict.py#L898-L931
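In use, the slice-assignment path amounts to reordering; a hypothetical Python 2-era session, assuming this odict module's SequenceOrderedDict (whose `keys` attribute is the Keys object shown above) is importable:

# Hypothetical usage: slice assignment with a permutation reorders keys,
# anything else raises (names and output assumed from the code above).
d = SequenceOrderedDict([('a', 1), ('b', 2), ('c', 3)])
d.keys[:] = ['c', 'a', 'b']      # a permutation of the existing keys
print(d.items())                 # [('c', 3), ('a', 1), ('b', 2)]
try:
    d.keys[0] = 'x'              # non-slice assignment is rejected
except ValueError as e:
    print(e)                     # Cannot assign to keys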
buke/GreenOdoo
3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df
runtime/python/lib/python2.7/site-packages/gdata/apps/multidomain/data.py
python
AliasEntry.SetUserEmail
(self, value)
Set the user email address of this Alias object. Args: value: string The new user email address to give this object.
Set the user email address of this Alias object.
[ "Set", "the", "user", "email", "address", "of", "this", "Alias", "object", "." ]
def SetUserEmail(self, value):
    """Set the user email address of this Alias object.

    Args:
      value: string The new user email address to give this object.
    """
    self._SetProperty(USER_EMAIL, value)
[ "def", "SetUserEmail", "(", "self", ",", "value", ")", ":", "self", ".", "_SetProperty", "(", "USER_EMAIL", ",", "value", ")" ]
https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/gdata/apps/multidomain/data.py#L366-L372
a1600012888/YOPO-You-Only-Propagate-Once
b8ae668be829a0ca50647ae21676a932a9634365
lib/base_model/network.py
python
PreActBlock.forward
(self, x)
return out
[]
def forward(self, x):
    out = F.relu(self.bn1(x))
    shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
    out = self.conv1(out)
    out = self.conv2(F.relu(self.bn2(out)))
    out += shortcut
    return out
[ "def", "forward", "(", "self", ",", "x", ")", ":", "out", "=", "F", ".", "relu", "(", "self", ".", "bn1", "(", "x", ")", ")", "shortcut", "=", "self", ".", "shortcut", "(", "out", ")", "if", "hasattr", "(", "self", ",", "'shortcut'", ")", "else", "x", "out", "=", "self", ".", "conv1", "(", "out", ")", "out", "=", "self", ".", "conv2", "(", "F", ".", "relu", "(", "self", ".", "bn2", "(", "out", ")", ")", ")", "out", "+=", "shortcut", "return", "out" ]
https://github.com/a1600012888/YOPO-You-Only-Propagate-Once/blob/b8ae668be829a0ca50647ae21676a932a9634365/lib/base_model/network.py#L28-L34
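The forward pass above implies a specific layout (pre-activation BN then conv, with a projection shortcut defined only when shapes change); a sketch of a matching constructor, assuming the standard pre-activation ResNet recipe since the repository's __init__ is not included in this record:

# Sketch of a constructor consistent with forward() above (layer sizes
# assumed from the standard pre-activation ResNet block).
import torch.nn as nn

class PreActBlockSketch(nn.Module):
    def __init__(self, in_planes, planes, stride=1):
        super().__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        if stride != 1 or in_planes != planes:
            # defined only when shapes change, matching the hasattr() check
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1,
                          stride=stride, bias=False))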
vt-vl-lab/DF-Net
53f4e016b881d55624042f755235eb8d7d248209
core/UnFlow/src/e2eflow/util.py
python
config_dict
(config_path=CONFIG_PATH)
return d
Returns the config as dictionary, where the elements have intuitively correct types.
Returns the config as dictionary, where the elements have intuitively correct types.
[ "Returns", "the", "config", "as", "dictionary", "where", "the", "elements", "have", "intuitively", "correct", "types", "." ]
def config_dict(config_path=CONFIG_PATH):
    """Returns the config as dictionary,
    where the elements have intuitively correct types.
    """
    config = configparser.ConfigParser()
    config.read(config_path)
    d = dict()
    for section_key in config.sections():
        sd = dict()
        section = config[section_key]
        for key in section:
            val = section[key]
            try:
                sd[key] = int(val)
            except ValueError:
                try:
                    sd[key] = float(val)
                except ValueError:
                    try:
                        sd[key] = section.getboolean(key)
                    except ValueError:
                        sd[key] = val
        d[section_key] = sd
    return d
[ "def", "config_dict", "(", "config_path", "=", "CONFIG_PATH", ")", ":", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "config_path", ")", "d", "=", "dict", "(", ")", "for", "section_key", "in", "config", ".", "sections", "(", ")", ":", "sd", "=", "dict", "(", ")", "section", "=", "config", "[", "section_key", "]", "for", "key", "in", "section", ":", "val", "=", "section", "[", "key", "]", "try", ":", "sd", "[", "key", "]", "=", "int", "(", "val", ")", "except", "ValueError", ":", "try", ":", "sd", "[", "key", "]", "=", "float", "(", "val", ")", "except", "ValueError", ":", "try", ":", "sd", "[", "key", "]", "=", "section", ".", "getboolean", "(", "key", ")", "except", "ValueError", ":", "sd", "[", "key", "]", "=", "val", "d", "[", "section_key", "]", "=", "sd", "return", "d" ]
https://github.com/vt-vl-lab/DF-Net/blob/53f4e016b881d55624042f755235eb8d7d248209/core/UnFlow/src/e2eflow/util.py#L37-L62
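The try/except cascade means ints win over floats, floats over booleans, and everything else stays a string; a quick self-contained check, assuming config_dict from above is in scope (the INI contents are illustrative):

# Quick check of the fallback order (int -> float -> boolean -> str).
import os
import tempfile

ini = "[run]\nbatch_size = 4\nlr = 0.001\nshuffle = yes\nname = unflow\n"
with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as f:
    f.write(ini)
    path = f.name

print(config_dict(path))
# -> {'run': {'batch_size': 4, 'lr': 0.001, 'shuffle': True, 'name': 'unflow'}}
os.unlink(path)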
beeware/ouroboros
a29123c6fab6a807caffbb7587cf548e0c370296
ouroboros/multiprocessing/pool.py
python
Pool._join_exited_workers
(self)
return cleaned
Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up.
Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up.
[ "Cleanup", "after", "any", "worker", "processes", "which", "have", "exited", "due", "to", "reaching", "their", "specified", "lifetime", ".", "Returns", "True", "if", "any", "workers", "were", "cleaned", "up", "." ]
def _join_exited_workers(self):
    """Cleanup after any worker processes which have exited due to reaching
    their specified lifetime.  Returns True if any workers were cleaned up.
    """
    cleaned = False
    for i in reversed(range(len(self._pool))):
        worker = self._pool[i]
        if worker.exitcode is not None:
            # worker exited
            util.debug('cleaning up worker %d' % i)
            worker.join()
            cleaned = True
            del self._pool[i]
    return cleaned
[ "def", "_join_exited_workers", "(", "self", ")", ":", "cleaned", "=", "False", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "self", ".", "_pool", ")", ")", ")", ":", "worker", "=", "self", ".", "_pool", "[", "i", "]", "if", "worker", ".", "exitcode", "is", "not", "None", ":", "# worker exited", "util", ".", "debug", "(", "'cleaning up worker %d'", "%", "i", ")", "worker", ".", "join", "(", ")", "cleaned", "=", "True", "del", "self", ".", "_pool", "[", "i", "]", "return", "cleaned" ]
https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/multiprocessing/pool.py#L204-L217
isnowfy/pydown
71ecc891868cd2a34b7e5fe662c99474f2d0fd7f
markdown/extensions/abbr.py
python
AbbrExtension.extendMarkdown
(self, md, md_globals)
Insert AbbrPreprocessor before ReferencePreprocessor.
Insert AbbrPreprocessor before ReferencePreprocessor.
[ "Insert", "AbbrPreprocessor", "before", "ReferencePreprocessor", "." ]
def extendMarkdown(self, md, md_globals):
    """ Insert AbbrPreprocessor before ReferencePreprocessor. """
    md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
[ "def", "extendMarkdown", "(", "self", ",", "md", ",", "md_globals", ")", ":", "md", ".", "preprocessors", ".", "add", "(", "'abbr'", ",", "AbbrPreprocessor", "(", "md", ")", ",", "'<reference'", ")" ]
https://github.com/isnowfy/pydown/blob/71ecc891868cd2a34b7e5fe662c99474f2d0fd7f/markdown/extensions/abbr.py#L36-L38
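From user code the extension is enabled by name; a hypothetical invocation against the Python-Markdown API, assuming the extension is registered under the 'abbr' name used above:

# Hypothetical usage of the abbr extension via Python-Markdown.
import markdown

text = "*[HTML]: Hyper Text Markup Language\n\nHTML is parsed here."
print(markdown.markdown(text, extensions=["abbr"]))
# roughly: <p><abbr title="Hyper Text Markup Language">HTML</abbr> is parsed here.</p>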
michael-lazar/rtv
b3d5bf16a70dba685e05db35308cc8a6d2b7f7aa
rtv/packages/praw/__init__.py
python
AuthenticatedReddit.is_logged_in
(self)
return self._authentication is True
Return True when the session is authenticated via username/password. Username and passwords are provided via :meth:`~praw.__init__.AuthenticatedReddit.login`.
Return True when the session is authenticated via username/password.
[ "Return", "True", "when", "the", "session", "is", "authenticated", "via", "username", "/", "password", "." ]
def is_logged_in(self):
    """Return True when the session is authenticated via username/password.

    Username and passwords are provided via
    :meth:`~praw.__init__.AuthenticatedReddit.login`.
    """
    return self._authentication is True
[ "def", "is_logged_in", "(", "self", ")", ":", "return", "self", ".", "_authentication", "is", "True" ]
https://github.com/michael-lazar/rtv/blob/b3d5bf16a70dba685e05db35308cc8a6d2b7f7aa/rtv/packages/praw/__init__.py#L1455-L1462
kaaedit/kaa
e6a8819a5ecba04b7db8303bd5736b5a7c9b822d
kaa/filetype/python/pythonmode.py
python
PythonMode.init_themes
(self)
[]
def init_themes(self):
    super().init_themes()
    self.themes.append(PythonThemes)
[ "def", "init_themes", "(", "self", ")", ":", "super", "(", ")", ".", "init_themes", "(", ")", "self", ".", "themes", ".", "append", "(", "PythonThemes", ")" ]
https://github.com/kaaedit/kaa/blob/e6a8819a5ecba04b7db8303bd5736b5a7c9b822d/kaa/filetype/python/pythonmode.py#L96-L98
JiYou/openstack
8607dd488bde0905044b303eb6e52bdea6806923
packages/source/nova/nova/db/sqlalchemy/api.py
python
instance_info_cache_delete
(context, instance_uuid)
Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record :param session: = optional session object
Deletes an existing instance_info_cache record
[ "Deletes", "an", "existing", "instance_info_cache", "record" ]
def instance_info_cache_delete(context, instance_uuid):
    """Deletes an existing instance_info_cache record

    :param instance_uuid: = uuid of the instance tied to the cache record
    :param session: = optional session object
    """
    model_query(context, models.InstanceInfoCache).\
            filter_by(instance_uuid=instance_uuid).\
            soft_delete()
[ "def", "instance_info_cache_delete", "(", "context", ",", "instance_uuid", ")", ":", "model_query", "(", "context", ",", "models", ".", "InstanceInfoCache", ")", ".", "filter_by", "(", "instance_uuid", "=", "instance_uuid", ")", ".", "soft_delete", "(", ")" ]
https://github.com/JiYou/openstack/blob/8607dd488bde0905044b303eb6e52bdea6806923/packages/source/nova/nova/db/sqlalchemy/api.py#L2027-L2035
ifwe/digsby
f5fe00244744aa131e07f09348d10563f3d8fa99
digsby/lib/pyxmpp/clientstream.py
python
ClientStream.do_bind
(self,stanza)
Do the resource binding requested by a connected client. [server only] :Parameters: - `stanza`: resource binding request stanza. :Types: - `stanza`: `pyxmpp.Iq`
Do the resource binding requested by a connected client.
[ "Do", "the", "resource", "binding", "requested", "by", "a", "connected", "client", "." ]
def do_bind(self, stanza):
    """Do the resource binding requested by a connected client.

    [server only]

    :Parameters:
        - `stanza`: resource binding request stanza.
    :Types:
        - `stanza`: `pyxmpp.Iq`"""
    fr = stanza.get_from()
    if fr and fr != self.peer:
        r = stanza.make_error_response("forbidden")
        self.send(r)
        r.free()
        return
    resource_n = stanza.xpath_eval("bind:bind/bind:resource", {"bind": BIND_NS})
    if resource_n:
        resource = resource_n[0].getContent()
    else:
        resource = "auto"
    if not resource:
        r = stanza.make_error_response("bad-request")
    else:
        self.unset_iq_set_handler("bind", BIND_NS)
        r = stanza.make_result_response()
        self.peer.set_resource(resource)
        q = r.new_query(BIND_NS, "bind")
        q.newTextChild(None, "jid", to_utf8(self.peer.as_unicode()))
        self.state_change("authorized", self.peer)
    r.set_to(None)
    self.send(r)
    r.free()
[ "def", "do_bind", "(", "self", ",", "stanza", ")", ":", "fr", "=", "stanza", ".", "get_from", "(", ")", "if", "fr", "and", "fr", "!=", "self", ".", "peer", ":", "r", "=", "stanza", ".", "make_error_response", "(", "\"forbidden\"", ")", "self", ".", "send", "(", "r", ")", "r", ".", "free", "(", ")", "return", "resource_n", "=", "stanza", ".", "xpath_eval", "(", "\"bind:bind/bind:resource\"", ",", "{", "\"bind\"", ":", "BIND_NS", "}", ")", "if", "resource_n", ":", "resource", "=", "resource_n", "[", "0", "]", ".", "getContent", "(", ")", "else", ":", "resource", "=", "\"auto\"", "if", "not", "resource", ":", "r", "=", "stanza", ".", "make_error_response", "(", "\"bad-request\"", ")", "else", ":", "self", ".", "unset_iq_set_handler", "(", "\"bind\"", ",", "BIND_NS", ")", "r", "=", "stanza", ".", "make_result_response", "(", ")", "self", ".", "peer", ".", "set_resource", "(", "resource", ")", "q", "=", "r", ".", "new_query", "(", "BIND_NS", ",", "\"bind\"", ")", "q", ".", "newTextChild", "(", "None", ",", "\"jid\"", ",", "to_utf8", "(", "self", ".", "peer", ".", "as_unicode", "(", ")", ")", ")", "self", ".", "state_change", "(", "\"authorized\"", ",", "self", ".", "peer", ")", "r", ".", "set_to", "(", "None", ")", "self", ".", "send", "(", "r", ")", "r", ".", "free", "(", ")" ]
https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/lib/pyxmpp/clientstream.py#L219-L250
hhyo/Archery
c9b057d37e47894ca8531e5cd10afdb064cd0122
sql/utils/sql_utils.py
python
get_base_sqlitem_list
(full_sql)
return list
Convert the parameter full_sql into a list of SqlItem objects :param full_sql: complete SQL string; statements are separated by semicolons (;) and must not contain PL/SQL execution blocks or PL/SQL object definition blocks :return: a list of SqlItem objects
Convert the parameter full_sql into a list of SqlItem objects
[ "Convert", "the", "parameter", "full_sql", "into", "a", "list", "of", "SqlItem", "objects" ]
def get_base_sqlitem_list(full_sql):
    '''
    Convert the parameter full_sql into a list of SqlItem objects
    :param full_sql: complete SQL string; statements are separated by semicolons (;)
                     and must not contain PL/SQL execution blocks or PL/SQL object definition blocks
    :return: a list of SqlItem objects
    '''
    list = []  # note: shadows the built-in list within this function
    for statement in sqlparse.split(full_sql):
        statement = sqlparse.format(statement, strip_comments=True, reindent=True, keyword_case='lower')
        if len(statement) <= 0:
            continue
        item = SqlItem(statement=statement)
        list.append(item)
    return list
[ "def", "get_base_sqlitem_list", "(", "full_sql", ")", ":", "list", "=", "[", "]", "for", "statement", "in", "sqlparse", ".", "split", "(", "full_sql", ")", ":", "statement", "=", "sqlparse", ".", "format", "(", "statement", ",", "strip_comments", "=", "True", ",", "reindent", "=", "True", ",", "keyword_case", "=", "'lower'", ")", "if", "len", "(", "statement", ")", "<=", "0", ":", "continue", "item", "=", "SqlItem", "(", "statement", "=", "statement", ")", "list", ".", "append", "(", "item", ")", "return", "list" ]
https://github.com/hhyo/Archery/blob/c9b057d37e47894ca8531e5cd10afdb064cd0122/sql/utils/sql_utils.py#L133-L145
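A quick usage sketch for the record above: sqlparse.split and sqlparse.format are the only third-party calls involved, so the core behaviour can be checked without Archery's SqlItem model (the sample SQL below is an assumption, not taken from Archery's tests).

import sqlparse

full_sql = "select 1; -- trailing comment\nSELECT a FROM t;"
for stmt in sqlparse.split(full_sql):
    # same formatting options as get_base_sqlitem_list above
    print(sqlparse.format(stmt, strip_comments=True, reindent=True,
                          keyword_case='lower'))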
sfepy/sfepy
02ec7bb2ab39ee1dfe1eb4cd509f0ffb7dcc8b25
sfepy/discrete/materials.py
python
Material.reset
(self)
Clear all data created by a call to ``time_update()``, set ``self.mode`` to ``None``.
Clear all data created by a call to ``time_update()``, set ``self.mode`` to ``None``.
[ "Clear", "all", "data", "created", "by", "a", "call", "to", "time_update", "()", "set", "self", ".", "mode", "to", "None", "." ]
def reset(self): """ Clear all data created by a call to ``time_update()``, set ``self.mode`` to ``None``. """ self.mode = None self.datas = {} self.special_names = set() self.constant_names = set() self.extra_args = {}
[ "def", "reset", "(", "self", ")", ":", "self", ".", "mode", "=", "None", "self", ".", "datas", "=", "{", "}", "self", ".", "special_names", "=", "set", "(", ")", "self", ".", "constant_names", "=", "set", "(", ")", "self", ".", "extra_args", "=", "{", "}" ]
https://github.com/sfepy/sfepy/blob/02ec7bb2ab39ee1dfe1eb4cd509f0ffb7dcc8b25/sfepy/discrete/materials.py#L344-L353
analysiscenter/batchflow
294747da0bca309785f925be891441fdd824e9fa
batchflow/pipeline_executor.py
python
PipelineExecutor.run
(self, *args, **kwargs)
return self.pipeline
Execute all lazy actions for each batch in the dataset See also -------- :meth:`~.PipelineExecutor.gen_batch`
Execute all lazy actions for each batch in the dataset
[ "Execute", "all", "lazy", "actions", "for", "each", "batch", "in", "the", "dataset" ]
def run(self, *args, **kwargs): """ Execute all lazy actions for each batch in the dataset See also -------- :meth:`~.PipelineExecutor.gen_batch` """ for _ in self.pipeline.gen_batch(*args, **kwargs): pass return self.pipeline
[ "def", "run", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "_", "in", "self", ".", "pipeline", ".", "gen_batch", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pass", "return", "self", ".", "pipeline" ]
https://github.com/analysiscenter/batchflow/blob/294747da0bca309785f925be891441fdd824e9fa/batchflow/pipeline_executor.py#L254-L264
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/pip/_vendor/ipaddress.py
python
_BaseV4._string_from_ip_int
(cls, ip_int)
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] if isinstance(b, bytes) else b) for b in _compat_to_bytes(ip_int, 4, 'big'))
Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation.
Turns a 32-bit integer into dotted decimal notation.
[ "Turns", "a", "32", "-", "bit", "integer", "into", "dotted", "decimal", "notation", "." ]
def _string_from_ip_int(cls, ip_int): """Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation. """ return '.'.join(_compat_str(struct.unpack(b'!B', b)[0] if isinstance(b, bytes) else b) for b in _compat_to_bytes(ip_int, 4, 'big'))
[ "def", "_string_from_ip_int", "(", "cls", ",", "ip_int", ")", ":", "return", "'.'", ".", "join", "(", "_compat_str", "(", "struct", ".", "unpack", "(", "b'!B'", ",", "b", ")", "[", "0", "]", "if", "isinstance", "(", "b", ",", "bytes", ")", "else", "b", ")", "for", "b", "in", "_compat_to_bytes", "(", "ip_int", ",", "4", ",", "'big'", ")", ")" ]
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/cf_demo_client/cf_env/lib/python2.7/site-packages/pip/_vendor/ipaddress.py#L1309-L1322
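The vendored _compat helpers above exist to bridge Python 2/3 byte handling; a minimal standalone sketch of the same conversion using plain shifting and masking:

def string_from_ip_int(ip_int):
    # take the four octets of a 32-bit integer, most significant first
    return '.'.join(str((ip_int >> shift) & 0xFF) for shift in (24, 16, 8, 0))

print(string_from_ip_int(3232235777))  # prints 192.168.1.1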
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/jinja2/filters.py
python
do_wordcount
(s)
return len(_word_re.findall(s))
Count the words in that string.
Count the words in that string.
[ "Count", "the", "words", "in", "that", "string", "." ]
def do_wordcount(s): """Count the words in that string.""" return len(_word_re.findall(s))
[ "def", "do_wordcount", "(", "s", ")", ":", "return", "len", "(", "_word_re", ".", "findall", "(", "s", ")", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/jinja2/filters.py#L636-L638
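do_wordcount backs the built-in wordcount filter in Jinja2 templates; a small check (_word_re matches runs of word characters, so trailing punctuation is ignored):

from jinja2 import Environment

tmpl = Environment().from_string("{{ text | wordcount }}")
print(tmpl.render(text="Count the words in that string."))  # prints 6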
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Parser/asdl.py
python
ASDLParser.typestring
(self, tok)
return tok.type
[]
def typestring(self, tok): return tok.type
[ "def", "typestring", "(", "self", ",", "tok", ")", ":", "return", "tok", ".", "type" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Parser/asdl.py#L105-L106
roam-qgis/Roam
6bfa836a2735f611b7f26de18ae4a4581f7e83ef
scripts/fabricate.py
python
AtimesRunner._age_atimes
(self, filetimes)
return adjusted
Age files' atimes and mtimes to be at least FAT_xx_resolution old. Only adjust if the given filetimes dict says it isn't that old, and return a new dict of filetimes with the ages adjusted.
Age files' atimes and mtimes to be at least FAT_xx_resolution old. Only adjust if the given filetimes dict says it isn't that old, and return a new dict of filetimes with the ages adjusted.
[ "Age", "files", "atimes", "and", "mtimes", "to", "be", "at", "least", "FAT_xx_resolution", "old", ".", "Only", "adjust", "if", "the", "given", "filetimes", "dict", "says", "it", "isn", "t", "that", "old", "and", "return", "a", "new", "dict", "of", "filetimes", "with", "the", "ages", "adjusted", "." ]
def _age_atimes(self, filetimes): """ Age files' atimes and mtimes to be at least FAT_xx_resolution old. Only adjust if the given filetimes dict says it isn't that old, and return a new dict of filetimes with the ages adjusted. """ adjusted = {} now = time.time() for filename, entry in filetimes.items(): if now-entry[0] < FAT_atime_resolution or now-entry[1] < FAT_mtime_resolution: entry = entry[0] - FAT_atime_resolution, entry[1] - FAT_mtime_resolution self._utime(filename, entry[0], entry[1]) adjusted[filename] = entry return adjusted
[ "def", "_age_atimes", "(", "self", ",", "filetimes", ")", ":", "adjusted", "=", "{", "}", "now", "=", "time", ".", "time", "(", ")", "for", "filename", ",", "entry", "in", "filetimes", ".", "items", "(", ")", ":", "if", "now", "-", "entry", "[", "0", "]", "<", "FAT_atime_resolution", "or", "now", "-", "entry", "[", "1", "]", "<", "FAT_mtime_resolution", ":", "entry", "=", "entry", "[", "0", "]", "-", "FAT_atime_resolution", ",", "entry", "[", "1", "]", "-", "FAT_mtime_resolution", "self", ".", "_utime", "(", "filename", ",", "entry", "[", "0", "]", ",", "entry", "[", "1", "]", ")", "adjusted", "[", "filename", "]", "=", "entry", "return", "adjusted" ]
https://github.com/roam-qgis/Roam/blob/6bfa836a2735f611b7f26de18ae4a4581f7e83ef/scripts/fabricate.py#L408-L419
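A standalone sketch of the same aging trick outside AtimesRunner. FAT genuinely stores modification times at 2-second granularity and access dates at 1-day granularity; the constants below are assumptions in that spirit, not values copied from fabricate:

import os, time

FAT_ATIME_RES = 24 * 60 * 60  # assumed: FAT access times resolve to one day
FAT_MTIME_RES = 2             # assumed: FAT mtimes resolve to two seconds

def age_atimes(filetimes):
    # push each (atime, mtime) pair at least one resolution step into the past
    adjusted = {}
    now = time.time()
    for filename, (atime, mtime) in filetimes.items():
        if now - atime < FAT_ATIME_RES or now - mtime < FAT_MTIME_RES:
            atime, mtime = atime - FAT_ATIME_RES, mtime - FAT_MTIME_RES
            os.utime(filename, (atime, mtime))
        adjusted[filename] = (atime, mtime)
    return adjusted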
houtianze/bypy
10fd0f18378174a775a05a366cc20ba6609f96c6
bypy/bypy.py
python
ByPy.__proceed_remote_gather
(self, walkresult, remotepath, dirjs, filejs, args = None)
return walkresult
[]
def __proceed_remote_gather(self, walkresult, remotepath, dirjs, filejs, args = None): # NOTE: the '+ 1' is due to the trailing slash '/' # be careful about the trailing '/', it bit me once, bitterly rootrdir = args rootlen = len(rootrdir) dlen = len(remotepath) + 1 for d in dirjs: self.__remote_dir_contents.get(remotepath[rootlen:]).add( d['path'][dlen:], PathDictTree('D', size = d['size'], md5 = d['md5'] if 'md5' in d else '')) # Baidu made another fuck up here: # f['md5'] doesn't have the correct MD5 value, but f['block_list'][0] has # This makes no sense, and I'm not going to change the correct code to adapt its wrong behaviors # --- Code below for reference --- # fmd5 = f['md5'] # bl = 'block_list' # if bl in f and f[bl]: # fmd5 = f[bl][0] # f['path'][dlen:], PathDictTree('F', size = f['size'], md5 = fmd5)) for f in filejs: self.__remote_dir_contents.get(remotepath[rootlen:]).add( f['path'][dlen:], PathDictTree('F', size = f['size'], md5 = f['md5'])) return walkresult
[ "def", "__proceed_remote_gather", "(", "self", ",", "walkresult", ",", "remotepath", ",", "dirjs", ",", "filejs", ",", "args", "=", "None", ")", ":", "# NOTE: the '+ 1' is due to the trailing slash '/'", "# be careful about the trailing '/', it bit me once, bitterly", "rootrdir", "=", "args", "rootlen", "=", "len", "(", "rootrdir", ")", "dlen", "=", "len", "(", "remotepath", ")", "+", "1", "for", "d", "in", "dirjs", ":", "self", ".", "__remote_dir_contents", ".", "get", "(", "remotepath", "[", "rootlen", ":", "]", ")", ".", "add", "(", "d", "[", "'path'", "]", "[", "dlen", ":", "]", ",", "PathDictTree", "(", "'D'", ",", "size", "=", "d", "[", "'size'", "]", ",", "md5", "=", "d", "[", "'md5'", "]", "if", "'md5'", "in", "d", "else", "''", ")", ")", "# Baidu made another fuck up here:", "# f['md5'] doesn't have the correct MD5 value, but f['block_list'][0] has", "# This makes no sense, and I'm not going to change the correct code to adapt its wrong behaviors", "# --- Code below for reference ---", "# fmd5 = f['md5']", "# bl = 'block_list'", "# if bl in f and f[bl]:", "# \tfmd5 = f[bl][0]", "# f['path'][dlen:], PathDictTree('F', size = f['size'], md5 = fmd5))", "for", "f", "in", "filejs", ":", "self", ".", "__remote_dir_contents", ".", "get", "(", "remotepath", "[", "rootlen", ":", "]", ")", ".", "add", "(", "f", "[", "'path'", "]", "[", "dlen", ":", "]", ",", "PathDictTree", "(", "'F'", ",", "size", "=", "f", "[", "'size'", "]", ",", "md5", "=", "f", "[", "'md5'", "]", ")", ")", "return", "walkresult" ]
https://github.com/houtianze/bypy/blob/10fd0f18378174a775a05a366cc20ba6609f96c6/bypy/bypy.py#L2670-L2693
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/Toggl-Time-Tracking/alp/request/requests/packages/urllib3/packages/ordered_dict.py
python
OrderedDict.clear
(self)
od.clear() -> None. Remove all items from od.
od.clear() -> None. Remove all items from od.
[ "od", ".", "clear", "()", "-", ">", "None", ".", "Remove", "all", "items", "from", "od", "." ]
def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self)
[ "def", "clear", "(", "self", ")", ":", "try", ":", "for", "node", "in", "self", ".", "__map", ".", "itervalues", "(", ")", ":", "del", "node", "[", ":", "]", "root", "=", "self", ".", "__root", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", "]", "self", ".", "__map", ".", "clear", "(", ")", "except", "AttributeError", ":", "pass", "dict", ".", "clear", "(", "self", ")" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/Toggl-Time-Tracking/alp/request/requests/packages/urllib3/packages/ordered_dict.py#L80-L90
mikedh/trimesh
6b1e05616b44e6dd708d9bc748b211656ebb27ec
trimesh/util.py
python
grid_arange
(bounds, step)
return grid
Return a grid from an (2,dimension) bounds with samples step distance apart. Parameters ------------ bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]] step: float, or (dimension) floats, separation between points Returns --------- grid: (n, dimension), points inside the specified bounds
Return a grid from an (2,dimension) bounds with samples step distance apart.
[ "Return", "a", "grid", "from", "an", "(", "2", "dimension", ")", "bounds", "with", "samples", "step", "distance", "apart", "." ]
def grid_arange(bounds, step): """ Return a grid from an (2,dimension) bounds with samples step distance apart. Parameters ------------ bounds: (2,dimension) list of [[min x, min y, etc], [max x, max y, etc]] step: float, or (dimension) floats, separation between points Returns --------- grid: (n, dimension), points inside the specified bounds """ bounds = np.asanyarray(bounds, dtype=np.float64) if len(bounds) != 2: raise ValueError('bounds must be (2, dimension!') # allow single float or per-dimension spacing step = np.asanyarray(step, dtype=np.float64) if step.shape == (): step = np.tile(step, bounds.shape[1]) grid_elements = [np.arange(*b, step=s) for b, s in zip(bounds.T, step)] grid = np.vstack(np.meshgrid(*grid_elements, indexing='ij') ).reshape(bounds.shape[1], -1).T return grid
[ "def", "grid_arange", "(", "bounds", ",", "step", ")", ":", "bounds", "=", "np", ".", "asanyarray", "(", "bounds", ",", "dtype", "=", "np", ".", "float64", ")", "if", "len", "(", "bounds", ")", "!=", "2", ":", "raise", "ValueError", "(", "'bounds must be (2, dimension!'", ")", "# allow single float or per-dimension spacing", "step", "=", "np", ".", "asanyarray", "(", "step", ",", "dtype", "=", "np", ".", "float64", ")", "if", "step", ".", "shape", "==", "(", ")", ":", "step", "=", "np", ".", "tile", "(", "step", ",", "bounds", ".", "shape", "[", "1", "]", ")", "grid_elements", "=", "[", "np", ".", "arange", "(", "*", "b", ",", "step", "=", "s", ")", "for", "b", ",", "s", "in", "zip", "(", "bounds", ".", "T", ",", "step", ")", "]", "grid", "=", "np", ".", "vstack", "(", "np", ".", "meshgrid", "(", "*", "grid_elements", ",", "indexing", "=", "'ij'", ")", ")", ".", "reshape", "(", "bounds", ".", "shape", "[", "1", "]", ",", "-", "1", ")", ".", "T", "return", "grid" ]
https://github.com/mikedh/trimesh/blob/6b1e05616b44e6dd708d9bc748b211656ebb27ec/trimesh/util.py#L708-L733
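A quick usage example; grid_arange is importable from trimesh.util per the record's path. Note np.arange excludes the upper bound, so the max corner is not sampled:

from trimesh.util import grid_arange

# sample a 2D box from (0, 0) to (1, 1) every 0.5 units
grid = grid_arange([[0, 0], [1, 1]], step=0.5)
print(grid)
# [[0.  0. ]
#  [0.  0.5]
#  [0.5 0. ]
#  [0.5 0.5]]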
researchmm/tasn
5dba8ccc096cedc63913730eeea14a9647911129
tasn-mxnet/python/mxnet/symbol/symbol.py
python
Symbol.sqrt
(self, *args, **kwargs)
return op.sqrt(self, *args, **kwargs)
Convenience fluent method for :py:func:`sqrt`. The arguments are the same as for :py:func:`sqrt`, with this array as data.
Convenience fluent method for :py:func:`sqrt`.
[ "Convenience", "fluent", "method", "for", ":", "py", ":", "func", ":", "sqrt", "." ]
def sqrt(self, *args, **kwargs): """Convenience fluent method for :py:func:`sqrt`. The arguments are the same as for :py:func:`sqrt`, with this array as data. """ return op.sqrt(self, *args, **kwargs)
[ "def", "sqrt", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "op", ".", "sqrt", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/researchmm/tasn/blob/5dba8ccc096cedc63913730eeea14a9647911129/tasn-mxnet/python/mxnet/symbol/symbol.py#L2346-L2352
carpedm20/NAF-tensorflow
5754bd40fe135f79272b333ba2b911b02ca293f7
src/exploration.py
python
OUExploration.__init__
(self, env, sigma=0.3, mu=0, theta=0.15)
[]
def __init__(self, env, sigma=0.3, mu=0, theta=0.15): super(OUExploration, self).__init__(env) self.mu = mu self.theta = theta self.sigma = sigma self.state = np.ones(self.action_size) * self.mu self.reset()
[ "def", "__init__", "(", "self", ",", "env", ",", "sigma", "=", "0.3", ",", "mu", "=", "0", ",", "theta", "=", "0.15", ")", ":", "super", "(", "OUExploration", ",", "self", ")", ".", "__init__", "(", "env", ")", "self", ".", "mu", "=", "mu", "self", ".", "theta", "=", "theta", "self", ".", "sigma", "=", "sigma", "self", ".", "state", "=", "np", ".", "ones", "(", "self", ".", "action_size", ")", "*", "self", ".", "mu", "self", ".", "reset", "(", ")" ]
https://github.com/carpedm20/NAF-tensorflow/blob/5754bd40fe135f79272b333ba2b911b02ca293f7/src/exploration.py#L17-L25
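Only __init__ appears in the record; the per-step update such a class typically applies is the standard Ornstein-Uhlenbeck recursion used for DDPG-style exploration. The function below is a hypothetical sketch, not code from the repository:

import numpy as np

def ou_step(state, mu=0.0, theta=0.15, sigma=0.3):
    # mean reversion pulls state toward mu; sigma adds Gaussian jitter
    return state + theta * (mu - state) + sigma * np.random.randn(*state.shape)

state = np.zeros(4)
for _ in range(3):
    state = ou_step(state)
    print(state)  # successive samples are temporally correlated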
mrkipling/maraschino
c6be9286937783ae01df2d6d8cebfc8b2734a7d7
lib/sqlalchemy/orm/query.py
python
Query.except_
(self, *q)
return self._from_selectable( expression.except_(*([self]+ list(q))) )
Produce an EXCEPT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples.
Produce an EXCEPT of this Query against one or more queries.
[ "Produce", "an", "EXCEPT", "of", "this", "Query", "against", "one", "or", "more", "queries", "." ]
def except_(self, *q): """Produce an EXCEPT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._from_selectable( expression.except_(*([self]+ list(q))) )
[ "def", "except_", "(", "self", ",", "*", "q", ")", ":", "return", "self", ".", "_from_selectable", "(", "expression", ".", "except_", "(", "*", "(", "[", "self", "]", "+", "list", "(", "q", ")", ")", ")", ")" ]
https://github.com/mrkipling/maraschino/blob/c6be9286937783ae01df2d6d8cebfc8b2734a7d7/lib/sqlalchemy/orm/query.py#L1243-L1252
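Usage mirrors union(), as the docstring says. A hedged sketch against a modern SQLAlchemy (the vendored copy here is much older, but the call shape is unchanged):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add_all([User(name='ed'), User(name='wendy'), User(name='ed')])
    everyone = session.query(User)
    eds = session.query(User).filter(User.name == 'ed')
    for user in everyone.except_(eds):  # EXCEPT of the two queries
        print(user.name)                # wendy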
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/FreshDesk/Integrations/FreshDesk/FreshDesk.py
python
ticket_reply_command
()
Reply to a specified ticket. demisto parameter: (number) ticket_id ID of the ticket you wish to respond to demisto parameter: (string) body Content of the reply in HTML format demisto parameter: (string) from_email The email address from which the reply is sent. By default the global support email will be used. demisto parameter: (number) user_id ID of the agent who is adding the note demisto parameter: (list) cc_emails Array of email address strings added in the 'cc' field of the outgoing ticket email. demisto parameter: (list) bcc_emails Array of email address strings added in the 'bcc' field of the outgoing ticket email. demisto parameter: (list) attachments Entry IDs of files to attach to the reply. The total size of these attachments cannot exceed 15MB. returns: Ticket Reply Object
Reply to a specified ticket.
[ "Reply", "to", "a", "specified", "ticket", "." ]
def ticket_reply_command(): """ Reply to a specified ticket. demisto parameter: (number) ticket_id ID of the ticket you wish to respond to demisto parameter: (string) body Content of the reply in HTML format demisto parameter: (string) from_email The email address from which the reply is sent. By default the global support email will be used. demisto parameter: (number) user_id ID of the agent who is adding the note demisto parameter: (list) cc_emails Array of email address strings added in the 'cc' field of the outgoing ticket email. demisto parameter: (list) bcc_emails Array of email address strings added in the 'bcc' field of the outgoing ticket email. demisto parameter: (list) attachments Entry IDs of files to attach to the reply. The total size of these attachments cannot exceed 15MB. returns: Ticket Reply Object """ args = demisto.args() # Make request and get raw response reply = ticket_reply(args) # Parse response into context context = {string_to_context_key(key): val for key, val in reply.iteritems() if val} context = reformat_conversation_context(context) # Parse attachments into context context, context_readable = attachments_into_context(reply, context) context = reformat_ticket_context(context) context_readable = reformat_ticket_context(context_readable) complete_context = { 'ID': int(reply.get('ticket_id')), 'Conversation': context } title = 'Reply to Ticket #{}'.format(reply.get('ticket_id')) human_readable = tableToMarkdown(title, context_readable, removeNull=True) demisto.results({ 'Type': entryTypes['note'], 'ContentsFormat': formats['json'], 'Contents': reply, 'ReadableContentsFormat': formats['markdown'], 'HumanReadable': human_readable, 'EntryContext': { 'Freshdesk.Ticket(val.ID && val.ID === obj.ID)': complete_context } })
[ "def", "ticket_reply_command", "(", ")", ":", "args", "=", "demisto", ".", "args", "(", ")", "# Make request and get raw response", "reply", "=", "ticket_reply", "(", "args", ")", "# Parse response into context", "context", "=", "{", "string_to_context_key", "(", "key", ")", ":", "val", "for", "key", ",", "val", "in", "reply", ".", "iteritems", "(", ")", "if", "val", "}", "context", "=", "reformat_conversation_context", "(", "context", ")", "# Parse attachments into context", "context", ",", "context_readable", "=", "attachments_into_context", "(", "reply", ",", "context", ")", "context", "=", "reformat_ticket_context", "(", "context", ")", "context_readable", "=", "reformat_ticket_context", "(", "context_readable", ")", "complete_context", "=", "{", "'ID'", ":", "int", "(", "reply", ".", "get", "(", "'ticket_id'", ")", ")", ",", "'Conversation'", ":", "context", "}", "title", "=", "'Reply to Ticket #{}'", ".", "format", "(", "reply", ".", "get", "(", "'ticket_id'", ")", ")", "human_readable", "=", "tableToMarkdown", "(", "title", ",", "context_readable", ",", "removeNull", "=", "True", ")", "demisto", ".", "results", "(", "{", "'Type'", ":", "entryTypes", "[", "'note'", "]", ",", "'ContentsFormat'", ":", "formats", "[", "'json'", "]", ",", "'Contents'", ":", "reply", ",", "'ReadableContentsFormat'", ":", "formats", "[", "'markdown'", "]", ",", "'HumanReadable'", ":", "human_readable", ",", "'EntryContext'", ":", "{", "'Freshdesk.Ticket(val.ID && val.ID === obj.ID)'", ":", "complete_context", "}", "}", ")" ]
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/FreshDesk/Integrations/FreshDesk/FreshDesk.py#L1313-L1366
CrowdStrike/automactc
0df8d738be5f3e5053c4a6facda4014ea8ba4b36
modules/common/dep/cffi38/api.py
python
FFI.from_buffer
(self, cdecl, python_buffer=_unspecified, require_writable=False)
return self._backend.from_buffer(cdecl, python_buffer, require_writable)
Return a cdata of the given type pointing to the data of the given Python object, which must support the buffer interface. Note that this is not meant to be used on the built-in types str or unicode (you can build 'char[]' arrays explicitly) but only on objects containing large quantities of raw data in some other format, like 'array.array' or numpy arrays. The first argument is optional and default to 'char[]'.
Return a cdata of the given type pointing to the data of the given Python object, which must support the buffer interface. Note that this is not meant to be used on the built-in types str or unicode (you can build 'char[]' arrays explicitly) but only on objects containing large quantities of raw data in some other format, like 'array.array' or numpy arrays.
[ "Return", "a", "cdata", "of", "the", "given", "type", "pointing", "to", "the", "data", "of", "the", "given", "Python", "object", "which", "must", "support", "the", "buffer", "interface", ".", "Note", "that", "this", "is", "not", "meant", "to", "be", "used", "on", "the", "built", "-", "in", "types", "str", "or", "unicode", "(", "you", "can", "build", "char", "[]", "arrays", "explicitly", ")", "but", "only", "on", "objects", "containing", "large", "quantities", "of", "raw", "data", "in", "some", "other", "format", "like", "array", ".", "array", "or", "numpy", "arrays", "." ]
def from_buffer(self, cdecl, python_buffer=_unspecified, require_writable=False): """Return a cdata of the given type pointing to the data of the given Python object, which must support the buffer interface. Note that this is not meant to be used on the built-in types str or unicode (you can build 'char[]' arrays explicitly) but only on objects containing large quantities of raw data in some other format, like 'array.array' or numpy arrays. The first argument is optional and default to 'char[]'. """ if python_buffer is _unspecified: cdecl, python_buffer = self.BCharA, cdecl elif isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return self._backend.from_buffer(cdecl, python_buffer, require_writable)
[ "def", "from_buffer", "(", "self", ",", "cdecl", ",", "python_buffer", "=", "_unspecified", ",", "require_writable", "=", "False", ")", ":", "if", "python_buffer", "is", "_unspecified", ":", "cdecl", ",", "python_buffer", "=", "self", ".", "BCharA", ",", "cdecl", "elif", "isinstance", "(", "cdecl", ",", "basestring", ")", ":", "cdecl", "=", "self", ".", "_typeof", "(", "cdecl", ")", "return", "self", ".", "_backend", ".", "from_buffer", "(", "cdecl", ",", "python_buffer", ",", "require_writable", ")" ]
https://github.com/CrowdStrike/automactc/blob/0df8d738be5f3e5053c4a6facda4014ea8ba4b36/modules/common/dep/cffi38/api.py#L350-L366
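A minimal usage sketch with cffi; the first argument defaults to 'char[]', and require_writable (visible in the record's signature) guards against read-only buffers like bytes:

from cffi import FFI

ffi = FFI()
data = bytearray(b"hello")  # writable buffer
p = ffi.from_buffer("char[]", data, require_writable=True)
p[0] = b"H"                 # writes through to the underlying bytearray
print(bytes(data))          # b'Hello'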
jtriley/StarCluster
bc7c950e73f193eac9aab986b6764939cfdad978
starcluster/node.py
python
Node.getpwnam
(self, username)
return umap.get(username)
Remote version of the getpwnam method in the standard pwd module returns a pwd.struct_passwd
Remote version of the getpwnam method in the standard pwd module
[ "Remote", "version", "of", "the", "getpwnam", "method", "in", "the", "standard", "pwd", "module" ]
def getpwnam(self, username): """ Remote version of the getpwnam method in the standard pwd module returns a pwd.struct_passwd """ umap = self.get_user_map() return umap.get(username)
[ "def", "getpwnam", "(", "self", ",", "username", ")", ":", "umap", "=", "self", ".", "get_user_map", "(", ")", "return", "umap", ".", "get", "(", "username", ")" ]
https://github.com/jtriley/StarCluster/blob/bc7c950e73f193eac9aab986b6764939cfdad978/starcluster/node.py#L454-L461
F8LEFT/DecLLVM
d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c
python/idaapi.py
python
can_define_item
(*args)
return _idaapi.can_define_item(*args)
can_define_item(ea, length, flags) -> bool
can_define_item(ea, length, flags) -> bool
[ "can_define_item", "(", "ea", "length", "flags", ")", "-", ">", "bool" ]
def can_define_item(*args): """ can_define_item(ea, length, flags) -> bool """ return _idaapi.can_define_item(*args)
[ "def", "can_define_item", "(", "*", "args", ")", ":", "return", "_idaapi", ".", "can_define_item", "(", "*", "args", ")" ]
https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idaapi.py#L23080-L23084
aws-samples/aws-kube-codesuite
ab4e5ce45416b83bffb947ab8d234df5437f4fca
src/kubernetes/client/models/v1_api_group.py
python
V1APIGroup.to_dict
(self)
return result
Returns the model properties as a dict
Returns the model properties as a dict
[ "Returns", "the", "model", "properties", "as", "a", "dict" ]
def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
[ "def", "to_dict", "(", "self", ")", ":", "result", "=", "{", "}", "for", "attr", ",", "_", "in", "iteritems", "(", "self", ".", "swagger_types", ")", ":", "value", "=", "getattr", "(", "self", ",", "attr", ")", "if", "isinstance", "(", "value", ",", "list", ")", ":", "result", "[", "attr", "]", "=", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "to_dict", "(", ")", "if", "hasattr", "(", "x", ",", "\"to_dict\"", ")", "else", "x", ",", "value", ")", ")", "elif", "hasattr", "(", "value", ",", "\"to_dict\"", ")", ":", "result", "[", "attr", "]", "=", "value", ".", "to_dict", "(", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "result", "[", "attr", "]", "=", "dict", "(", "map", "(", "lambda", "item", ":", "(", "item", "[", "0", "]", ",", "item", "[", "1", "]", ".", "to_dict", "(", ")", ")", "if", "hasattr", "(", "item", "[", "1", "]", ",", "\"to_dict\"", ")", "else", "item", ",", "value", ".", "items", "(", ")", ")", ")", "else", ":", "result", "[", "attr", "]", "=", "value", "return", "result" ]
https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_api_group.py#L202-L226
openstack/cinder
23494a6d6c51451688191e1847a458f1d3cdcaa5
cinder/volume/drivers/nec/volume_helper.py
python
MStorageDriver.manage_existing_snapshot
(self, snapshot, existing_ref)
Brings an existing backend storage object under Cinder management. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object.
Brings an existing backend storage object under Cinder management.
[ "Brings", "an", "existing", "backend", "storage", "object", "under", "Cinder", "management", "." ]
def manage_existing_snapshot(self, snapshot, existing_ref): """Brings an existing backend storage object under Cinder management. Rename the backend storage object so that it matches the snapshot['name'] which is how drivers traditionally map between a cinder snapshot and the associated backend storage object. """ LOG.debug('manage_existing_snapshots Start.') xml = self._cli.view_all(self._properties['ismview_path']) pools, lds, ldsets, used_ldns, hostports, max_ld_count = ( self.configs(xml)) newname, oldname = self._reference_to_ldname('snapshot', snapshot, existing_ref) param_source = self.get_ldname(snapshot.volume_id, self._properties['ld_name_format']) ref_source = self._cli.get_bvname(oldname) if param_source[3:] != ref_source: reason = _('Snapshot source is unmatched.') raise exception.ManageExistingInvalidReference( existing_ref=existing_ref, reason=reason) if (lds[oldname]['pool_num'] not in self._properties['pool_backup_pools']): reason = _('Volume type is unmatched.') raise exception.ManageExistingVolumeTypeMismatch( existing_ref=existing_ref, reason=reason) try: self._cli.changeldname(None, newname, oldname) except exception.CinderException as e: LOG.warning('Unable to manage existing snapshot ' '(reference = %(ref)s), (%(exception)s)', {'ref': existing_ref['source-name'], 'exception': e})
[ "def", "manage_existing_snapshot", "(", "self", ",", "snapshot", ",", "existing_ref", ")", ":", "LOG", ".", "debug", "(", "'manage_existing_snapshots Start.'", ")", "xml", "=", "self", ".", "_cli", ".", "view_all", "(", "self", ".", "_properties", "[", "'ismview_path'", "]", ")", "pools", ",", "lds", ",", "ldsets", ",", "used_ldns", ",", "hostports", ",", "max_ld_count", "=", "(", "self", ".", "configs", "(", "xml", ")", ")", "newname", ",", "oldname", "=", "self", ".", "_reference_to_ldname", "(", "'snapshot'", ",", "snapshot", ",", "existing_ref", ")", "param_source", "=", "self", ".", "get_ldname", "(", "snapshot", ".", "volume_id", ",", "self", ".", "_properties", "[", "'ld_name_format'", "]", ")", "ref_source", "=", "self", ".", "_cli", ".", "get_bvname", "(", "oldname", ")", "if", "param_source", "[", "3", ":", "]", "!=", "ref_source", ":", "reason", "=", "_", "(", "'Snapshot source is unmatched.'", ")", "raise", "exception", ".", "ManageExistingInvalidReference", "(", "existing_ref", "=", "existing_ref", ",", "reason", "=", "reason", ")", "if", "(", "lds", "[", "oldname", "]", "[", "'pool_num'", "]", "not", "in", "self", ".", "_properties", "[", "'pool_backup_pools'", "]", ")", ":", "reason", "=", "_", "(", "'Volume type is unmatched.'", ")", "raise", "exception", ".", "ManageExistingVolumeTypeMismatch", "(", "existing_ref", "=", "existing_ref", ",", "reason", "=", "reason", ")", "try", ":", "self", ".", "_cli", ".", "changeldname", "(", "None", ",", "newname", ",", "oldname", ")", "except", "exception", ".", "CinderException", "as", "e", ":", "LOG", ".", "warning", "(", "'Unable to manage existing snapshot '", "'(reference = %(ref)s), (%(exception)s)'", ",", "{", "'ref'", ":", "existing_ref", "[", "'source-name'", "]", ",", "'exception'", ":", "e", "}", ")" ]
https://github.com/openstack/cinder/blob/23494a6d6c51451688191e1847a458f1d3cdcaa5/cinder/volume/drivers/nec/volume_helper.py#L1725-L1759
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Tools/bgen/bgen/bgenBuffer.py
python
FixedInputOutputBufferType.getargsCheck
(self, name)
[]
def getargsCheck(self, name): Output("if (%s__in_len__ != %s)", name, self.size) OutLbrace() Output('PyErr_SetString(PyExc_TypeError, "buffer length should be %s");', self.size) Output("goto %s__error__;", name) self.label_needed = 1 OutRbrace() self.transferSize(name)
[ "def", "getargsCheck", "(", "self", ",", "name", ")", ":", "Output", "(", "\"if (%s__in_len__ != %s)\"", ",", "name", ",", "self", ".", "size", ")", "OutLbrace", "(", ")", "Output", "(", "'PyErr_SetString(PyExc_TypeError, \"buffer length should be %s\");'", ",", "self", ".", "size", ")", "Output", "(", "\"goto %s__error__;\"", ",", "name", ")", "self", ".", "label_needed", "=", "1", "OutRbrace", "(", ")", "self", ".", "transferSize", "(", "name", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Tools/bgen/bgen/bgenBuffer.py#L83-L91
thouska/spotpy
92c3aad416ccd6becbeb345c58cae36b3a63d892
spotpy/algorithms/dds.py
python
DDSGenerator.neigh_value_continuous
(self, s, x_min, x_max, r)
return x_new
select a RANDOM neighbouring real value of a SINGLE decision variable CEE 509, HW 5 by Bryan Tolson, Mar 5, 2003 AND ALSO CEE PROJECT variables: x_range is the range of the real variable (s_max-s_min) :param s: is a current SINGLE decision variable VALUE :param x_min: is the min of variable s :param x_max: is the max of variable s :param r: is the neighbourhood parameter (replaces V parameter~see not It is defined as the ratio of the std deviation of the desired normal random number/x_range. Eg: std dev desired = r * x_range for comparison: variance (V) = (r * x_range)^2 :return: x_new, a new sample of values in beetween a given range
select a RANDOM neighbouring real value of a SINGLE decision variable CEE 509, HW 5 by Bryan Tolson, Mar 5, 2003 AND ALSO CEE PROJECT variables: x_range is the range of the real variable (s_max-s_min)
[ "select", "a", "RANDOM", "neighbouring", "real", "value", "of", "a", "SINGLE", "decision", "variable", "CEE", "509", "HW", "5", "by", "Bryan", "Tolson", "Mar", "5", "2003", "AND", "ALSO", "CEE", "PROJECT", "variables", ":", "x_range", "is", "the", "range", "of", "the", "real", "variable", "(", "s_max", "-", "s_min", ")" ]
def neigh_value_continuous(self, s, x_min, x_max, r): """ select a RANDOM neighbouring real value of a SINGLE decision variable CEE 509, HW 5 by Bryan Tolson, Mar 5, 2003 AND ALSO CEE PROJECT variables: x_range is the range of the real variable (s_max-s_min) :param s: is a current SINGLE decision variable VALUE :param x_min: is the min of variable s :param x_max: is the max of variable s :param r: is the neighbourhood parameter (replaces V parameter~see not It is defined as the ratio of the std deviation of the desired normal random number/x_range. Eg: std dev desired = r * x_range for comparison: variance (V) = (r * x_range)^2 :return: x_new, a new sample of values in beetween a given range """ x_range = x_max - x_min x_new = s + self.np_random.normal(0, 1) * r * x_range # NEED to deal with variable upper and lower bounds: # Originally bounds in DDS were 100# reflective # But some times DVs are right on the boundary and with 100# reflective # boundaries it is hard to detect them. Therefore, we decided to make the # boundaries reflective with 50# chance and absorptive with 50# chance. # M. Asadzadeh and B. Tolson Dec 2008 p_abs_or_ref = self.np_random.rand() if x_new < x_min: # works for any pos or neg x_min if p_abs_or_ref <= 0.5: # with 50%chance reflect x_new = x_min + (x_min - x_new) else: # with 50% chance absorb x_new = x_min # if reflection goes past x_max then value should be x_min since without reflection # the approach goes way past lower bound. This keeps X close to lower bound when X current # is close to lower bound: if x_new > x_max: x_new = x_min elif x_new > x_max: # works for any pos or neg x_max if p_abs_or_ref <= 0.5: # with 50% chance reflect x_new = x_max - (x_new - x_max) else: # with 50% chance absorb x_new = x_max # if reflection goes past x_min then value should be x_max for same reasons as above if x_new < x_min: x_new = x_max return x_new
[ "def", "neigh_value_continuous", "(", "self", ",", "s", ",", "x_min", ",", "x_max", ",", "r", ")", ":", "x_range", "=", "x_max", "-", "x_min", "x_new", "=", "s", "+", "self", ".", "np_random", ".", "normal", "(", "0", ",", "1", ")", "*", "r", "*", "x_range", "# NEED to deal with variable upper and lower bounds:", "# Originally bounds in DDS were 100# reflective", "# But some times DVs are right on the boundary and with 100# reflective", "# boundaries it is hard to detect them. Therefore, we decided to make the", "# boundaries reflective with 50# chance and absorptive with 50# chance.", "# M. Asadzadeh and B. Tolson Dec 2008", "p_abs_or_ref", "=", "self", ".", "np_random", ".", "rand", "(", ")", "if", "x_new", "<", "x_min", ":", "# works for any pos or neg x_min", "if", "p_abs_or_ref", "<=", "0.5", ":", "# with 50%chance reflect", "x_new", "=", "x_min", "+", "(", "x_min", "-", "x_new", ")", "else", ":", "# with 50% chance absorb", "x_new", "=", "x_min", "# if reflection goes past x_max then value should be x_min since without reflection", "# the approach goes way past lower bound. This keeps X close to lower bound when X current", "# is close to lower bound:", "if", "x_new", ">", "x_max", ":", "x_new", "=", "x_min", "elif", "x_new", ">", "x_max", ":", "# works for any pos or neg x_max", "if", "p_abs_or_ref", "<=", "0.5", ":", "# with 50% chance reflect", "x_new", "=", "x_max", "-", "(", "x_new", "-", "x_max", ")", "else", ":", "# with 50% chance absorb", "x_new", "=", "x_max", "# if reflection goes past x_min then value should be x_max for same reasons as above", "if", "x_new", "<", "x_min", ":", "x_new", "=", "x_max", "return", "x_new" ]
https://github.com/thouska/spotpy/blob/92c3aad416ccd6becbeb345c58cae36b3a63d892/spotpy/algorithms/dds.py#L15-L68
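A standalone numpy sketch of the reflect-or-absorb boundary rule, mirroring the record's logic outside the class; the 50/50 absorb branch is what lets samples actually land on a bound instead of always bouncing off it:

import numpy as np

def neigh_value_continuous(s, x_min, x_max, r, rng=np.random.default_rng()):
    x_range = x_max - x_min
    x_new = s + rng.normal(0, 1) * r * x_range
    if x_new < x_min:
        # reflect with probability 0.5, otherwise absorb onto the bound
        x_new = x_min + (x_min - x_new) if rng.random() <= 0.5 else x_min
        if x_new > x_max:  # reflection overshot the opposite bound
            x_new = x_min
    elif x_new > x_max:
        x_new = x_max - (x_new - x_max) if rng.random() <= 0.5 else x_max
        if x_new < x_min:
            x_new = x_max
    return x_new

print([round(neigh_value_continuous(0.95, 0.0, 1.0, 0.2), 3) for _ in range(5)])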
google/nogotofail
7037dcb23f1fc370de784c36dbb24ae93cd5a58d
nogotofail/mitm/util/extras.py
python
get_extras_path
(file_path)
return os.path.join(extras_dir, file_path)
Return a full path to file_path in the extras directory.
Return a full path to file_path in the extras directory.
[ "Return", "a", "full", "path", "to", "file_path", "in", "the", "extras", "directory", "." ]
def get_extras_path(file_path): """ Return a full path to file_path in the extras directory.""" if extras_dir is None: return file_path return os.path.join(extras_dir, file_path)
[ "def", "get_extras_path", "(", "file_path", ")", ":", "if", "extras_dir", "is", "None", ":", "return", "file_path", "return", "os", ".", "path", ".", "join", "(", "extras_dir", ",", "file_path", ")" ]
https://github.com/google/nogotofail/blob/7037dcb23f1fc370de784c36dbb24ae93cd5a58d/nogotofail/mitm/util/extras.py#L20-L24
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/idlelib/HyperParser.py
python
HyperParser.set_index
(self, index)
Set the index to which the functions relate. The index must be in the same statement.
Set the index to which the functions relate.
[ "Set", "the", "index", "to", "which", "the", "functions", "relate", "." ]
def set_index(self, index): """Set the index to which the functions relate. The index must be in the same statement. """ indexinrawtext = (len(self.rawtext) - len(self.text.get(index, self.stopatindex))) if indexinrawtext < 0: raise ValueError("Index %s precedes the analyzed statement" % index) self.indexinrawtext = indexinrawtext # find the rightmost bracket to which index belongs self.indexbracket = 0 while (self.indexbracket < len(self.bracketing)-1 and self.bracketing[self.indexbracket+1][0] < self.indexinrawtext): self.indexbracket += 1 if (self.indexbracket < len(self.bracketing)-1 and self.bracketing[self.indexbracket+1][0] == self.indexinrawtext and not self.isopener[self.indexbracket+1]): self.indexbracket += 1
[ "def", "set_index", "(", "self", ",", "index", ")", ":", "indexinrawtext", "=", "(", "len", "(", "self", ".", "rawtext", ")", "-", "len", "(", "self", ".", "text", ".", "get", "(", "index", ",", "self", ".", "stopatindex", ")", ")", ")", "if", "indexinrawtext", "<", "0", ":", "raise", "ValueError", "(", "\"Index %s precedes the analyzed statement\"", "%", "index", ")", "self", ".", "indexinrawtext", "=", "indexinrawtext", "# find the rightmost bracket to which index belongs", "self", ".", "indexbracket", "=", "0", "while", "(", "self", ".", "indexbracket", "<", "len", "(", "self", ".", "bracketing", ")", "-", "1", "and", "self", ".", "bracketing", "[", "self", ".", "indexbracket", "+", "1", "]", "[", "0", "]", "<", "self", ".", "indexinrawtext", ")", ":", "self", ".", "indexbracket", "+=", "1", "if", "(", "self", ".", "indexbracket", "<", "len", "(", "self", ".", "bracketing", ")", "-", "1", "and", "self", ".", "bracketing", "[", "self", ".", "indexbracket", "+", "1", "]", "[", "0", "]", "==", "self", ".", "indexinrawtext", "and", "not", "self", ".", "isopener", "[", "self", ".", "indexbracket", "+", "1", "]", ")", ":", "self", ".", "indexbracket", "+=", "1" ]
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/idlelib/HyperParser.py#L82-L101
robotlearn/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
pyrobolearn/utils/data_structures/orderedset.py
python
OrderedSet2.__reversed__
(self)
Iterate over the ordered set in the reverse order the items have been added. Time complexity: O(N)
Iterate over the ordered set in the reverse order the items have been added. Time complexity: O(N)
[ "Iterate", "over", "the", "ordered", "set", "in", "the", "reverse", "order", "the", "items", "have", "been", "added", ".", "Time", "complexity", ":", "O", "(", "N", ")" ]
def __reversed__(self): """ Iterate over the ordered set in the reverse order the items have been added. Time complexity: O(N) """ curr = self._end while curr != self.NonePtr: yield curr curr = self._map[curr][0]
[ "def", "__reversed__", "(", "self", ")", ":", "curr", "=", "self", ".", "_end", "while", "curr", "!=", "self", ".", "NonePtr", ":", "yield", "curr", "curr", "=", "self", ".", "_map", "[", "curr", "]", "[", "0", "]" ]
https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/utils/data_structures/orderedset.py#L771-L779
pyg-team/pytorch_geometric
b920e9a3a64e22c8356be55301c88444ff051cae
torch_geometric/datasets/webkb.py
python
WebKB.download
(self)
[]
def download(self): for f in self.raw_file_names[:2]: download_url(f'{self.url}/new_data/{self.name}/{f}', self.raw_dir) for f in self.raw_file_names[2:]: download_url(f'{self.url}/splits/{f}', self.raw_dir)
[ "def", "download", "(", "self", ")", ":", "for", "f", "in", "self", ".", "raw_file_names", "[", ":", "2", "]", ":", "download_url", "(", "f'{self.url}/new_data/{self.name}/{f}'", ",", "self", ".", "raw_dir", ")", "for", "f", "in", "self", ".", "raw_file_names", "[", "2", ":", "]", ":", "download_url", "(", "f'{self.url}/splits/{f}'", ",", "self", ".", "raw_dir", ")" ]
https://github.com/pyg-team/pytorch_geometric/blob/b920e9a3a64e22c8356be55301c88444ff051cae/torch_geometric/datasets/webkb.py#L59-L63
DataBrewery/cubes
140133e8c2e3f2ff60631cc3ebc9966d16c1655e
cubes/common.py
python
sorted_dependencies
(graph)
return L
Return keys from `deps` ordered by dependency (topological sort). `deps` is a dictionary where keys are strings and values are list of strings where keys is assumed to be dependant on values. Example:: A ---> B -+--> C | +--> D --> E Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}``
Return keys from `deps` ordered by dependency (topological sort). `deps` is a dictionary where keys are strings and values are list of strings where keys is assumed to be dependant on values.
[ "Return", "keys", "from", "deps", "ordered", "by", "dependency", "(", "topological", "sort", ")", ".", "deps", "is", "a", "dictionary", "where", "keys", "are", "strings", "and", "values", "are", "list", "of", "strings", "where", "keys", "is", "assumed", "to", "be", "dependant", "on", "values", "." ]
def sorted_dependencies(graph): """Return keys from `deps` ordered by dependency (topological sort). `deps` is a dictionary where keys are strings and values are list of strings where keys is assumed to be dependant on values. Example:: A ---> B -+--> C | +--> D --> E Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}`` """ graph = dict((key, set(value)) for key, value in graph.items()) # L ← Empty list that will contain the sorted elements L = [] # S ← Set of all nodes with no dependencies (incoming edges) S = set(parent for parent, req in graph.items() if not req) while S: # remove a node n from S n = S.pop() # insert n into L L.append(n) # for each node m with an edge e from n to m do # (n that depends on m) parents = [parent for parent, req in graph.items() if n in req] for parent in parents: graph[parent].remove(n) # remove edge e from the graph # if m has no other incoming edges then insert m into S if not graph[parent]: S.add(parent) # if graph has edges then -> error nonempty = [k for k, v in graph.items() if v] if nonempty: raise ArgumentError("Cyclic dependency of: %s" % ", ".join(nonempty)) return L
[ "def", "sorted_dependencies", "(", "graph", ")", ":", "graph", "=", "dict", "(", "(", "key", ",", "set", "(", "value", ")", ")", "for", "key", ",", "value", "in", "graph", ".", "items", "(", ")", ")", "# L ← Empty list that will contain the sorted elements", "L", "=", "[", "]", "# S ← Set of all nodes with no dependencies (incoming edges)", "S", "=", "set", "(", "parent", "for", "parent", ",", "req", "in", "graph", ".", "items", "(", ")", "if", "not", "req", ")", "while", "S", ":", "# remove a node n from S", "n", "=", "S", ".", "pop", "(", ")", "# insert n into L", "L", ".", "append", "(", "n", ")", "# for each node m with an edge e from n to m do", "# (n that depends on m)", "parents", "=", "[", "parent", "for", "parent", ",", "req", "in", "graph", ".", "items", "(", ")", "if", "n", "in", "req", "]", "for", "parent", "in", "parents", ":", "graph", "[", "parent", "]", ".", "remove", "(", "n", ")", "# remove edge e from the graph", "# if m has no other incoming edges then insert m into S", "if", "not", "graph", "[", "parent", "]", ":", "S", ".", "add", "(", "parent", ")", "# if graph has edges then -> error", "nonempty", "=", "[", "k", "for", "k", ",", "v", "in", "graph", ".", "items", "(", ")", "if", "v", "]", "if", "nonempty", ":", "raise", "ArgumentError", "(", "\"Cyclic dependency of: %s\"", "%", "\", \"", ".", "join", "(", "nonempty", ")", ")", "return", "L" ]
https://github.com/DataBrewery/cubes/blob/140133e8c2e3f2ff60631cc3ebc9966d16c1655e/cubes/common.py#L272-L317
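A usage sketch (the import path follows the record's cubes/common.py). Note that, as implemented, a node appearing only as a dependency never enters S, so the docstring's own example dict needs an explicit "C": [] entry to terminate without tripping the cyclic-dependency error:

from cubes.common import sorted_dependencies

graph = {"A": ["B"], "B": ["C", "D"], "C": [], "D": ["E"], "E": []}
print(sorted_dependencies(graph))
# e.g. ['E', 'C', 'D', 'B', 'A'] -- dependencies always precede dependants;
# order among independent nodes follows set iteration and may vary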
openstack/manila
142990edc027e14839d5deaf4954dd6fc88de15e
manila/db/sqlalchemy/api.py
python
message_get_all
(context, filters=None, limit=None, offset=None, sort_key='created_at', sort_dir='desc')
Retrieves all messages. If no sort parameters are specified then the returned messages are sorted by the 'created_at' key in descending order. :param context: context to query under :param limit: maximum number of items to return :param offset: the number of items to skip from the marker or from the first element. :param sort_key: attributes by which results should be sorted. :param sort_dir: directions in which results should be sorted. :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see exact_filter function for more information :returns: list of matching messages
Retrieves all messages.
[ "Retrieves", "all", "messages", "." ]
def message_get_all(context, filters=None, limit=None, offset=None, sort_key='created_at', sort_dir='desc'): """Retrieves all messages. If no sort parameters are specified then the returned messages are sorted by the 'created_at' key in descending order. :param context: context to query under :param limit: maximum number of items to return :param offset: the number of items to skip from the marker or from the first element. :param sort_key: attributes by which results should be sorted. :param sort_dir: directions in which results should be sorted. :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see exact_filter function for more information :returns: list of matching messages """ messages = models.Message session = get_session() with session.begin(): query = model_query(context, messages, read_deleted="no", project_only="yes") legal_filter_keys = ('request_id', 'resource_type', 'resource_id', 'action_id', 'detail_id', 'message_level', 'created_since', 'created_before') if not filters: filters = {} query = exact_filter(query, messages, filters, legal_filter_keys) query = utils.paginate_query(query, messages, limit, sort_key=sort_key, sort_dir=sort_dir, offset=offset) return query.all()
[ "def", "message_get_all", "(", "context", ",", "filters", "=", "None", ",", "limit", "=", "None", ",", "offset", "=", "None", ",", "sort_key", "=", "'created_at'", ",", "sort_dir", "=", "'desc'", ")", ":", "messages", "=", "models", ".", "Message", "session", "=", "get_session", "(", ")", "with", "session", ".", "begin", "(", ")", ":", "query", "=", "model_query", "(", "context", ",", "messages", ",", "read_deleted", "=", "\"no\"", ",", "project_only", "=", "\"yes\"", ")", "legal_filter_keys", "=", "(", "'request_id'", ",", "'resource_type'", ",", "'resource_id'", ",", "'action_id'", ",", "'detail_id'", ",", "'message_level'", ",", "'created_since'", ",", "'created_before'", ")", "if", "not", "filters", ":", "filters", "=", "{", "}", "query", "=", "exact_filter", "(", "query", ",", "messages", ",", "filters", ",", "legal_filter_keys", ")", "query", "=", "utils", ".", "paginate_query", "(", "query", ",", "messages", ",", "limit", ",", "sort_key", "=", "sort_key", ",", "sort_dir", "=", "sort_dir", ",", "offset", "=", "offset", ")", "return", "query", ".", "all", "(", ")" ]
https://github.com/openstack/manila/blob/142990edc027e14839d5deaf4954dd6fc88de15e/manila/db/sqlalchemy/api.py#L5879-L5921
burke-software/schooldriver
a07262ba864aee0182548ecceb661e49c925725f
appy/shared/sap.py
python
Sap.getFunctionInfo
(self, functionName)
Returns information about the RFC function named p_functionName.
Returns information about the RFC function named p_functionName.
[ "Returns", "information", "about", "the", "RFC", "function", "named", "p_functionName", "." ]
def getFunctionInfo(self, functionName): '''Returns information about the RFC function named p_functionName.''' try: res = '' usedTypes = set() # Names of type definitions used in parameters. fDesc = self.sap.get_interface_desc(functionName) functionDescr = str(fDesc).strip() if functionDescr: res += functionDescr # Import parameters if fDesc.imports: res += '\nIMPORTS\n' for iDesc in fDesc.imports: res += ' %s\n' % str(iDesc) usedTypes.add(iDesc.field_def) # Export parameters if fDesc.exports: res += '\nEXPORTS\n' for eDesc in fDesc.exports: res += ' %s\n' % str(eDesc) usedTypes.add(eDesc.field_def) if fDesc.tables: res += '\nTABLES\n' for tDesc in fDesc.tables: res += ' %s\n' % str(tDesc) usedTypes.add(tDesc.field_def) if fDesc.exceptions: res += '\nEXCEPTIONS\n' for eDesc in fDesc.exceptions: res += ' %s\n' % str(eDesc) # Add information about used types if usedTypes: res += '\nTypes used by the parameters:\n' for typeName in usedTypes: # Dump info only if it is a structure, not a simple type try: self.sap.get_structure(typeName) res += '%s\n%s\n\n' % \ (typeName, self.getTypeInfo(typeName)) except pysap.BaseSapRfcError, ee: pass return res except pysap.BaseSapRfcError, se: if se.value == 'FU_NOT_FOUND': raise SapError(SAP_FUNCTION_NOT_FOUND % (functionName)) else: raise SapError(SAP_FUNCTION_INFO_ERROR % (functionName,str(se)))
[ "def", "getFunctionInfo", "(", "self", ",", "functionName", ")", ":", "try", ":", "res", "=", "''", "usedTypes", "=", "set", "(", ")", "# Names of type definitions used in parameters.", "fDesc", "=", "self", ".", "sap", ".", "get_interface_desc", "(", "functionName", ")", "functionDescr", "=", "str", "(", "fDesc", ")", ".", "strip", "(", ")", "if", "functionDescr", ":", "res", "+=", "functionDescr", "# Import parameters", "if", "fDesc", ".", "imports", ":", "res", "+=", "'\\nIMPORTS\\n'", "for", "iDesc", "in", "fDesc", ".", "imports", ":", "res", "+=", "' %s\\n'", "%", "str", "(", "iDesc", ")", "usedTypes", ".", "add", "(", "iDesc", ".", "field_def", ")", "# Export parameters", "if", "fDesc", ".", "exports", ":", "res", "+=", "'\\nEXPORTS\\n'", "for", "eDesc", "in", "fDesc", ".", "exports", ":", "res", "+=", "' %s\\n'", "%", "str", "(", "eDesc", ")", "usedTypes", ".", "add", "(", "eDesc", ".", "field_def", ")", "if", "fDesc", ".", "tables", ":", "res", "+=", "'\\nTABLES\\n'", "for", "tDesc", "in", "fDesc", ".", "tables", ":", "res", "+=", "' %s\\n'", "%", "str", "(", "tDesc", ")", "usedTypes", ".", "add", "(", "tDesc", ".", "field_def", ")", "if", "fDesc", ".", "exceptions", ":", "res", "+=", "'\\nEXCEPTIONS\\n'", "for", "eDesc", "in", "fDesc", ".", "exceptions", ":", "res", "+=", "' %s\\n'", "%", "str", "(", "eDesc", ")", "# Add information about used types", "if", "usedTypes", ":", "res", "+=", "'\\nTypes used by the parameters:\\n'", "for", "typeName", "in", "usedTypes", ":", "# Dump info only if it is a structure, not a simple type", "try", ":", "self", ".", "sap", ".", "get_structure", "(", "typeName", ")", "res", "+=", "'%s\\n%s\\n\\n'", "%", "(", "typeName", ",", "self", ".", "getTypeInfo", "(", "typeName", ")", ")", "except", "pysap", ".", "BaseSapRfcError", ",", "ee", ":", "pass", "return", "res", "except", "pysap", ".", "BaseSapRfcError", ",", "se", ":", "if", "se", ".", "value", "==", "'FU_NOT_FOUND'", ":", "raise", "SapError", "(", "SAP_FUNCTION_NOT_FOUND", "%", "(", "functionName", ")", ")", "else", ":", "raise", "SapError", "(", "SAP_FUNCTION_INFO_ERROR", "%", "(", "functionName", ",", "str", "(", "se", ")", ")", ")" ]
https://github.com/burke-software/schooldriver/blob/a07262ba864aee0182548ecceb661e49c925725f/appy/shared/sap.py#L163-L208
hakril/PythonForWindows
61e027a678d5b87aa64fcf8a37a6661a86236589
ctypes_generation/definitions/structures/template.py
python
EnumValue.__init__
(self, enum_name, name, value)
[]
def __init__(self, enum_name, name, value): self.enum_name = enum_name self.name = name
[ "def", "__init__", "(", "self", ",", "enum_name", ",", "name", ",", "value", ")", ":", "self", ".", "enum_name", "=", "enum_name", "self", ".", "name", "=", "name" ]
https://github.com/hakril/PythonForWindows/blob/61e027a678d5b87aa64fcf8a37a6661a86236589/ctypes_generation/definitions/structures/template.py#L11-L13
loli/medpy
39131b94f0ab5328ab14a874229320efc2f74d98
medpy/iterators/patchwise.py
python
CentredPatchIterator.applyslicer
(array, slicer, pmask, cval = 0)
return patch
r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. Notes ----- If ``array`` has more dimensions than ``slicer`` and ``pmask``, the first ones are sliced. Parameters ---------- array : array_like A n-dimensional array. slicer : list List if `slice()` instances as returned by `next()`. pmask : narray The array mask as returned by `next()`. cval : number Value to fill undefined positions. Experiments ----------- >>> import numpy >>> from medpy.iterators import CentredPatchIterator >>> arr = numpy.arange(0, 25).reshape((5,5)) >>> for patch, pmask, _, slicer in CentredPatchIterator(arr, 3): >>> new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask) >>> print numpy.all(new_patch == patch) True ...
r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. Notes ----- If ``array`` has more dimensions than ``slicer`` and ``pmask``, the first ones are sliced. Parameters ---------- array : array_like A n-dimensional array. slicer : list List if `slice()` instances as returned by `next()`. pmask : narray The array mask as returned by `next()`. cval : number Value to fill undefined positions. Experiments ----------- >>> import numpy >>> from medpy.iterators import CentredPatchIterator >>> arr = numpy.arange(0, 25).reshape((5,5)) >>> for patch, pmask, _, slicer in CentredPatchIterator(arr, 3): >>> new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask) >>> print numpy.all(new_patch == patch) True ...
[ "r", "Apply", "a", "slicer", "returned", "by", "the", "iterator", "to", "a", "new", "array", "of", "the", "same", "dimensionality", "as", "the", "one", "used", "to", "initialize", "the", "iterator", ".", "Notes", "-----", "If", "array", "has", "more", "dimensions", "than", "slicer", "and", "pmask", "the", "first", "ones", "are", "sliced", ".", "Parameters", "----------", "array", ":", "array_like", "A", "n", "-", "dimensional", "array", ".", "slicer", ":", "list", "List", "if", "slice", "()", "instances", "as", "returned", "by", "next", "()", ".", "pmask", ":", "narray", "The", "array", "mask", "as", "returned", "by", "next", "()", ".", "cval", ":", "number", "Value", "to", "fill", "undefined", "positions", ".", "Experiments", "-----------", ">>>", "import", "numpy", ">>>", "from", "medpy", ".", "iterators", "import", "CentredPatchIterator", ">>>", "arr", "=", "numpy", ".", "arange", "(", "0", "25", ")", ".", "reshape", "((", "5", "5", "))", ">>>", "for", "patch", "pmask", "_", "slicer", "in", "CentredPatchIterator", "(", "arr", "3", ")", ":", ">>>", "new_patch", "=", "CentredPatchIterator", ".", "applyslicer", "(", "arr", "slicer", "pmask", ")", ">>>", "print", "numpy", ".", "all", "(", "new_patch", "==", "patch", ")", "True", "..." ]
def applyslicer(array, slicer, pmask, cval = 0): r""" Apply a slicer returned by the iterator to a new array of the same dimensionality as the one used to initialize the iterator. Notes ----- If ``array`` has more dimensions than ``slicer`` and ``pmask``, the first ones are sliced. Parameters ---------- array : array_like A n-dimensional array. slicer : list List if `slice()` instances as returned by `next()`. pmask : narray The array mask as returned by `next()`. cval : number Value to fill undefined positions. Experiments ----------- >>> import numpy >>> from medpy.iterators import CentredPatchIterator >>> arr = numpy.arange(0, 25).reshape((5,5)) >>> for patch, pmask, _, slicer in CentredPatchIterator(arr, 3): >>> new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask) >>> print numpy.all(new_patch == patch) True ... """ l = len(slicer) patch = numpy.zeros(list(pmask.shape[:l]) + list(array.shape[l:]), array.dtype) if not 0 == cval: patch.fill(cval) sliced = array[slicer] patch[pmask] = sliced.reshape([numpy.prod(sliced.shape[:l])] + list(sliced.shape[l:])) return patch
[ "def", "applyslicer", "(", "array", ",", "slicer", ",", "pmask", ",", "cval", "=", "0", ")", ":", "l", "=", "len", "(", "slicer", ")", "patch", "=", "numpy", ".", "zeros", "(", "list", "(", "pmask", ".", "shape", "[", ":", "l", "]", ")", "+", "list", "(", "array", ".", "shape", "[", "l", ":", "]", ")", ",", "array", ".", "dtype", ")", "if", "not", "0", "==", "cval", ":", "patch", ".", "fill", "(", "cval", ")", "sliced", "=", "array", "[", "slicer", "]", "patch", "[", "pmask", "]", "=", "sliced", ".", "reshape", "(", "[", "numpy", ".", "prod", "(", "sliced", ".", "shape", "[", ":", "l", "]", ")", "]", "+", "list", "(", "sliced", ".", "shape", "[", "l", ":", "]", ")", ")", "return", "patch" ]
https://github.com/loli/medpy/blob/39131b94f0ab5328ab14a874229320efc2f74d98/medpy/iterators/patchwise.py#L320-L358
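A runnable version of the docstring's example above (updated to Python 3 print syntax; assumes medpy is installed):

import numpy
from medpy.iterators import CentredPatchIterator

arr = numpy.arange(0, 25).reshape((5, 5))
for patch, pmask, _, slicer in CentredPatchIterator(arr, 3):
    # re-apply the slicer returned by the iterator to the same array
    new_patch = CentredPatchIterator.applyslicer(arr, slicer, pmask)
    print(numpy.all(new_patch == patch))  # True for every patch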
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
paddlex/paddleseg/models/gcnet.py
python
GCNetHead.__init__
(self, num_classes, backbone_indices, backbone_channels, gc_channels, ratio, enable_auxiliary_loss=True)
[]
def __init__(self, num_classes, backbone_indices, backbone_channels, gc_channels, ratio, enable_auxiliary_loss=True): super().__init__() in_channels = backbone_channels[1] self.conv_bn_relu1 = layers.ConvBNReLU( in_channels=in_channels, out_channels=gc_channels, kernel_size=3, padding=1) self.gc_block = GlobalContextBlock( gc_channels=gc_channels, in_channels=gc_channels, ratio=ratio) self.conv_bn_relu2 = layers.ConvBNReLU( in_channels=gc_channels, out_channels=gc_channels, kernel_size=3, padding=1) self.conv_bn_relu3 = layers.ConvBNReLU( in_channels=in_channels + gc_channels, out_channels=gc_channels, kernel_size=3, padding=1) self.dropout = nn.Dropout(p=0.1) self.conv = nn.Conv2D( in_channels=gc_channels, out_channels=num_classes, kernel_size=1) if enable_auxiliary_loss: self.auxlayer = layers.AuxLayer( in_channels=backbone_channels[0], inter_channels=backbone_channels[0] // 4, out_channels=num_classes) self.backbone_indices = backbone_indices self.enable_auxiliary_loss = enable_auxiliary_loss
[ "def", "__init__", "(", "self", ",", "num_classes", ",", "backbone_indices", ",", "backbone_channels", ",", "gc_channels", ",", "ratio", ",", "enable_auxiliary_loss", "=", "True", ")", ":", "super", "(", ")", ".", "__init__", "(", ")", "in_channels", "=", "backbone_channels", "[", "1", "]", "self", ".", "conv_bn_relu1", "=", "layers", ".", "ConvBNReLU", "(", "in_channels", "=", "in_channels", ",", "out_channels", "=", "gc_channels", ",", "kernel_size", "=", "3", ",", "padding", "=", "1", ")", "self", ".", "gc_block", "=", "GlobalContextBlock", "(", "gc_channels", "=", "gc_channels", ",", "in_channels", "=", "gc_channels", ",", "ratio", "=", "ratio", ")", "self", ".", "conv_bn_relu2", "=", "layers", ".", "ConvBNReLU", "(", "in_channels", "=", "gc_channels", ",", "out_channels", "=", "gc_channels", ",", "kernel_size", "=", "3", ",", "padding", "=", "1", ")", "self", ".", "conv_bn_relu3", "=", "layers", ".", "ConvBNReLU", "(", "in_channels", "=", "in_channels", "+", "gc_channels", ",", "out_channels", "=", "gc_channels", ",", "kernel_size", "=", "3", ",", "padding", "=", "1", ")", "self", ".", "dropout", "=", "nn", ".", "Dropout", "(", "p", "=", "0.1", ")", "self", ".", "conv", "=", "nn", ".", "Conv2D", "(", "in_channels", "=", "gc_channels", ",", "out_channels", "=", "num_classes", ",", "kernel_size", "=", "1", ")", "if", "enable_auxiliary_loss", ":", "self", ".", "auxlayer", "=", "layers", ".", "AuxLayer", "(", "in_channels", "=", "backbone_channels", "[", "0", "]", ",", "inter_channels", "=", "backbone_channels", "[", "0", "]", "//", "4", ",", "out_channels", "=", "num_classes", ")", "self", ".", "backbone_indices", "=", "backbone_indices", "self", ".", "enable_auxiliary_loss", "=", "enable_auxiliary_loss" ]
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/paddlex/paddleseg/models/gcnet.py#L98-L142
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
python
BlenderbotSmallForCausalLM.get_decoder
(self)
return self.model.decoder
[]
def get_decoder(self): return self.model.decoder
[ "def", "get_decoder", "(", "self", ")", ":", "return", "self", ".", "model", ".", "decoder" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py#L1389-L1390
AndreasMadsen/stable-nalu
b3296ace137ffa4854edeef3759f1578b7650210
stable_nalu/layer/regualized_linear_mnac.py
python
RegualizedLinearMNACCell.__init__
(self, input_size, hidden_size, **kwargs)
[]
def __init__(self, input_size, hidden_size, **kwargs): super().__init__(ReRegualizedLinearMNACLayer, input_size, hidden_size, **kwargs)
[ "def", "__init__", "(", "self", ",", "input_size", ",", "hidden_size", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "__init__", "(", "ReRegualizedLinearMNACLayer", ",", "input_size", ",", "hidden_size", ",", "*", "*", "kwargs", ")" ]
https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/regualized_linear_mnac.py#L72-L73
kerlomz/captcha_platform
f7d719bd1239a987996e266bd7fe35c96003b378
middleware/impl/gif_frames.py
python
concat_arr
(img_arr)
return all_slice
[]
def concat_arr(img_arr): if len(img_arr) < 2: return img_arr[0] all_slice = img_arr[0] for im_slice in img_arr[1:]: all_slice = np.concatenate((all_slice, im_slice), axis=1) return all_slice
[ "def", "concat_arr", "(", "img_arr", ")", ":", "if", "len", "(", "img_arr", ")", "<", "2", ":", "return", "img_arr", "[", "0", "]", "all_slice", "=", "img_arr", "[", "0", "]", "for", "im_slice", "in", "img_arr", "[", "1", ":", "]", ":", "all_slice", "=", "np", ".", "concatenate", "(", "(", "all_slice", ",", "im_slice", ")", ",", "axis", "=", "1", ")", "return", "all_slice" ]
https://github.com/kerlomz/captcha_platform/blob/f7d719bd1239a987996e266bd7fe35c96003b378/middleware/impl/gif_frames.py#L19-L25
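A minimal sketch of what concat_arr does; the import path assumes the repo root is on sys.path, and the frames are made-up arrays:

import numpy as np
from middleware.impl.gif_frames import concat_arr

a = np.zeros((2, 3), dtype=np.uint8)  # two hypothetical 2x3 GIF frames
b = np.ones((2, 3), dtype=np.uint8)
merged = concat_arr([a, b])
print(merged.shape)  # (2, 6) -- frames are joined side by side along axis 1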
kerlomz/captcha_platform
f7d719bd1239a987996e266bd7fe35c96003b378
middleware/impl/corp_to_multi.py
python
coord_calc
(param, is_range=True, is_integer=True)
return result_group
[]
def coord_calc(param, is_range=True, is_integer=True): result_group = [] start_h = param['start_pos'][1] end_h = start_h + param['corp_size'][1] for row in range(param['corp_num'][1]): start_w = param['start_pos'][0] end_w = start_w + param['corp_size'][0] for col in range(param['corp_num'][0]): pos_range = [[start_w, end_w], [start_h, end_h]] t = lambda x: int(x) if is_integer else x pos_center = [t((start_w + end_w)/2), t((start_h + end_h)/2)] result_group.append(pos_range if is_range else pos_center) start_w = end_w + param['interval_size'][0] end_w = start_w + param['corp_size'][0] start_h = end_h + param['interval_size'][1] end_h = start_h + param['corp_size'][1] return result_group
[ "def", "coord_calc", "(", "param", ",", "is_range", "=", "True", ",", "is_integer", "=", "True", ")", ":", "result_group", "=", "[", "]", "start_h", "=", "param", "[", "'start_pos'", "]", "[", "1", "]", "end_h", "=", "start_h", "+", "param", "[", "'corp_size'", "]", "[", "1", "]", "for", "row", "in", "range", "(", "param", "[", "'corp_num'", "]", "[", "1", "]", ")", ":", "start_w", "=", "param", "[", "'start_pos'", "]", "[", "0", "]", "end_w", "=", "start_w", "+", "param", "[", "'corp_size'", "]", "[", "0", "]", "for", "col", "in", "range", "(", "param", "[", "'corp_num'", "]", "[", "0", "]", ")", ":", "pos_range", "=", "[", "[", "start_w", ",", "end_w", "]", ",", "[", "start_h", ",", "end_h", "]", "]", "t", "=", "lambda", "x", ":", "int", "(", "x", ")", "if", "is_integer", "else", "x", "pos_center", "=", "[", "t", "(", "(", "start_w", "+", "end_w", ")", "/", "2", ")", ",", "t", "(", "(", "start_h", "+", "end_h", ")", "/", "2", ")", "]", "result_group", ".", "append", "(", "pos_range", "if", "is_range", "else", "pos_center", ")", "start_w", "=", "end_w", "+", "param", "[", "'interval_size'", "]", "[", "0", "]", "end_w", "=", "start_w", "+", "param", "[", "'corp_size'", "]", "[", "0", "]", "start_h", "=", "end_h", "+", "param", "[", "'interval_size'", "]", "[", "1", "]", "end_h", "=", "start_h", "+", "param", "[", "'corp_size'", "]", "[", "1", "]", "return", "result_group" ]
https://github.com/kerlomz/captcha_platform/blob/f7d719bd1239a987996e266bd7fe35c96003b378/middleware/impl/corp_to_multi.py#L10-L27
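A hedged usage sketch for coord_calc; the dict keys follow the function body above, but all concrete numbers are invented for illustration:

from middleware.impl.corp_to_multi import coord_calc  # assumes repo on sys.path

param = {
    'start_pos': (10, 10),    # (x, y) of the first crop's top-left corner
    'corp_size': (20, 20),    # width and height of each crop
    'corp_num': (3, 2),       # crops per row, number of rows
    'interval_size': (5, 5),  # horizontal and vertical gap between crops
}
ranges = coord_calc(param)                   # [[x0, x1], [y0, y1]] per crop
centers = coord_calc(param, is_range=False)  # integer centre points instead
print(len(ranges), centers[0])               # 6 crops; first centre is [20, 20]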
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/Python-2.7.9/Lib/plat-irix6/IN.py
python
IS_IPV4SOCKADDR
(a)
return
[]
def IS_IPV4SOCKADDR(a): return
[ "def", "IS_IPV4SOCKADDR", "(", "a", ")", ":", "return" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/plat-irix6/IN.py#L296-L296
globocom/m3u8
cf7ae5fda4681efcea796cd7c51c02f152c36009
m3u8/model.py
python
M3U8._initialize_attributes
(self)
[]
def _initialize_attributes(self): self.keys = [ Key(base_uri=self.base_uri, **params) if params else None for params in self.data.get('keys', []) ] self.segments = SegmentList([ Segment(base_uri=self.base_uri, keyobject=find_key(segment.get('key', {}), self.keys), **segment) for segment in self.data.get('segments', []) ]) #self.keys = get_uniques([ segment.key for segment in self.segments ]) for attr, param in self.simple_attributes: setattr(self, attr, self.data.get(param)) self.files = [] for key in self.keys: # Avoid None key, it could be the first one, don't repeat them if key and key.uri not in self.files: self.files.append(key.uri) self.files.extend(self.segments.uri) self.media = MediaList([ Media(base_uri=self.base_uri, **media) for media in self.data.get('media', []) ]) self.playlists = PlaylistList([ Playlist(base_uri=self.base_uri, media=self.media, **playlist) for playlist in self.data.get('playlists', []) ]) self.iframe_playlists = PlaylistList() for ifr_pl in self.data.get('iframe_playlists', []): self.iframe_playlists.append(IFramePlaylist(base_uri=self.base_uri, uri=ifr_pl['uri'], iframe_stream_info=ifr_pl['iframe_stream_info']) ) self.segment_map = self.data.get('segment_map') start = self.data.get('start', None) self.start = start and Start(**start) server_control = self.data.get('server_control', None) self.server_control = server_control and ServerControl(**server_control) part_inf = self.data.get('part_inf', None) self.part_inf = part_inf and PartInformation(**part_inf) skip = self.data.get('skip', None) self.skip = skip and Skip(**skip) self.rendition_reports = RenditionReportList([ RenditionReport(base_uri=self.base_uri, **rendition_report) for rendition_report in self.data.get('rendition_reports', []) ]) self.session_data = SessionDataList([ SessionData(**session_data) for session_data in self.data.get('session_data', []) if 'data_id' in session_data ]) self.session_keys = [ SessionKey(base_uri=self.base_uri, **params) if params else None for params in self.data.get('session_keys', []) ] preload_hint = self.data.get('preload_hint', None) self.preload_hint = preload_hint and PreloadHint(base_uri=self.base_uri, **preload_hint)
[ "def", "_initialize_attributes", "(", "self", ")", ":", "self", ".", "keys", "=", "[", "Key", "(", "base_uri", "=", "self", ".", "base_uri", ",", "*", "*", "params", ")", "if", "params", "else", "None", "for", "params", "in", "self", ".", "data", ".", "get", "(", "'keys'", ",", "[", "]", ")", "]", "self", ".", "segments", "=", "SegmentList", "(", "[", "Segment", "(", "base_uri", "=", "self", ".", "base_uri", ",", "keyobject", "=", "find_key", "(", "segment", ".", "get", "(", "'key'", ",", "{", "}", ")", ",", "self", ".", "keys", ")", ",", "*", "*", "segment", ")", "for", "segment", "in", "self", ".", "data", ".", "get", "(", "'segments'", ",", "[", "]", ")", "]", ")", "#self.keys = get_uniques([ segment.key for segment in self.segments ])", "for", "attr", ",", "param", "in", "self", ".", "simple_attributes", ":", "setattr", "(", "self", ",", "attr", ",", "self", ".", "data", ".", "get", "(", "param", ")", ")", "self", ".", "files", "=", "[", "]", "for", "key", "in", "self", ".", "keys", ":", "# Avoid None key, it could be the first one, don't repeat them", "if", "key", "and", "key", ".", "uri", "not", "in", "self", ".", "files", ":", "self", ".", "files", ".", "append", "(", "key", ".", "uri", ")", "self", ".", "files", ".", "extend", "(", "self", ".", "segments", ".", "uri", ")", "self", ".", "media", "=", "MediaList", "(", "[", "Media", "(", "base_uri", "=", "self", ".", "base_uri", ",", "*", "*", "media", ")", "for", "media", "in", "self", ".", "data", ".", "get", "(", "'media'", ",", "[", "]", ")", "]", ")", "self", ".", "playlists", "=", "PlaylistList", "(", "[", "Playlist", "(", "base_uri", "=", "self", ".", "base_uri", ",", "media", "=", "self", ".", "media", ",", "*", "*", "playlist", ")", "for", "playlist", "in", "self", ".", "data", ".", "get", "(", "'playlists'", ",", "[", "]", ")", "]", ")", "self", ".", "iframe_playlists", "=", "PlaylistList", "(", ")", "for", "ifr_pl", "in", "self", ".", "data", ".", "get", "(", "'iframe_playlists'", ",", "[", "]", ")", ":", "self", ".", "iframe_playlists", ".", "append", "(", "IFramePlaylist", "(", "base_uri", "=", "self", ".", "base_uri", ",", "uri", "=", "ifr_pl", "[", "'uri'", "]", ",", "iframe_stream_info", "=", "ifr_pl", "[", "'iframe_stream_info'", "]", ")", ")", "self", ".", "segment_map", "=", "self", ".", "data", ".", "get", "(", "'segment_map'", ")", "start", "=", "self", ".", "data", ".", "get", "(", "'start'", ",", "None", ")", "self", ".", "start", "=", "start", "and", "Start", "(", "*", "*", "start", ")", "server_control", "=", "self", ".", "data", ".", "get", "(", "'server_control'", ",", "None", ")", "self", ".", "server_control", "=", "server_control", "and", "ServerControl", "(", "*", "*", "server_control", ")", "part_inf", "=", "self", ".", "data", ".", "get", "(", "'part_inf'", ",", "None", ")", "self", ".", "part_inf", "=", "part_inf", "and", "PartInformation", "(", "*", "*", "part_inf", ")", "skip", "=", "self", ".", "data", ".", "get", "(", "'skip'", ",", "None", ")", "self", ".", "skip", "=", "skip", "and", "Skip", "(", "*", "*", "skip", ")", "self", ".", "rendition_reports", "=", "RenditionReportList", "(", "[", "RenditionReport", "(", "base_uri", "=", "self", ".", "base_uri", ",", "*", "*", "rendition_report", ")", "for", "rendition_report", "in", "self", ".", "data", ".", "get", "(", "'rendition_reports'", ",", "[", "]", ")", "]", ")", "self", ".", "session_data", "=", "SessionDataList", "(", "[", "SessionData", "(", "*", "*", "session_data", ")", "for", "session_data", "in", "self", ".", "data", ".", "get", "(", 
"'session_data'", ",", "[", "]", ")", "if", "'data_id'", "in", "session_data", "]", ")", "self", ".", "session_keys", "=", "[", "SessionKey", "(", "base_uri", "=", "self", ".", "base_uri", ",", "*", "*", "params", ")", "if", "params", "else", "None", "for", "params", "in", "self", ".", "data", ".", "get", "(", "'session_keys'", ",", "[", "]", ")", "]", "preload_hint", "=", "self", ".", "data", ".", "get", "(", "'preload_hint'", ",", "None", ")", "self", ".", "preload_hint", "=", "preload_hint", "and", "PreloadHint", "(", "base_uri", "=", "self", ".", "base_uri", ",", "*", "*", "preload_hint", ")" ]
https://github.com/globocom/m3u8/blob/cf7ae5fda4681efcea796cd7c51c02f152c36009/m3u8/model.py#L159-L212
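_initialize_attributes runs whenever a playlist is parsed; a hedged sketch of the public entry point that exercises it (requires the m3u8 package):

import m3u8

text = ('#EXTM3U\n'
        '#EXT-X-TARGETDURATION:10\n'
        '#EXTINF:9.0,\n'
        'seg1.ts\n'
        '#EXT-X-ENDLIST')
playlist = m3u8.loads(text)
print(playlist.segments.uri)  # ['seg1.ts']
print(playlist.files)         # key URIs (none here) followed by segment URIs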
swisscom/ai-research-keyphrase-extraction
78c0b13633f0e443cf43892b098b4c8dabf3dad9
swisscom_ai/research_keyphrase/preprocessing/postagging.py
python
PosTagging.pos_tag_file
(self, input_path, output_path=None)
POS Tag a file. Either we have a list of list (for each sentence a list of tuple (word,tag)) Or a file with the POS tagged text Note : The jumpline is only for readibility purpose , when reading a tagged file we'll use again sent_tokenize to find the sentences boundaries. :param input_path: path of the source file :param output_path: If set write POS tagged text with separator (self.pos_tag_raw_text with as_tuple_list False) If not set, return list of list of tuple (self.post_tag_raw_text with as_tuple_list = True) :return: resulting POS tagged text as a list of list of tuple or nothing if output path is set.
POS Tag a file. Either we have a list of list (for each sentence a list of tuple (word,tag)) Or a file with the POS tagged text
[ "POS", "Tag", "a", "file", ".", "Either", "we", "have", "a", "list", "of", "list", "(", "for", "each", "sentence", "a", "list", "of", "tuple", "(", "word", "tag", "))", "Or", "a", "file", "with", "the", "POS", "tagged", "text" ]
def pos_tag_file(self, input_path, output_path=None): """ POS Tag a file. Either we have a list of list (for each sentence a list of tuple (word,tag)) Or a file with the POS tagged text Note : The jumpline is only for readibility purpose , when reading a tagged file we'll use again sent_tokenize to find the sentences boundaries. :param input_path: path of the source file :param output_path: If set write POS tagged text with separator (self.pos_tag_raw_text with as_tuple_list False) If not set, return list of list of tuple (self.post_tag_raw_text with as_tuple_list = True) :return: resulting POS tagged text as a list of list of tuple or nothing if output path is set. """ original_text = read_file(input_path) if output_path is not None: tagged_text = self.pos_tag_raw_text(original_text, as_tuple_list=False) # Write to the output the POS-Tagged text. write_string(tagged_text, output_path) else: return self.pos_tag_raw_text(original_text, as_tuple_list=True)
[ "def", "pos_tag_file", "(", "self", ",", "input_path", ",", "output_path", "=", "None", ")", ":", "original_text", "=", "read_file", "(", "input_path", ")", "if", "output_path", "is", "not", "None", ":", "tagged_text", "=", "self", ".", "pos_tag_raw_text", "(", "original_text", ",", "as_tuple_list", "=", "False", ")", "# Write to the output the POS-Tagged text.", "write_string", "(", "tagged_text", ",", "output_path", ")", "else", ":", "return", "self", ".", "pos_tag_raw_text", "(", "original_text", ",", "as_tuple_list", "=", "True", ")" ]
https://github.com/swisscom/ai-research-keyphrase-extraction/blob/78c0b13633f0e443cf43892b098b4c8dabf3dad9/swisscom_ai/research_keyphrase/preprocessing/postagging.py#L65-L89
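A hedged usage sketch; the concrete tagger class and its constructor arguments are assumptions based on the other taggers in this module, and the file names are hypothetical:

from swisscom_ai.research_keyphrase.preprocessing.postagging import PosTaggingCoreNLP

tagger = PosTaggingCoreNLP('localhost', 9000)  # assumes a local CoreNLP server
tagged = tagger.pos_tag_file('doc.txt')        # list of [(word, tag), ...] per sentence
tagger.pos_tag_file('doc.txt', output_path='doc.tagged.txt')  # writes to disk instead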
argoai/argoverse-api
575c222e7d7fa0c56bdc9dc20f71464c4bea2e6d
argoverse/utils/sim2.py
python
Sim2.scale
(self)
return self.s_
Return the scale.
Return the scale.
[ "Return", "the", "scale", "." ]
def scale(self) -> float: """Return the scale.""" return self.s_
[ "def", "scale", "(", "self", ")", "->", "float", ":", "return", "self", ".", "s_" ]
https://github.com/argoai/argoverse-api/blob/575c222e7d7fa0c56bdc9dc20f71464c4bea2e6d/argoverse/utils/sim2.py#L88-L90
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/patched/notpip/_internal/utils/parallel.py
python
_map_multiprocess
(func, iterable, chunksize=1)
Chop iterable into chunks and submit them to a process pool. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1. Return an unordered iterator of the results.
Chop iterable into chunks and submit them to a process pool.
[ "Chop", "iterable", "into", "chunks", "and", "submit", "them", "to", "a", "process", "pool", "." ]
def _map_multiprocess(func, iterable, chunksize=1): # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T] """Chop iterable into chunks and submit them to a process pool. For very long iterables using a large value for chunksize can make the job complete much faster than using the default value of 1. Return an unordered iterator of the results. """ with closing(ProcessPool()) as pool: return pool.imap_unordered(func, iterable, chunksize)
[ "def", "_map_multiprocess", "(", "func", ",", "iterable", ",", "chunksize", "=", "1", ")", ":", "# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]", "with", "closing", "(", "ProcessPool", "(", ")", ")", "as", "pool", ":", "return", "pool", ".", "imap_unordered", "(", "func", ",", "iterable", ",", "chunksize", ")" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_internal/utils/parallel.py#L71-L81
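_map_multiprocess is a private pip helper; the same pattern with only the standard library looks like this (a sketch, not pip's API):

from contextlib import closing
from multiprocessing import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    with closing(Pool()) as pool:
        # chunked submission, unordered results -- as in the helper above
        print(sorted(pool.imap_unordered(square, range(10), chunksize=2)))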
danielgtaylor/arista
edcf2565eea92014b55c1084acd12a832aab1ca2
arista/inputs/haldisco.py
python
InputFinder.device_removed
(self, udi)
Called when a device has been removed from the signal. If the device is a volume with a video DVD the "video-lost" signal is emitted.
Called when a device has been removed from the signal. If the device is a volume with a video DVD the "video-lost" signal is emitted.
[ "Called", "when", "a", "device", "has", "been", "removed", "from", "the", "signal", ".", "If", "the", "device", "is", "a", "volume", "with", "a", "video", "DVD", "the", "video", "-", "lost", "signal", "is", "emitted", "." ]
def device_removed(self, udi): """ Called when a device has been removed from the signal. If the device is a volume with a video DVD the "video-lost" signal is emitted. """ for block, drive in self.drives.items(): if drive.video_udi == udi: drive.video = False drive.udi = "" label = drive.label drive.label = "" self.emit("disc-lost", drive, label) break for device, capture in self.capture_devices.items(): if capture.udi == udi: self.emit("v4l-capture-lost", self.capture_devices[device]) del self.capture_devices[device] break
[ "def", "device_removed", "(", "self", ",", "udi", ")", ":", "for", "block", ",", "drive", "in", "self", ".", "drives", ".", "items", "(", ")", ":", "if", "drive", ".", "video_udi", "==", "udi", ":", "drive", ".", "video", "=", "False", "drive", ".", "udi", "=", "\"\"", "label", "=", "drive", ".", "label", "drive", ".", "label", "=", "\"\"", "self", ".", "emit", "(", "\"disc-lost\"", ",", "drive", ",", "label", ")", "break", "for", "device", ",", "capture", "in", "self", ".", "capture_devices", ".", "items", "(", ")", ":", "if", "capture", ".", "udi", "==", "udi", ":", "self", ".", "emit", "(", "\"v4l-capture-lost\"", ",", "self", ".", "capture_devices", "[", "device", "]", ")", "del", "self", ".", "capture_devices", "[", "device", "]", "break" ]
https://github.com/danielgtaylor/arista/blob/edcf2565eea92014b55c1084acd12a832aab1ca2/arista/inputs/haldisco.py#L246-L265
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/setuptools/command/install_lib.py
python
install_lib._exclude_pkg_path
(self, pkg, exclusion_path)
return os.path.join(self.install_dir, *parts)
Given a package name and exclusion path within that package, compute the full exclusion path.
Given a package name and exclusion path within that package, compute the full exclusion path.
[ "Given", "a", "package", "name", "and", "exclusion", "path", "within", "that", "package", "compute", "the", "full", "exclusion", "path", "." ]
def _exclude_pkg_path(self, pkg, exclusion_path): """ Given a package name and exclusion path within that package, compute the full exclusion path. """ parts = pkg.split('.') + [exclusion_path] return os.path.join(self.install_dir, *parts)
[ "def", "_exclude_pkg_path", "(", "self", ",", "pkg", ",", "exclusion_path", ")", ":", "parts", "=", "pkg", ".", "split", "(", "'.'", ")", "+", "[", "exclusion_path", "]", "return", "os", ".", "path", ".", "join", "(", "self", ".", "install_dir", ",", "*", "parts", ")" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/setuptools/command/install_lib.py#L31-L37
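The computation shown standalone, with hypothetical values:

import os

install_dir = '/tmp/site-packages'  # hypothetical install directory
parts = 'foo.bar'.split('.') + ['data/skip.txt']
print(os.path.join(install_dir, *parts))
# /tmp/site-packages/foo/bar/data/skip.txt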
git-cola/git-cola
b48b8028e0c3baf47faf7b074b9773737358163d
cola/widgets/browse.py
python
BrowseBranch.setModel
(self, model)
[]
def setModel(self, model): self.tree.setModel(model)
[ "def", "setModel", "(", "self", ",", "model", ")", ":", "self", ".", "tree", ".", "setModel", "(", "model", ")" ]
https://github.com/git-cola/git-cola/blob/b48b8028e0c3baf47faf7b074b9773737358163d/cola/widgets/browse.py#L655-L656
kennethreitz-archive/requests3
69eb662703b40db58fdc6c095d0fe130c56649bb
requests3/core/_http/contrib/securetransport.py
python
WrappedSocket._reuse
(self)
[]
def _reuse(self): self._makefile_refs += 1
[ "def", "_reuse", "(", "self", ")", ":", "self", ".", "_makefile_refs", "+=", "1" ]
https://github.com/kennethreitz-archive/requests3/blob/69eb662703b40db58fdc6c095d0fe130c56649bb/requests3/core/_http/contrib/securetransport.py#L642-L643
larryhastings/gilectomy
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
Lib/asyncio/events.py
python
AbstractEventLoop.is_running
(self)
Return whether the event loop is currently running.
Return whether the event loop is currently running.
[ "Return", "whether", "the", "event", "loop", "is", "currently", "running", "." ]
def is_running(self): """Return whether the event loop is currently running.""" raise NotImplementedError
[ "def", "is_running", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/asyncio/events.py#L232-L234
pypa/setuptools
9f37366aab9cd8f6baa23e6a77cfdb8daf97757e
pkg_resources/_vendor/packaging/tags.py
python
Tag.__repr__
(self)
return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
[]
def __repr__(self) -> str: return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
[ "def", "__repr__", "(", "self", ")", "->", "str", ":", "return", "\"<{self} @ {self_id}>\"", ".", "format", "(", "self", "=", "self", ",", "self_id", "=", "id", "(", "self", ")", ")" ]
https://github.com/pypa/setuptools/blob/9f37366aab9cd8f6baa23e6a77cfdb8daf97757e/pkg_resources/_vendor/packaging/tags.py#L92-L93
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_serviceaccount.py
python
main
()
ansible oc module for service accounts
ansible oc module for service accounts
[ "ansible", "oc", "module", "for", "service", "accounts" ]
def main(): ''' ansible oc module for service accounts ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), name=dict(default=None, required=True, type='str'), namespace=dict(default=None, required=True, type='str'), secrets=dict(default=None, type='list'), image_pull_secrets=dict(default=None, type='list'), ), supports_check_mode=True, ) rval = OCServiceAccount.run_ansible(module.params, module.check_mode) if 'failed' in rval: module.fail_json(**rval) module.exit_json(**rval)
[ "def", "main", "(", ")", ":", "module", "=", "AnsibleModule", "(", "argument_spec", "=", "dict", "(", "kubeconfig", "=", "dict", "(", "default", "=", "'/etc/origin/master/admin.kubeconfig'", ",", "type", "=", "'str'", ")", ",", "state", "=", "dict", "(", "default", "=", "'present'", ",", "type", "=", "'str'", ",", "choices", "=", "[", "'present'", ",", "'absent'", ",", "'list'", "]", ")", ",", "debug", "=", "dict", "(", "default", "=", "False", ",", "type", "=", "'bool'", ")", ",", "name", "=", "dict", "(", "default", "=", "None", ",", "required", "=", "True", ",", "type", "=", "'str'", ")", ",", "namespace", "=", "dict", "(", "default", "=", "None", ",", "required", "=", "True", ",", "type", "=", "'str'", ")", ",", "secrets", "=", "dict", "(", "default", "=", "None", ",", "type", "=", "'list'", ")", ",", "image_pull_secrets", "=", "dict", "(", "default", "=", "None", ",", "type", "=", "'list'", ")", ",", ")", ",", "supports_check_mode", "=", "True", ",", ")", "rval", "=", "OCServiceAccount", ".", "run_ansible", "(", "module", ".", "params", ",", "module", ".", "check_mode", ")", "if", "'failed'", "in", "rval", ":", "module", ".", "fail_json", "(", "*", "*", "rval", ")", "module", ".", "exit_json", "(", "*", "*", "rval", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_vendored_deps/library/oc_serviceaccount.py#L1761-L1784
iiau-tracker/SPLT
a196e603798e9be969d9d985c087c11cad1cda43
lib/object_detection/core/losses.py
python
WeightedL2LocalizationLoss._compute_loss
(self, prediction_tensor, target_tensor, weights)
return tf.reduce_sum(square_diff)
Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a (scalar) tensor representing the value of the loss function or a float tensor of shape [batch_size, num_anchors]
Compute loss function.
[ "Compute", "loss", "function", "." ]
def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a (scalar) tensor representing the value of the loss function or a float tensor of shape [batch_size, num_anchors] """ weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims( weights, 2) square_diff = 0.5 * tf.square(weighted_diff) if self._anchorwise_output: return tf.reduce_sum(square_diff, 2) return tf.reduce_sum(square_diff)
[ "def", "_compute_loss", "(", "self", ",", "prediction_tensor", ",", "target_tensor", ",", "weights", ")", ":", "weighted_diff", "=", "(", "prediction_tensor", "-", "target_tensor", ")", "*", "tf", ".", "expand_dims", "(", "weights", ",", "2", ")", "square_diff", "=", "0.5", "*", "tf", ".", "square", "(", "weighted_diff", ")", "if", "self", ".", "_anchorwise_output", ":", "return", "tf", ".", "reduce_sum", "(", "square_diff", ",", "2", ")", "return", "tf", ".", "reduce_sum", "(", "square_diff", ")" ]
https://github.com/iiau-tracker/SPLT/blob/a196e603798e9be969d9d985c087c11cad1cda43/lib/object_detection/core/losses.py#L103-L122
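A tiny worked example of the weighted L2 computation, runnable under TensorFlow 2 eager mode; shapes follow the docstring ([batch, anchors, code_size] and [batch, anchors]):

import tensorflow as tf

pred = tf.constant([[[1.0, 2.0], [3.0, 4.0]]])    # [1, 2, 2]
target = tf.constant([[[1.0, 1.0], [3.0, 3.0]]])
weights = tf.constant([[1.0, 0.5]])               # [1, 2]

diff = (pred - target) * tf.expand_dims(weights, 2)
loss = tf.reduce_sum(0.5 * tf.square(diff))       # scalar form of the loss above
print(float(loss))  # 0.5 * (1.0 + 0.25) = 0.625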
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/qtconsole/kill_ring.py
python
QtKillRing.__init__
(self, text_edit)
Create a kill ring attached to the specified Qt text edit.
Create a kill ring attached to the specified Qt text edit.
[ "Create", "a", "kill", "ring", "attached", "to", "the", "specified", "Qt", "text", "edit", "." ]
def __init__(self, text_edit): """ Create a kill ring attached to the specified Qt text edit. """ assert isinstance(text_edit, (QtGui.QTextEdit, QtGui.QPlainTextEdit)) super(QtKillRing, self).__init__() self._ring = KillRing() self._prev_yank = None self._skip_cursor = False self._text_edit = text_edit text_edit.cursorPositionChanged.connect(self._cursor_position_changed)
[ "def", "__init__", "(", "self", ",", "text_edit", ")", ":", "assert", "isinstance", "(", "text_edit", ",", "(", "QtGui", ".", "QTextEdit", ",", "QtGui", ".", "QPlainTextEdit", ")", ")", "super", "(", "QtKillRing", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "_ring", "=", "KillRing", "(", ")", "self", ".", "_prev_yank", "=", "None", "self", ".", "_skip_cursor", "=", "False", "self", ".", "_text_edit", "=", "text_edit", "text_edit", ".", "cursorPositionChanged", ".", "connect", "(", "self", ".", "_cursor_position_changed", ")" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/qtconsole/kill_ring.py#L62-L73
m-rtijn/mpu6050
0626053a5e1182f4951b78b8326691a9223a5f7d
mpu6050/mpu6050.py
python
mpu6050.set_accel_range
(self, accel_range)
Sets the range of the accelerometer to range. accel_range -- the range to set the accelerometer to. Using a pre-defined range is advised.
Sets the range of the accelerometer to range.
[ "Sets", "the", "range", "of", "the", "accelerometer", "to", "range", "." ]
def set_accel_range(self, accel_range): """Sets the range of the accelerometer to range. accel_range -- the range to set the accelerometer to. Using a pre-defined range is advised. """ # First change it to 0x00 to make sure we write the correct value later self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00) # Write the new range to the ACCEL_CONFIG register self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)
[ "def", "set_accel_range", "(", "self", ",", "accel_range", ")", ":", "# First change it to 0x00 to make sure we write the correct value later", "self", ".", "bus", ".", "write_byte_data", "(", "self", ".", "address", ",", "self", ".", "ACCEL_CONFIG", ",", "0x00", ")", "# Write the new range to the ACCEL_CONFIG register", "self", ".", "bus", ".", "write_byte_data", "(", "self", ".", "address", ",", "self", ".", "ACCEL_CONFIG", ",", "accel_range", ")" ]
https://github.com/m-rtijn/mpu6050/blob/0626053a5e1182f4951b78b8326691a9223a5f7d/mpu6050/mpu6050.py#L108-L118
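A hedged usage sketch; it needs an MPU-6050 on the I2C bus (plus the smbus dependency) and uses one of the class's pre-defined range constants, as the docstring advises:

from mpu6050 import mpu6050

sensor = mpu6050(0x68)                         # default I2C address
sensor.set_accel_range(sensor.ACCEL_RANGE_4G)  # pre-defined range constant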
alanhamlett/pip-update-requirements
ce875601ef278c8ce00ad586434a978731525561
pur/packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
python
_cf_data_from_bytes
(bytestring)
return CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) )
Given a bytestring, create a CFData object from it. This CFData object must be CFReleased by the caller.
Given a bytestring, create a CFData object from it. This CFData object must be CFReleased by the caller.
[ "Given", "a", "bytestring", "create", "a", "CFData", "object", "from", "it", ".", "This", "CFData", "object", "must", "be", "CFReleased", "by", "the", "caller", "." ]
def _cf_data_from_bytes(bytestring): """ Given a bytestring, create a CFData object from it. This CFData object must be CFReleased by the caller. """ return CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) )
[ "def", "_cf_data_from_bytes", "(", "bytestring", ")", ":", "return", "CoreFoundation", ".", "CFDataCreate", "(", "CoreFoundation", ".", "kCFAllocatorDefault", ",", "bytestring", ",", "len", "(", "bytestring", ")", ")" ]
https://github.com/alanhamlett/pip-update-requirements/blob/ce875601ef278c8ce00ad586434a978731525561/pur/packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py#L27-L34
bruderstein/PythonScript
df9f7071ddf3a079e3a301b9b53a6dc78cf1208f
PythonLib/min/pydoc.py
python
TextDoc.indent
(self, text, prefix=' ')
return '\n'.join(lines)
Indent text by prepending a given prefix to each line.
Indent text by prepending a given prefix to each line.
[ "Indent", "text", "by", "prepending", "a", "given", "prefix", "to", "each", "line", "." ]
def indent(self, text, prefix=' '): """Indent text by prepending a given prefix to each line.""" if not text: return '' lines = [prefix + line for line in text.split('\n')] if lines: lines[-1] = lines[-1].rstrip() return '\n'.join(lines)
[ "def", "indent", "(", "self", ",", "text", ",", "prefix", "=", "' '", ")", ":", "if", "not", "text", ":", "return", "''", "lines", "=", "[", "prefix", "+", "line", "for", "line", "in", "text", ".", "split", "(", "'\\n'", ")", "]", "if", "lines", ":", "lines", "[", "-", "1", "]", "=", "lines", "[", "-", "1", "]", ".", "rstrip", "(", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/min/pydoc.py#L1169-L1174
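The same prefixing behaviour shown standalone; note the helper right-strips the final line, which matters when the text ends with a newline:

text = 'first\nsecond\n'
prefix = '    '
lines = [prefix + line for line in text.split('\n')]
lines[-1] = lines[-1].rstrip()  # the trailing empty line loses its prefix
print('\n'.join(lines))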
yuxiaokui/Intranet-Penetration
f57678a204840c83cbf3308e3470ae56c5ff514b
proxy/XX-Net/code/default/gae_proxy/server/lib/google/appengine/api/images/__init__.py
python
create_rpc
(deadline=None, callback=None)
return apiproxy_stub_map.UserRPC("images", deadline, callback)
Creates an RPC object for use with the images API. Args: deadline: Optional deadline in seconds for the operation; the default is a system-specific deadline (typically 5 seconds). callback: Optional callable to invoke on completion. Returns: An apiproxy_stub_map.UserRPC object specialized for this service.
Creates an RPC object for use with the images API.
[ "Creates", "an", "RPC", "object", "for", "use", "with", "the", "images", "API", "." ]
def create_rpc(deadline=None, callback=None): """Creates an RPC object for use with the images API. Args: deadline: Optional deadline in seconds for the operation; the default is a system-specific deadline (typically 5 seconds). callback: Optional callable to invoke on completion. Returns: An apiproxy_stub_map.UserRPC object specialized for this service. """ return apiproxy_stub_map.UserRPC("images", deadline, callback)
[ "def", "create_rpc", "(", "deadline", "=", "None", ",", "callback", "=", "None", ")", ":", "return", "apiproxy_stub_map", ".", "UserRPC", "(", "\"images\"", ",", "deadline", ",", "callback", ")" ]
https://github.com/yuxiaokui/Intranet-Penetration/blob/f57678a204840c83cbf3308e3470ae56c5ff514b/proxy/XX-Net/code/default/gae_proxy/server/lib/google/appengine/api/images/__init__.py#L1018-L1029
davidastephens/pandas-finance
3b7a80df8159fa253f08191b7aec5587976c26e7
pandas_finance/api.py
python
OptionChain.near_calls
(self)
return self._pdr._chop_data(self.calls, 5, self.underlying.price)
[]
def near_calls(self): return self._pdr._chop_data(self.calls, 5, self.underlying.price)
[ "def", "near_calls", "(", "self", ")", ":", "return", "self", ".", "_pdr", ".", "_chop_data", "(", "self", ".", "calls", ",", "5", ",", "self", ".", "underlying", ".", "price", ")" ]
https://github.com/davidastephens/pandas-finance/blob/3b7a80df8159fa253f08191b7aec5587976c26e7/pandas_finance/api.py#L257-L258
QUANTAXIS/QUANTAXIS
d6eccb97c8385854aa596d6ba8d70ec0655519ff
QUANTAXIS/QAData/base_datastruct.py
python
_quotation_base.stdev
(self)
return res
返回DataStruct.price的样本标准差 Sample standard deviation
返回DataStruct.price的样本标准差 Sample standard deviation
[ "返回DataStruct", ".", "price的样本标准差", "Sample", "standard", "deviation" ]
def stdev(self): '返回DataStruct.price的样本标准差 Sample standard deviation' res = self.price.groupby(level=1).apply(lambda x: statistics.stdev(x)) res.name = 'stdev' return res
[ "def", "stdev", "(", "self", ")", ":", "res", "=", "self", ".", "price", ".", "groupby", "(", "level", "=", "1", ")", ".", "apply", "(", "lambda", "x", ":", "statistics", ".", "stdev", "(", "x", ")", ")", "res", ".", "name", "=", "'stdev'", "return", "res" ]
https://github.com/QUANTAXIS/QUANTAXIS/blob/d6eccb97c8385854aa596d6ba8d70ec0655519ff/QUANTAXIS/QAData/base_datastruct.py#L516-L520
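A standalone sketch of the per-code sample standard deviation this property computes; the (date, code) MultiIndex mirrors the DataStruct layout, but the numbers are invented:

import statistics
import pandas as pd

idx = pd.MultiIndex.from_product(
    [['2020-01-01', '2020-01-02', '2020-01-03'], ['000001', '600000']],
    names=['date', 'code'])
price = pd.Series([10.0, 20.0, 11.0, 21.0, 12.0, 22.0], index=idx)
res = price.groupby(level=1).apply(lambda x: statistics.stdev(x))
res.name = 'stdev'
print(res)  # one sample standard deviation per code (1.0 for both here)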
Fantomas42/django-blog-zinnia
881101a9d1d455b2fc581d6f4ae0947cdd8126c6
zinnia/templatetags/zinnia.py
python
get_random_entries
(number=5, template='zinnia/tags/entries_random.html')
return {'template': template, 'entries': Entry.published.order_by('?')[:number]}
Return random entries.
Return random entries.
[ "Return", "random", "entries", "." ]
def get_random_entries(number=5, template='zinnia/tags/entries_random.html'): """ Return random entries. """ return {'template': template, 'entries': Entry.published.order_by('?')[:number]}
[ "def", "get_random_entries", "(", "number", "=", "5", ",", "template", "=", "'zinnia/tags/entries_random.html'", ")", ":", "return", "{", "'template'", ":", "template", ",", "'entries'", ":", "Entry", ".", "published", ".", "order_by", "(", "'?'", ")", "[", ":", "number", "]", "}" ]
https://github.com/Fantomas42/django-blog-zinnia/blob/881101a9d1d455b2fc581d6f4ae0947cdd8126c6/zinnia/templatetags/zinnia.py#L116-L121
i-pan/kaggle-rsna18
2db498fe99615d935aa676f04847d0c562fd8e46
models/RetinaNet/keras_retinanet/models/retinanet.py
python
retinanet_bbox
( model = None, anchor_parameters = AnchorParameters.default, nms = True, class_specific_filter = True, name = 'retinanet-bbox', **kwargs )
return keras.models.Model(inputs=model.inputs, outputs=outputs, name=name)
Construct a RetinaNet model on top of a backbone and adds convenience functions to output boxes directly. This model uses the minimum retinanet model and appends a few layers to compute boxes within the graph. These layers include applying the regression values to the anchors and performing NMS. Args model : RetinaNet model to append bbox layers to. If None, it will create a RetinaNet model using **kwargs. anchor_parameters : Struct containing configuration for anchor generation (sizes, strides, ratios, scales). nms : Whether to use non-maximum suppression for the filtering step. class_specific_filter : Whether to use class specific filtering or filter for the best scoring class only. name : Name of the model. *kwargs : Additional kwargs to pass to the minimal retinanet model. Returns A keras.models.Model which takes an image as input and outputs the detections on the image. The order is defined as follows: ``` [ boxes, scores, labels, other[0], other[1], ... ] ```
Construct a RetinaNet model on top of a backbone and adds convenience functions to output boxes directly.
[ "Construct", "a", "RetinaNet", "model", "on", "top", "of", "a", "backbone", "and", "adds", "convenience", "functions", "to", "output", "boxes", "directly", "." ]
def retinanet_bbox( model = None, anchor_parameters = AnchorParameters.default, nms = True, class_specific_filter = True, name = 'retinanet-bbox', **kwargs ): """ Construct a RetinaNet model on top of a backbone and adds convenience functions to output boxes directly. This model uses the minimum retinanet model and appends a few layers to compute boxes within the graph. These layers include applying the regression values to the anchors and performing NMS. Args model : RetinaNet model to append bbox layers to. If None, it will create a RetinaNet model using **kwargs. anchor_parameters : Struct containing configuration for anchor generation (sizes, strides, ratios, scales). nms : Whether to use non-maximum suppression for the filtering step. class_specific_filter : Whether to use class specific filtering or filter for the best scoring class only. name : Name of the model. *kwargs : Additional kwargs to pass to the minimal retinanet model. Returns A keras.models.Model which takes an image as input and outputs the detections on the image. The order is defined as follows: ``` [ boxes, scores, labels, other[0], other[1], ... ] ``` """ if model is None: model = retinanet(num_anchors=anchor_parameters.num_anchors(), **kwargs) # compute the anchors features = [model.get_layer(p_name).output for p_name in ['P3', 'P4', 'P5', 'P6', 'P7']] anchors = __build_anchors(anchor_parameters, features) # we expect the anchors, regression and classification values as first output regression = model.outputs[0] classification = model.outputs[1] # "other" can be any additional output from custom submodels, by default this will be [] other = model.outputs[2:] # apply predicted regression to anchors boxes = layers.RegressBoxes(name='boxes')([anchors, regression]) boxes = layers.ClipBoxes(name='clipped_boxes')([model.inputs[0], boxes]) # filter detections (apply NMS / score threshold / select top-k) detections = layers.FilterDetections( nms = nms, class_specific_filter = class_specific_filter, name = 'filtered_detections' )([boxes, classification] + other) outputs = detections # construct the model return keras.models.Model(inputs=model.inputs, outputs=outputs, name=name)
[ "def", "retinanet_bbox", "(", "model", "=", "None", ",", "anchor_parameters", "=", "AnchorParameters", ".", "default", ",", "nms", "=", "True", ",", "class_specific_filter", "=", "True", ",", "name", "=", "'retinanet-bbox'", ",", "*", "*", "kwargs", ")", ":", "if", "model", "is", "None", ":", "model", "=", "retinanet", "(", "num_anchors", "=", "anchor_parameters", ".", "num_anchors", "(", ")", ",", "*", "*", "kwargs", ")", "# compute the anchors", "features", "=", "[", "model", ".", "get_layer", "(", "p_name", ")", ".", "output", "for", "p_name", "in", "[", "'P3'", ",", "'P4'", ",", "'P5'", ",", "'P6'", ",", "'P7'", "]", "]", "anchors", "=", "__build_anchors", "(", "anchor_parameters", ",", "features", ")", "# we expect the anchors, regression and classification values as first output", "regression", "=", "model", ".", "outputs", "[", "0", "]", "classification", "=", "model", ".", "outputs", "[", "1", "]", "# \"other\" can be any additional output from custom submodels, by default this will be []", "other", "=", "model", ".", "outputs", "[", "2", ":", "]", "# apply predicted regression to anchors", "boxes", "=", "layers", ".", "RegressBoxes", "(", "name", "=", "'boxes'", ")", "(", "[", "anchors", ",", "regression", "]", ")", "boxes", "=", "layers", ".", "ClipBoxes", "(", "name", "=", "'clipped_boxes'", ")", "(", "[", "model", ".", "inputs", "[", "0", "]", ",", "boxes", "]", ")", "# filter detections (apply NMS / score threshold / select top-k)", "detections", "=", "layers", ".", "FilterDetections", "(", "nms", "=", "nms", ",", "class_specific_filter", "=", "class_specific_filter", ",", "name", "=", "'filtered_detections'", ")", "(", "[", "boxes", ",", "classification", "]", "+", "other", ")", "outputs", "=", "detections", "# construct the model", "return", "keras", ".", "models", ".", "Model", "(", "inputs", "=", "model", ".", "inputs", ",", "outputs", "=", "outputs", ",", "name", "=", "name", ")" ]
https://github.com/i-pan/kaggle-rsna18/blob/2db498fe99615d935aa676f04847d0c562fd8e46/models/RetinaNet/keras_retinanet/models/retinanet.py#L312-L371
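A hedged usage sketch; the backbone helper is assumed from the vendored keras_retinanet package, so treat this as an illustration rather than the repo's training script:

from keras_retinanet.models import backbone

training_model = backbone('resnet50').retinanet(num_classes=1)
prediction_model = retinanet_bbox(model=training_model)
# prediction_model.predict(images) -> [boxes, scores, labels], as documented above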
scikit-image/scikit-image
ed642e2bc822f362504d24379dee94978d6fa9de
skimage/morphology/extrema.py
python
_add_constant_clip
(image, const_value)
return(result)
Add constant to the image while handling overflow issues gracefully.
Add constant to the image while handling overflow issues gracefully.
[ "Add", "constant", "to", "the", "image", "while", "handling", "overflow", "issues", "gracefully", "." ]
def _add_constant_clip(image, const_value): """Add constant to the image while handling overflow issues gracefully. """ min_dtype, max_dtype = dtype_limits(image, clip_negative=False) if const_value > (max_dtype - min_dtype): raise ValueError("The added constant is not compatible" "with the image data type.") result = image + const_value result[image > max_dtype-const_value] = max_dtype return(result)
[ "def", "_add_constant_clip", "(", "image", ",", "const_value", ")", ":", "min_dtype", ",", "max_dtype", "=", "dtype_limits", "(", "image", ",", "clip_negative", "=", "False", ")", "if", "const_value", ">", "(", "max_dtype", "-", "min_dtype", ")", ":", "raise", "ValueError", "(", "\"The added constant is not compatible\"", "\"with the image data type.\"", ")", "result", "=", "image", "+", "const_value", "result", "[", "image", ">", "max_dtype", "-", "const_value", "]", "=", "max_dtype", "return", "(", "result", ")" ]
https://github.com/scikit-image/scikit-image/blob/ed642e2bc822f362504d24379dee94978d6fa9de/skimage/morphology/extrema.py#L20-L31
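A worked example on uint8 data; the function is private to skimage.morphology.extrema, so the import is shown only for illustration:

import numpy as np
from skimage.morphology.extrema import _add_constant_clip

img = np.array([250, 10, 200], dtype=np.uint8)
print(_add_constant_clip(img, 10))
# [255  20 210] -- 250 + 10 would overflow, so it is clipped to the dtype max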
XX-net/XX-Net
a9898cfcf0084195fb7e69b6bc834e59aecdf14f
python3.8.2/Lib/asynchat.py
python
async_chat.close_when_done
(self)
automatically close this channel once the outgoing queue is empty
automatically close this channel once the outgoing queue is empty
[ "automatically", "close", "this", "channel", "once", "the", "outgoing", "queue", "is", "empty" ]
def close_when_done(self): "automatically close this channel once the outgoing queue is empty" self.producer_fifo.append(None)
[ "def", "close_when_done", "(", "self", ")", ":", "self", ".", "producer_fifo", ".", "append", "(", "None", ")" ]
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/asynchat.py#L220-L222
general03/flask-autoindex
424246242c9f40aeb9ac2c8c63f4d2234024256e
.eggs/Werkzeug-1.0.1-py3.7.egg/werkzeug/http.py
python
parse_csp_header
(value, on_update=None, cls=None)
return cls(items, on_update)
Parse a Content Security Policy header. .. versionadded:: 1.0.0 Support for Content Security Policy headers was added. :param value: a csp header to be parsed. :param on_update: an optional callable that is called every time a value on the object is changed. :param cls: the class for the returned object. By default :class:`~werkzeug.datastructures.ContentSecurityPolicy` is used. :return: a `cls` object.
Parse a Content Security Policy header.
[ "Parse", "a", "Content", "Security", "Policy", "header", "." ]
def parse_csp_header(value, on_update=None, cls=None): """Parse a Content Security Policy header. .. versionadded:: 1.0.0 Support for Content Security Policy headers was added. :param value: a csp header to be parsed. :param on_update: an optional callable that is called every time a value on the object is changed. :param cls: the class for the returned object. By default :class:`~werkzeug.datastructures.ContentSecurityPolicy` is used. :return: a `cls` object. """ if cls is None: cls = ContentSecurityPolicy if value is None: return cls(None, on_update) items = [] for policy in value.split(";"): policy = policy.strip() # Ignore badly formatted policies (no space) if " " in policy: directive, value = policy.strip().split(" ", 1) items.append((directive.strip(), value.strip())) return cls(items, on_update)
[ "def", "parse_csp_header", "(", "value", ",", "on_update", "=", "None", ",", "cls", "=", "None", ")", ":", "if", "cls", "is", "None", ":", "cls", "=", "ContentSecurityPolicy", "if", "value", "is", "None", ":", "return", "cls", "(", "None", ",", "on_update", ")", "items", "=", "[", "]", "for", "policy", "in", "value", ".", "split", "(", "\";\"", ")", ":", "policy", "=", "policy", ".", "strip", "(", ")", "# Ignore badly formatted policies (no space)", "if", "\" \"", "in", "policy", ":", "directive", ",", "value", "=", "policy", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ",", "1", ")", "items", ".", "append", "(", "(", "directive", ".", "strip", "(", ")", ",", "value", ".", "strip", "(", ")", ")", ")", "return", "cls", "(", "items", ",", "on_update", ")" ]
https://github.com/general03/flask-autoindex/blob/424246242c9f40aeb9ac2c8c63f4d2234024256e/.eggs/Werkzeug-1.0.1-py3.7.egg/werkzeug/http.py#L527-L552
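A hedged usage sketch against Werkzeug 1.0.x, where this parser lives; the result is a dict-like ContentSecurityPolicy keyed by directive:

from werkzeug.http import parse_csp_header

csp = parse_csp_header("default-src 'self'; img-src *; badpolicy")
print(csp['default-src'])  # 'self'
print(csp['img-src'])      # *
# 'badpolicy' contains no space, so it is dropped as badly formatted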
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
external/Scripting Engine/Xenotix Python Scripting Engine/Lib/mhlib.py
python
Message.__init__
(self, f, n, fp = None)
Constructor.
Constructor.
[ "Constructor", "." ]
def __init__(self, f, n, fp = None): """Constructor.""" self.folder = f self.number = n if fp is None: path = f.getmessagefilename(n) fp = open(path, 'r') mimetools.Message.__init__(self, fp)
[ "def", "__init__", "(", "self", ",", "f", ",", "n", ",", "fp", "=", "None", ")", ":", "self", ".", "folder", "=", "f", "self", ".", "number", "=", "n", "if", "fp", "is", "None", ":", "path", "=", "f", ".", "getmessagefilename", "(", "n", ")", "fp", "=", "open", "(", "path", ",", "'r'", ")", "mimetools", ".", "Message", ".", "__init__", "(", "self", ",", "fp", ")" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/external/Scripting Engine/Xenotix Python Scripting Engine/Lib/mhlib.py#L665-L672
scikit-hep/awkward-0.x
dd885bef15814f588b58944d2505296df4aaae0e
awkward0/array/base.py
python
AwkwardArray.unzip
(self)
return tuple(self[column_name] for column_name in self._util_columns(set()))
[]
def unzip(self): return tuple(self[column_name] for column_name in self._util_columns(set()))
[ "def", "unzip", "(", "self", ")", ":", "return", "tuple", "(", "self", "[", "column_name", "]", "for", "column_name", "in", "self", ".", "_util_columns", "(", "set", "(", ")", ")", ")" ]
https://github.com/scikit-hep/awkward-0.x/blob/dd885bef15814f588b58944d2505296df4aaae0e/awkward0/array/base.py#L689-L690
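A hedged sketch with awkward0's Table, one concrete AwkwardArray that exposes this method; the tuple order is assumed to follow the table's column listing:

import awkward0

table = awkward0.Table(x=[1, 2, 3], y=[4.4, 5.5, 6.6])
x, y = table.unzip()  # one array per column, returned as a tuple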
dmlc/dgl
8d14a739bc9e446d6c92ef83eafe5782398118de
python/dgl/core.py
python
invoke_udf_reduce
(graph, func, msgdata, *, orig_nid=None)
return retf
Invoke user-defined reduce function on all the nodes in the graph. It analyzes the graph, groups nodes by their degrees and applies the UDF on each group -- a strategy called *degree-bucketing*. Parameters ---------- graph : DGLGraph The input graph. func : callable The user-defined function. msgdata : dict[str, Tensor] Message data. orig_nid : Tensor, optional Original node IDs. Useful if the input graph is an extracted subgraph. Returns ------- dict[str, Tensor] Results from running the UDF.
Invoke user-defined reduce function on all the nodes in the graph.
[ "Invoke", "user", "-", "defined", "reduce", "function", "on", "all", "the", "nodes", "in", "the", "graph", "." ]
def invoke_udf_reduce(graph, func, msgdata, *, orig_nid=None): """Invoke user-defined reduce function on all the nodes in the graph. It analyzes the graph, groups nodes by their degrees and applies the UDF on each group -- a strategy called *degree-bucketing*. Parameters ---------- graph : DGLGraph The input graph. func : callable The user-defined function. msgdata : dict[str, Tensor] Message data. orig_nid : Tensor, optional Original node IDs. Useful if the input graph is an extracted subgraph. Returns ------- dict[str, Tensor] Results from running the UDF. """ degs = graph.in_degrees() nodes = graph.dstnodes() if orig_nid is None: orig_nid = nodes ntype = graph.dsttypes[0] ntid = graph.get_ntype_id_from_dst(ntype) dstdata = graph._node_frames[ntid] msgdata = Frame(msgdata) # degree bucketing unique_degs, bucketor = _bucketing(degs) bkt_rsts = [] bkt_nodes = [] for deg, node_bkt, orig_nid_bkt in zip(unique_degs, bucketor(nodes), bucketor(orig_nid)): if deg == 0: # skip reduce function for zero-degree nodes continue bkt_nodes.append(node_bkt) ndata_bkt = dstdata.subframe(node_bkt) # order the incoming edges per node by edge ID eid_bkt = F.zerocopy_to_numpy(graph.in_edges(node_bkt, form='eid')) assert len(eid_bkt) == deg * len(node_bkt) eid_bkt = np.sort(eid_bkt.reshape((len(node_bkt), deg)), 1) eid_bkt = F.zerocopy_from_numpy(eid_bkt.flatten()) msgdata_bkt = msgdata.subframe(eid_bkt) # reshape all msg tensors to (num_nodes_bkt, degree, feat_size) maildata = {} for k, msg in msgdata_bkt.items(): newshape = (len(node_bkt), deg) + F.shape(msg)[1:] maildata[k] = F.reshape(msg, newshape) # invoke udf nbatch = NodeBatch(graph, orig_nid_bkt, ntype, ndata_bkt, msgs=maildata) bkt_rsts.append(func(nbatch)) # prepare a result frame retf = Frame(num_rows=len(nodes)) retf._initializers = dstdata._initializers retf._default_initializer = dstdata._default_initializer # merge bucket results and write to the result frame if len(bkt_rsts) != 0: # if all the nodes have zero degree, no need to merge results. merged_rst = {} for k in bkt_rsts[0].keys(): merged_rst[k] = F.cat([rst[k] for rst in bkt_rsts], dim=0) merged_nodes = F.cat(bkt_nodes, dim=0) retf.update_row(merged_nodes, merged_rst) return retf
[ "def", "invoke_udf_reduce", "(", "graph", ",", "func", ",", "msgdata", ",", "*", ",", "orig_nid", "=", "None", ")", ":", "degs", "=", "graph", ".", "in_degrees", "(", ")", "nodes", "=", "graph", ".", "dstnodes", "(", ")", "if", "orig_nid", "is", "None", ":", "orig_nid", "=", "nodes", "ntype", "=", "graph", ".", "dsttypes", "[", "0", "]", "ntid", "=", "graph", ".", "get_ntype_id_from_dst", "(", "ntype", ")", "dstdata", "=", "graph", ".", "_node_frames", "[", "ntid", "]", "msgdata", "=", "Frame", "(", "msgdata", ")", "# degree bucketing", "unique_degs", ",", "bucketor", "=", "_bucketing", "(", "degs", ")", "bkt_rsts", "=", "[", "]", "bkt_nodes", "=", "[", "]", "for", "deg", ",", "node_bkt", ",", "orig_nid_bkt", "in", "zip", "(", "unique_degs", ",", "bucketor", "(", "nodes", ")", ",", "bucketor", "(", "orig_nid", ")", ")", ":", "if", "deg", "==", "0", ":", "# skip reduce function for zero-degree nodes", "continue", "bkt_nodes", ".", "append", "(", "node_bkt", ")", "ndata_bkt", "=", "dstdata", ".", "subframe", "(", "node_bkt", ")", "# order the incoming edges per node by edge ID", "eid_bkt", "=", "F", ".", "zerocopy_to_numpy", "(", "graph", ".", "in_edges", "(", "node_bkt", ",", "form", "=", "'eid'", ")", ")", "assert", "len", "(", "eid_bkt", ")", "==", "deg", "*", "len", "(", "node_bkt", ")", "eid_bkt", "=", "np", ".", "sort", "(", "eid_bkt", ".", "reshape", "(", "(", "len", "(", "node_bkt", ")", ",", "deg", ")", ")", ",", "1", ")", "eid_bkt", "=", "F", ".", "zerocopy_from_numpy", "(", "eid_bkt", ".", "flatten", "(", ")", ")", "msgdata_bkt", "=", "msgdata", ".", "subframe", "(", "eid_bkt", ")", "# reshape all msg tensors to (num_nodes_bkt, degree, feat_size)", "maildata", "=", "{", "}", "for", "k", ",", "msg", "in", "msgdata_bkt", ".", "items", "(", ")", ":", "newshape", "=", "(", "len", "(", "node_bkt", ")", ",", "deg", ")", "+", "F", ".", "shape", "(", "msg", ")", "[", "1", ":", "]", "maildata", "[", "k", "]", "=", "F", ".", "reshape", "(", "msg", ",", "newshape", ")", "# invoke udf", "nbatch", "=", "NodeBatch", "(", "graph", ",", "orig_nid_bkt", ",", "ntype", ",", "ndata_bkt", ",", "msgs", "=", "maildata", ")", "bkt_rsts", ".", "append", "(", "func", "(", "nbatch", ")", ")", "# prepare a result frame", "retf", "=", "Frame", "(", "num_rows", "=", "len", "(", "nodes", ")", ")", "retf", ".", "_initializers", "=", "dstdata", ".", "_initializers", "retf", ".", "_default_initializer", "=", "dstdata", ".", "_default_initializer", "# merge bucket results and write to the result frame", "if", "len", "(", "bkt_rsts", ")", "!=", "0", ":", "# if all the nodes have zero degree, no need to merge results.", "merged_rst", "=", "{", "}", "for", "k", "in", "bkt_rsts", "[", "0", "]", ".", "keys", "(", ")", ":", "merged_rst", "[", "k", "]", "=", "F", ".", "cat", "(", "[", "rst", "[", "k", "]", "for", "rst", "in", "bkt_rsts", "]", ",", "dim", "=", "0", ")", "merged_nodes", "=", "F", ".", "cat", "(", "bkt_nodes", ",", "dim", "=", "0", ")", "retf", ".", "update_row", "(", "merged_nodes", ",", "merged_rst", ")", "return", "retf" ]
https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/python/dgl/core.py#L87-L158
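A minimal self-contained sketch of the degree-bucketing strategy described in the docstring above, using plain NumPy in place of DGL's Frame and backend tensors (all names and toy values are illustrative, not DGL's API): destination nodes are grouped by in-degree so that each bucket's messages stack into a dense (num_nodes, degree, feat) array before the reduce step.

```python
import numpy as np

degs = np.array([2, 0, 2, 1])           # in-degree of each destination node
msgs = np.arange(5 * 3).reshape(5, 3)   # one 3-dim message per incoming edge
# incoming edge ids per node, already ordered by edge ID (toy data)
in_eids = {0: [0, 1], 2: [2, 3], 3: [4]}

def reduce_udf(mail):
    # stand-in for the user-defined reduce: sum over the degree axis
    return mail.sum(axis=1)

for deg in np.unique(degs):
    if deg == 0:
        continue                          # zero-degree nodes are skipped
    bucket = np.flatnonzero(degs == deg)  # all nodes sharing this degree
    eids = np.concatenate([in_eids[int(n)] for n in bucket])
    mail = msgs[eids].reshape(len(bucket), deg, -1)  # (nodes, deg, feat)
    print(deg, bucket.tolist(), reduce_udf(mail).shape)
```

Because every node in a bucket has the same degree, the reshape is always exact, which is the whole point of bucketing before invoking the UDF.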
XX-net/XX-Net
a9898cfcf0084195fb7e69b6bc834e59aecdf14f
python3.8.2/Lib/site-packages/setuptools/unicode_utils.py
python
try_encode
(string, enc)
turn unicode encoding into a functional routine
turn unicode encoding into a functional routine
[ "turn", "unicode", "encoding", "into", "a", "functional", "routine" ]
def try_encode(string, enc): "turn unicode encoding into a functional routine" try: return string.encode(enc) except UnicodeEncodeError: return None
[ "def", "try_encode", "(", "string", ",", "enc", ")", ":", "try", ":", "return", "string", ".", "encode", "(", "enc", ")", "except", "UnicodeEncodeError", ":", "return", "None" ]
https://github.com/XX-net/XX-Net/blob/a9898cfcf0084195fb7e69b6bc834e59aecdf14f/python3.8.2/Lib/site-packages/setuptools/unicode_utils.py#L39-L44
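Because the helper above returns bytes on success and None on failure, it composes naturally into a first-match scan over candidate encodings. A small self-contained usage sketch:

```python
def try_encode(string, enc):
    "turn unicode encoding into a functional routine"
    try:
        return string.encode(enc)
    except UnicodeEncodeError:
        return None

# Pick the first candidate encoding that can represent the string.
for enc in ("ascii", "latin-1", "utf-8"):
    encoded = try_encode("h\xe9llo", enc)
    if encoded is not None:
        print(enc, encoded)  # latin-1 b'h\xe9llo'
        break
```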
BrikerMan/Kashgari
ffe730d33f894e99a6fd7aa17ca67d161bf70359
kashgari/tasks/classification/abc_model.py
python
ABCClassificationModel.fit
(self, x_train: TextSamplesVar, y_train: Union[ClassificationLabelVar, MultiLabelClassificationLabelVar], x_validate: TextSamplesVar = None, y_validate: Union[ClassificationLabelVar, MultiLabelClassificationLabelVar] = None, *, batch_size: int = 64, epochs: int = 5, callbacks: List['keras.callbacks.Callback'] = None, fit_kwargs: Dict = None)
return self.fit_generator(train_sample_gen=train_gen, valid_sample_gen=valid_gen, batch_size=batch_size, epochs=epochs, callbacks=callbacks, fit_kwargs=fit_kwargs)
Trains the model for a given number of epochs with given data set list. Args: x_train: Array of train feature data (if the model has a single input), or tuple of train feature data array (if the model has multiple inputs) y_train: Array of train label data x_validate: Array of validation feature data (if the model has a single input), or tuple of validation feature data array (if the model has multiple inputs) y_validate: Array of validation label data batch_size: Number of samples per gradient update, defaults to 64. epochs: Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided. callbacks: List of `tf.keras.callbacks.Callback` instances. List of callbacks to apply during training. See :class:`tf.keras.callbacks`. fit_kwargs: additional arguments passed to :meth:`tf.keras.Model.fit` Returns: A :class:`tf.keras.callbacks.History` object. Its `History.history` attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable).
Trains the model for a given number of epochs with given data set list.
[ "Trains", "the", "model", "for", "a", "given", "number", "of", "epochs", "with", "given", "data", "set", "list", "." ]
def fit(self,
            x_train: TextSamplesVar,
            y_train: Union[ClassificationLabelVar, MultiLabelClassificationLabelVar],
            x_validate: TextSamplesVar = None,
            y_validate: Union[ClassificationLabelVar, MultiLabelClassificationLabelVar] = None,
            *,
            batch_size: int = 64,
            epochs: int = 5,
            callbacks: List['keras.callbacks.Callback'] = None,
            fit_kwargs: Dict = None) -> 'keras.callbacks.History':
        """
        Trains the model for a given number of epochs with given data set list.

        Args:
            x_train: Array of train feature data (if the model has a single input),
                or tuple of train feature data array (if the model has multiple inputs)
            y_train: Array of train label data
            x_validate: Array of validation feature data (if the model has a single input),
                or tuple of validation feature data array (if the model has multiple inputs)
            y_validate: Array of validation label data
            batch_size: Number of samples per gradient update, defaults to 64.
            epochs: Number of epochs to train the model.
                An epoch is an iteration over the entire `x` and `y` data provided.
            callbacks: List of `tf.keras.callbacks.Callback` instances.
                List of callbacks to apply during training.
                See :class:`tf.keras.callbacks`.
            fit_kwargs: additional arguments passed to :meth:`tf.keras.Model.fit`

        Returns:
            A :class:`tf.keras.callbacks.History` object. Its `History.history` attribute is
            a record of training loss values and metrics values
            at successive epochs, as well as validation loss values
            and validation metrics values (if applicable).
        """
        train_gen = CorpusGenerator(x_train, y_train)
        if x_validate is not None:
            valid_gen = CorpusGenerator(x_validate, y_validate)
        else:
            valid_gen = None
        return self.fit_generator(train_sample_gen=train_gen,
                                  valid_sample_gen=valid_gen,
                                  batch_size=batch_size,
                                  epochs=epochs,
                                  callbacks=callbacks,
                                  fit_kwargs=fit_kwargs)
[ "def", "fit", "(", "self", ",", "x_train", ":", "TextSamplesVar", ",", "y_train", ":", "Union", "[", "ClassificationLabelVar", ",", "MultiLabelClassificationLabelVar", "]", ",", "x_validate", ":", "TextSamplesVar", "=", "None", ",", "y_validate", ":", "Union", "[", "ClassificationLabelVar", ",", "MultiLabelClassificationLabelVar", "]", "=", "None", ",", "*", ",", "batch_size", ":", "int", "=", "64", ",", "epochs", ":", "int", "=", "5", ",", "callbacks", ":", "List", "[", "'keras.callbacks.Callback'", "]", "=", "None", ",", "fit_kwargs", ":", "Dict", "=", "None", ")", "->", "'keras.callbacks.History'", ":", "train_gen", "=", "CorpusGenerator", "(", "x_train", ",", "y_train", ")", "if", "x_validate", "is", "not", "None", ":", "valid_gen", "=", "CorpusGenerator", "(", "x_validate", ",", "y_validate", ")", "else", ":", "valid_gen", "=", "None", "return", "self", ".", "fit_generator", "(", "train_sample_gen", "=", "train_gen", ",", "valid_sample_gen", "=", "valid_gen", ",", "batch_size", "=", "batch_size", ",", "epochs", "=", "epochs", ",", "callbacks", "=", "callbacks", ",", "fit_kwargs", "=", "fit_kwargs", ")" ]
https://github.com/BrikerMan/Kashgari/blob/ffe730d33f894e99a6fd7aa17ca67d161bf70359/kashgari/tasks/classification/abc_model.py#L164-L208
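A hedged usage sketch for the fit API above; it assumes one of Kashgari's bundled classifiers (BiLSTM_Model here) and a tiny in-memory corpus, so the names and data are illustrative rather than a tested recipe:

```python
from kashgari.tasks.classification import BiLSTM_Model  # assumed bundled model

x_train = [["all", "work", "and", "no", "play"], ["hello", "world"]]
y_train = ["neg", "pos"]

model = BiLSTM_Model()
history = model.fit(x_train, y_train,
                    batch_size=2,  # samples per gradient update
                    epochs=1)      # one pass is enough for the sketch
print(history.history.keys())      # e.g. loss and accuracy per epoch
```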
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/comb.py
python
getCraftedTextFromText
(gcodeText, repository=None)
return CombSkein().getCraftedGcode(gcodeText, repository)
Comb a gcode linear move text.
Comb a gcode linear move text.
[ "Comb", "a", "gcode", "linear", "move", "text", "." ]
def getCraftedTextFromText(gcodeText, repository=None): "Comb a gcode linear move text." if gcodec.isProcedureDoneOrFileIsEmpty(gcodeText, 'comb'): return gcodeText if repository == None: repository = settings.getReadRepository(CombRepository()) if not repository.activateComb.value: return gcodeText return CombSkein().getCraftedGcode(gcodeText, repository)
[ "def", "getCraftedTextFromText", "(", "gcodeText", ",", "repository", "=", "None", ")", ":", "if", "gcodec", ".", "isProcedureDoneOrFileIsEmpty", "(", "gcodeText", ",", "'comb'", ")", ":", "return", "gcodeText", "if", "repository", "==", "None", ":", "repository", "=", "settings", ".", "getReadRepository", "(", "CombRepository", "(", ")", ")", "if", "not", "repository", ".", "activateComb", ".", "value", ":", "return", "gcodeText", "return", "CombSkein", "(", ")", ".", "getCraftedGcode", "(", "gcodeText", ",", "repository", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-47/skeinforge_application/skeinforge_plugins/craft_plugins/comb.py#L63-L71
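An illustrative call pattern for the craft function above, assuming the skeinforge tree is importable and its settings files are in place; the gcode snippet is made up:

```python
from skeinforge_application.skeinforge_plugins.craft_plugins import comb

gcode = "G1 X0 Y0 Z0.4 F3600\nG1 X10 Y10\n"
combed = comb.getCraftedTextFromText(gcode)  # loads CombRepository defaults
# The input comes back unchanged when combing is deactivated or already done.
print(combed == gcode)
```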
JimmXinu/FanFicFare
bc149a2deb2636320fe50a3e374af6eef8f61889
included_dependencies/urllib3/packages/six.py
python
assertRegex
(self, *args, **kwargs)
return getattr(self, _assertRegex)(*args, **kwargs)
[]
def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs)
[ "def", "assertRegex", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "self", ",", "_assertRegex", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/JimmXinu/FanFicFare/blob/bc149a2deb2636320fe50a3e374af6eef8f61889/included_dependencies/urllib3/packages/six.py#L753-L754
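The shim above lets test code call one name on both Python 2 and 3: _assertRegex resolves to assertRegexpMatches on Python 2 and assertRegex on Python 3. A hedged usage sketch inside a TestCase:

```python
import unittest
from urllib3.packages import six  # the vendored copy shown in this record

class ExampleTest(unittest.TestCase):
    def test_digits(self):
        # dispatches to assertRegexpMatches (py2) or assertRegex (py3)
        six.assertRegex(self, 'abc123', r'\d+')

unittest.main()  # illustrative entry point
```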
makerbot/ReplicatorG
d6f2b07785a5a5f1e172fb87cb4303b17c575d5d
skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/analyze_plugins/skeiniso.py
python
SkeinWindow.update
(self)
Update the screen.
Update the screen.
[ "Update", "the", "screen", "." ]
def update(self): "Update the screen." if len( self.skeinPanes ) < 1: return self.limitIndexSetArrowMouseDeleteCanvas() self.repository.viewpointLatitude.value = view_rotate.getBoundedLatitude( self.repository.viewpointLatitude.value ) self.repository.viewpointLongitude.value = round( self.repository.viewpointLongitude.value, 1 ) projectiveSpace = euclidean.ProjectiveSpace().getByLatitudeLongitude( self.repository.viewpointLatitude.value, self.repository.viewpointLongitude.value ) skeinPanesCopy = self.getUpdateSkeinPanes()[:] skeinPanesCopy.sort( compareLayerSequence ) if projectiveSpace.basisZ.z > 0.0: self.drawXYAxisLines( projectiveSpace ) else: skeinPanesCopy.reverse() self.drawZAxisLine( projectiveSpace ) for skeinPane in skeinPanesCopy: self.drawSkeinPane( projectiveSpace, skeinPane ) if projectiveSpace.basisZ.z > 0.0: self.drawZAxisLine( projectiveSpace ) else: self.drawXYAxisLines( projectiveSpace ) if self.repository.widthOfAxisNegativeSide.value > 0: self.drawRulings( self.negativeAxisLineX, projectiveSpace, self.negativeRulings ) self.drawRulings( self.negativeAxisLineY, projectiveSpace, self.negativeRulings ) self.drawRulings( self.negativeAxisLineZ, projectiveSpace, self.negativeRulings ) if self.repository.widthOfAxisPositiveSide.value > 0: self.drawRulings( self.positiveAxisLineX, projectiveSpace, self.positiveRulings ) self.drawRulings( self.positiveAxisLineY, projectiveSpace, self.positiveRulings ) self.drawRulings( self.positiveAxisLineZ, projectiveSpace, self.positiveRulings ) self.setDisplayLayerIndex()
[ "def", "update", "(", "self", ")", ":", "if", "len", "(", "self", ".", "skeinPanes", ")", "<", "1", ":", "return", "self", ".", "limitIndexSetArrowMouseDeleteCanvas", "(", ")", "self", ".", "repository", ".", "viewpointLatitude", ".", "value", "=", "view_rotate", ".", "getBoundedLatitude", "(", "self", ".", "repository", ".", "viewpointLatitude", ".", "value", ")", "self", ".", "repository", ".", "viewpointLongitude", ".", "value", "=", "round", "(", "self", ".", "repository", ".", "viewpointLongitude", ".", "value", ",", "1", ")", "projectiveSpace", "=", "euclidean", ".", "ProjectiveSpace", "(", ")", ".", "getByLatitudeLongitude", "(", "self", ".", "repository", ".", "viewpointLatitude", ".", "value", ",", "self", ".", "repository", ".", "viewpointLongitude", ".", "value", ")", "skeinPanesCopy", "=", "self", ".", "getUpdateSkeinPanes", "(", ")", "[", ":", "]", "skeinPanesCopy", ".", "sort", "(", "compareLayerSequence", ")", "if", "projectiveSpace", ".", "basisZ", ".", "z", ">", "0.0", ":", "self", ".", "drawXYAxisLines", "(", "projectiveSpace", ")", "else", ":", "skeinPanesCopy", ".", "reverse", "(", ")", "self", ".", "drawZAxisLine", "(", "projectiveSpace", ")", "for", "skeinPane", "in", "skeinPanesCopy", ":", "self", ".", "drawSkeinPane", "(", "projectiveSpace", ",", "skeinPane", ")", "if", "projectiveSpace", ".", "basisZ", ".", "z", ">", "0.0", ":", "self", ".", "drawZAxisLine", "(", "projectiveSpace", ")", "else", ":", "self", ".", "drawXYAxisLines", "(", "projectiveSpace", ")", "if", "self", ".", "repository", ".", "widthOfAxisNegativeSide", ".", "value", ">", "0", ":", "self", ".", "drawRulings", "(", "self", ".", "negativeAxisLineX", ",", "projectiveSpace", ",", "self", ".", "negativeRulings", ")", "self", ".", "drawRulings", "(", "self", ".", "negativeAxisLineY", ",", "projectiveSpace", ",", "self", ".", "negativeRulings", ")", "self", ".", "drawRulings", "(", "self", ".", "negativeAxisLineZ", ",", "projectiveSpace", ",", "self", ".", "negativeRulings", ")", "if", "self", ".", "repository", ".", "widthOfAxisPositiveSide", ".", "value", ">", "0", ":", "self", ".", "drawRulings", "(", "self", ".", "positiveAxisLineX", ",", "projectiveSpace", ",", "self", ".", "positiveRulings", ")", "self", ".", "drawRulings", "(", "self", ".", "positiveAxisLineY", ",", "projectiveSpace", ",", "self", ".", "positiveRulings", ")", "self", ".", "drawRulings", "(", "self", ".", "positiveAxisLineZ", ",", "projectiveSpace", ",", "self", ".", "positiveRulings", ")", "self", ".", "setDisplayLayerIndex", "(", ")" ]
https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-50/skeinforge_application/skeinforge_plugins/analyze_plugins/skeiniso.py#L848-L877
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
lib-python/2.7/email/header.py
python
Header.__unicode__
(self)
return UEMPTYSTRING.join(uchunks)
Helper for the built-in unicode function.
Helper for the built-in unicode function.
[ "Helper", "for", "the", "built", "-", "in", "unicode", "function", "." ]
def __unicode__(self): """Helper for the built-in unicode function.""" uchunks = [] lastcs = None for s, charset in self._chunks: # We must preserve spaces between encoded and non-encoded word # boundaries, which means for us we need to add a space when we go # from a charset to None/us-ascii, or from None/us-ascii to a # charset. Only do this for the second and subsequent chunks. nextcs = charset if uchunks: if lastcs not in (None, 'us-ascii'): if nextcs in (None, 'us-ascii'): uchunks.append(USPACE) nextcs = None elif nextcs not in (None, 'us-ascii'): uchunks.append(USPACE) lastcs = nextcs uchunks.append(unicode(s, str(charset))) return UEMPTYSTRING.join(uchunks)
[ "def", "__unicode__", "(", "self", ")", ":", "uchunks", "=", "[", "]", "lastcs", "=", "None", "for", "s", ",", "charset", "in", "self", ".", "_chunks", ":", "# We must preserve spaces between encoded and non-encoded word", "# boundaries, which means for us we need to add a space when we go", "# from a charset to None/us-ascii, or from None/us-ascii to a", "# charset. Only do this for the second and subsequent chunks.", "nextcs", "=", "charset", "if", "uchunks", ":", "if", "lastcs", "not", "in", "(", "None", ",", "'us-ascii'", ")", ":", "if", "nextcs", "in", "(", "None", ",", "'us-ascii'", ")", ":", "uchunks", ".", "append", "(", "USPACE", ")", "nextcs", "=", "None", "elif", "nextcs", "not", "in", "(", "None", ",", "'us-ascii'", ")", ":", "uchunks", ".", "append", "(", "USPACE", ")", "lastcs", "=", "nextcs", "uchunks", ".", "append", "(", "unicode", "(", "s", ",", "str", "(", "charset", ")", ")", ")", "return", "UEMPTYSTRING", ".", "join", "(", "uchunks", ")" ]
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/email/header.py#L202-L221
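A short Python 2 sketch of the space-preservation rule described in the comments above: when adjacent chunks cross a charset boundary, __unicode__ inserts a space between them. The expected output in the comment is based on a reading of the code, not a recorded run:

```python
from email.header import Header  # Python 2 email package, as above

h = Header('Hello', 'us-ascii')
h.append(u'Gr\xfc\xdfe', 'iso-8859-1')  # a non-ascii chunk
# Crossing from us-ascii to iso-8859-1 inserts a space between chunks:
print(unicode(h))                        # u'Hello Gr\xfc\xdfe'
```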
ronf/asyncssh
ee1714c598d8c2ea6f5484e465443f38b68714aa
asyncssh/connection.py
python
connect_reverse
( host: str, port: DefTuple[int] = (), *, tunnel: DefTuple[_TunnelConnector] = (), family: DefTuple[int] = (), flags: int = 0, local_addr: DefTuple[HostPort] = (), config: DefTuple[ConfigPaths] = (), options: Optional[SSHServerConnectionOptions] = None, **kwargs: object)
return await asyncio.wait_for( _connect(new_options, loop, flags, conn_factory, 'Opening reverse SSH connection to'), timeout=new_options.connect_timeout)
Create a reverse direction SSH connection This function is a coroutine which behaves similarly to :func:`connect`, making an outbound TCP connection to a remote server. However, instead of starting up an SSH client which runs on that outbound connection, this function starts up an SSH server, expecting the remote system to start up a reverse-direction SSH client. Arguments to this function are the same as :func:`connect`, except that the `options` are of type :class:`SSHServerConnectionOptions` instead of :class:`SSHClientConnectionOptions`. :param host: The hostname or address to connect to. :param port: (optional) The port number to connect to. If not specified, the default SSH port is used. :param tunnel: (optional) An existing SSH client connection that this new connection should be tunneled over. If set, a direct TCP/IP tunnel will be opened over this connection to the requested host and port rather than connecting directly via TCP. A string of the form [user@]host[:port] may also be specified, in which case a connection will first be made to that host and it will then be used as a tunnel. :param family: (optional) The address family to use when creating the socket. By default, the address family is automatically selected based on the host. :param flags: (optional) The flags to pass to getaddrinfo() when looking up the host address :param local_addr: (optional) The host and port to bind the socket to before connecting :param config: (optional) Paths to OpenSSH server configuration files to load. This configuration will be used as a fallback to override the defaults for settings which are not explicitly specified using AsyncSSH's configuration options. By default, no OpenSSH configuration files will be loaded. See :ref:`SupportedServerConfigOptions` for details on what configuration options are currently supported. :param options: (optional) Options to use when starting the reverse-direction SSH server. These options can be specified either through this parameter or as direct keyword arguments to this function. :type host: `str` :type port: `int` :type tunnel: :class:`SSHClientConnection` or `str` :type family: `socket.AF_UNSPEC`, `socket.AF_INET`, or `socket.AF_INET6` :type flags: flags to pass to :meth:`getaddrinfo() <socket.getaddrinfo>` :type local_addr: tuple of `str` and `int` :type config: `list` of `str` :type options: :class:`SSHServerConnectionOptions` :returns: :class:`SSHServerConnection`
Create a reverse direction SSH connection
[ "Create", "a", "reverse", "direction", "SSH", "connection" ]
async def connect_reverse(
        host: str, port: DefTuple[int] = (), *,
        tunnel: DefTuple[_TunnelConnector] = (),
        family: DefTuple[int] = (), flags: int = 0,
        local_addr: DefTuple[HostPort] = (),
        config: DefTuple[ConfigPaths] = (),
        options: Optional[SSHServerConnectionOptions] = None,
        **kwargs: object) -> SSHServerConnection:
    """Create a reverse direction SSH connection

       This function is a coroutine which behaves similarly to
       :func:`connect`, making an outbound TCP connection to a remote
       server. However, instead of starting up an SSH client which
       runs on that outbound connection, this function starts up an
       SSH server, expecting the remote system to start up a
       reverse-direction SSH client.

       Arguments to this function are the same as :func:`connect`,
       except that the `options` are of type
       :class:`SSHServerConnectionOptions` instead of
       :class:`SSHClientConnectionOptions`.

       :param host:
           The hostname or address to connect to.
       :param port: (optional)
           The port number to connect to. If not specified, the default
           SSH port is used.
       :param tunnel: (optional)
           An existing SSH client connection that this new connection should
           be tunneled over. If set, a direct TCP/IP tunnel will be opened
           over this connection to the requested host and port rather than
           connecting directly via TCP. A string of the form
           [user@]host[:port] may also be specified, in which case a
           connection will first be made to that host and it will then be
           used as a tunnel.
       :param family: (optional)
           The address family to use when creating the socket. By default,
           the address family is automatically selected based on the host.
       :param flags: (optional)
           The flags to pass to getaddrinfo() when looking up the host address
       :param local_addr: (optional)
           The host and port to bind the socket to before connecting
       :param config: (optional)
           Paths to OpenSSH server configuration files to load. This
           configuration will be used as a fallback to override the defaults
           for settings which are not explicitly specified using AsyncSSH's
           configuration options. By default, no OpenSSH configuration
           files will be loaded. See :ref:`SupportedServerConfigOptions` for
           details on what configuration options are currently supported.
       :param options: (optional)
           Options to use when starting the reverse-direction SSH server.
           These options can be specified either through this parameter
           or as direct keyword arguments to this function.
       :type host: `str`
       :type port: `int`
       :type tunnel: :class:`SSHClientConnection` or `str`
       :type family: `socket.AF_UNSPEC`, `socket.AF_INET`, or `socket.AF_INET6`
       :type flags: flags to pass to :meth:`getaddrinfo() <socket.getaddrinfo>`
       :type local_addr: tuple of `str` and `int`
       :type config: `list` of `str`
       :type options: :class:`SSHServerConnectionOptions`

       :returns: :class:`SSHServerConnection`

    """

    def conn_factory() -> SSHServerConnection:
        """Return an SSH client connection factory"""

        return SSHServerConnection(loop, new_options, wait='auth')

    loop = asyncio.get_event_loop()

    new_options = SSHServerConnectionOptions(options, config=config, host=host,
                                             port=port, tunnel=tunnel,
                                             family=family,
                                             local_addr=local_addr, **kwargs)

    return await asyncio.wait_for(
        _connect(new_options, loop, flags, conn_factory,
                 'Opening reverse SSH connection to'),
        timeout=new_options.connect_timeout)
[ "async", "def", "connect_reverse", "(", "host", ":", "str", ",", "port", ":", "DefTuple", "[", "int", "]", "=", "(", ")", ",", "*", ",", "tunnel", ":", "DefTuple", "[", "_TunnelConnector", "]", "=", "(", ")", ",", "family", ":", "DefTuple", "[", "int", "]", "=", "(", ")", ",", "flags", ":", "int", "=", "0", ",", "local_addr", ":", "DefTuple", "[", "HostPort", "]", "=", "(", ")", ",", "config", ":", "DefTuple", "[", "ConfigPaths", "]", "=", "(", ")", ",", "options", ":", "Optional", "[", "SSHServerConnectionOptions", "]", "=", "None", ",", "*", "*", "kwargs", ":", "object", ")", "->", "SSHServerConnection", ":", "def", "conn_factory", "(", ")", "->", "SSHServerConnection", ":", "\"\"\"Return an SSH client connection factory\"\"\"", "return", "SSHServerConnection", "(", "loop", ",", "new_options", ",", "wait", "=", "'auth'", ")", "loop", "=", "asyncio", ".", "get_event_loop", "(", ")", "new_options", "=", "SSHServerConnectionOptions", "(", "options", ",", "config", "=", "config", ",", "host", "=", "host", ",", "port", "=", "port", ",", "tunnel", "=", "tunnel", ",", "family", "=", "family", ",", "local_addr", "=", "local_addr", ",", "*", "*", "kwargs", ")", "return", "await", "asyncio", ".", "wait_for", "(", "_connect", "(", "new_options", ",", "loop", ",", "flags", ",", "conn_factory", ",", "'Opening reverse SSH connection to'", ")", ",", "timeout", "=", "new_options", ".", "connect_timeout", ")" ]
https://github.com/ronf/asyncssh/blob/ee1714c598d8c2ea6f5484e465443f38b68714aa/asyncssh/connection.py#L7644-L7724
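A hedged usage sketch for connect_reverse: dial out to a waiting reverse-direction client and serve SSH over that socket. The host, port, and key path are placeholders; server_host_keys is a standard asyncssh server option, passed through as one of the keyword arguments the docstring mentions:

```python
import asyncio
import asyncssh

async def main():
    conn = await asyncssh.connect_reverse(
        'client.example.com', 2222,                          # placeholder endpoint
        server_host_keys=['/etc/ssh/ssh_host_ed25519_key'])  # placeholder key path
    async with conn:
        await conn.wait_closed()  # serve until the remote client disconnects

asyncio.run(main())
```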
devstructure/blueprint
574a9fc0dd3031c66970387f1105d8c89e61218f
blueprint/io/server/backend.py
python
put
(key, data)
Store an object in S3. This function makes one billable request.
Store an object in S3. This function makes one billable request.
[ "Store", "an", "object", "in", "S3", ".", "This", "function", "makes", "one", "billable", "request", "." ]
def put(key, data): """ Store an object in S3. This function makes one billable request. """ librato.count('blueprint-io-server.requests.put') statsd.increment('blueprint-io-server.requests.put') # TODO librato.something('blueprint-io-server.storage', len(data)) statsd.update('blueprint-io-server.storage', len(data)) c = boto.connect_s3(access_key, secret_key) b = c.get_bucket(bucket, validate=False) k = b.new_key(key) try: k.set_contents_from_string(data, policy='public-read', reduced_redundancy=True) return True except (boto.exception.BotoClientError, boto.exception.BotoServerError, httplib.HTTPException, socket.error, socket.gaierror): return False
[ "def", "put", "(", "key", ",", "data", ")", ":", "librato", ".", "count", "(", "'blueprint-io-server.requests.put'", ")", "statsd", ".", "increment", "(", "'blueprint-io-server.requests.put'", ")", "# TODO librato.something('blueprint-io-server.storage', len(data))", "statsd", ".", "update", "(", "'blueprint-io-server.storage'", ",", "len", "(", "data", ")", ")", "c", "=", "boto", ".", "connect_s3", "(", "access_key", ",", "secret_key", ")", "b", "=", "c", ".", "get_bucket", "(", "bucket", ",", "validate", "=", "False", ")", "k", "=", "b", ".", "new_key", "(", "key", ")", "try", ":", "k", ".", "set_contents_from_string", "(", "data", ",", "policy", "=", "'public-read'", ",", "reduced_redundancy", "=", "True", ")", "return", "True", "except", "(", "boto", ".", "exception", ".", "BotoClientError", ",", "boto", ".", "exception", ".", "BotoServerError", ",", "httplib", ".", "HTTPException", ",", "socket", ".", "error", ",", "socket", ".", "gaierror", ")", ":", "return", "False" ]
https://github.com/devstructure/blueprint/blob/574a9fc0dd3031c66970387f1105d8c89e61218f/blueprint/io/server/backend.py#L144-L165
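Since put swallows boto and socket errors and reports success as a boolean, callers are expected to branch on the return value. An illustrative call, assuming the module-level access_key, secret_key, and bucket are configured as in this backend:

```python
ok = put('blueprints/example.json', '{"name": "example"}')  # one billable request
if not ok:
    # the helper already caught the boto/socket error; surface it here
    print('S3 write failed; retry or report an error to the client')
```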
arthurdejong/python-stdnum
02dec52602ae0709b940b781fc1fcebfde7340b7
stdnum/pl/regon.py
python
is_valid
(number)
Check if the number is a valid REGON number.
Check if the number is a valid REGON number.
[ "Check", "if", "the", "number", "is", "a", "valid", "REGON", "number", "." ]
def is_valid(number): """Check if the number is a valid REGON number.""" try: return bool(validate(number)) except ValidationError: return False
[ "def", "is_valid", "(", "number", ")", ":", "try", ":", "return", "bool", "(", "validate", "(", "number", ")", ")", "except", "ValidationError", ":", "return", "False" ]
https://github.com/arthurdejong/python-stdnum/blob/02dec52602ae0709b940b781fc1fcebfde7340b7/stdnum/pl/regon.py#L89-L94
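Usage follows the usual python-stdnum pattern: is_valid wraps validate and turns exceptions into False. The first sample number below is the documentation example from the stdnum module itself; the second flips the check digit:

```python
from stdnum.pl import regon

print(regon.is_valid('192598184'))  # True: the module's doctest example
print(regon.is_valid('192598183'))  # False: check digit no longer matches
```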
ganeti/ganeti
d340a9ddd12f501bef57da421b5f9b969a4ba905
lib/hypervisor/hv_kvm/__init__.py
python
KVMHypervisor._SocatUnixConsoleParams
()
Returns the correct parameters for socat If we have a new-enough socat we can use raw mode with an escape character.
Returns the correct parameters for socat
[ "Returns", "the", "correct", "parameters", "for", "socat" ]
def _SocatUnixConsoleParams(): """Returns the correct parameters for socat If we have a new-enough socat we can use raw mode with an escape character. """ if constants.SOCAT_USE_ESCAPE: return "raw,echo=0,escape=%s" % constants.SOCAT_ESCAPE_CODE else: return "echo=0,icanon=0"
[ "def", "_SocatUnixConsoleParams", "(", ")", ":", "if", "constants", ".", "SOCAT_USE_ESCAPE", ":", "return", "\"raw,echo=0,escape=%s\"", "%", "constants", ".", "SOCAT_ESCAPE_CODE", "else", ":", "return", "\"echo=0,icanon=0\"" ]
https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/hypervisor/hv_kvm/__init__.py#L823-L832
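A sketch of where the returned option string ends up: it is spliced into the terminal side of a socat command line that attaches to an instance's serial console socket. The socket path and escape code below are illustrative, not Ganeti's actual constants:

```python
params = 'raw,echo=0,escape=0x1d'  # what a new-enough socat would get
cmd = ('socat STDIO,%s UNIX-CONNECT:/var/run/ganeti/kvm/instance42.serial'
       % params)
print(cmd)
```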
xonsh/xonsh
b76d6f994f22a4078f602f8b386f4ec280c8461f
xonsh/ansi_colors.py
python
ansi_style_by_name
(name)
return astyle
Gets or makes an ANSI color style by name. If the style does not exist, it will look for a style using the pygments name.
Gets or makes an ANSI color style by name. If the style does not exist, it will look for a style using the pygments name.
[ "Gets", "or", "makes", "an", "ANSI", "color", "style", "by", "name", ".", "If", "the", "style", "does", "not", "exist", "it", "will", "look", "for", "a", "style", "using", "the", "pygments", "name", "." ]
def ansi_style_by_name(name):
    """Gets or makes an ANSI color style by name. If the style does not
    exist, it will look for a style using the pygments name.
    """
    if name in ANSI_STYLES:
        return ANSI_STYLES[name]
    elif not HAS_PYGMENTS:
        raise KeyError(f"could not find style {name!r}")

    from xonsh.pygments_cache import get_style_by_name
    from pygments.util import ClassNotFound

    try:
        pstyle = get_style_by_name(name)
    except (ModuleNotFoundError, ClassNotFound):
        pstyle = get_style_by_name("default")
    palette = make_palette(pstyle.styles.values())
    astyle = make_ansi_style(palette)
    ANSI_STYLES[name] = astyle
    return astyle
[ "def", "ansi_style_by_name", "(", "name", ")", ":", "if", "name", "in", "ANSI_STYLES", ":", "return", "ANSI_STYLES", "[", "name", "]", "elif", "not", "HAS_PYGMENTS", ":", "raise", "KeyError", "(", "f\"could not find style {name!r}\"", ")", "from", "xonsh", ".", "pygments_cache", "import", "get_style_by_name", "from", "pygments", ".", "util", "import", "ClassNotFound", "try", ":", "pstyle", "=", "get_style_by_name", "(", "name", ")", "except", "(", "ModuleNotFoundError", ",", "ClassNotFound", ")", ":", "pstyle", "=", "get_style_by_name", "(", "\"default\"", ")", "palette", "=", "make_palette", "(", "pstyle", ".", "styles", ".", "values", "(", ")", ")", "astyle", "=", "make_ansi_style", "(", "palette", ")", "ANSI_STYLES", "[", "name", "]", "=", "astyle", "return", "astyle" ]
https://github.com/xonsh/xonsh/blob/b76d6f994f22a4078f602f8b386f4ec280c8461f/xonsh/ansi_colors.py#L1146-L1164
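A hedged usage sketch: ask for a style map by name and read one entry. The 'default' style ships with xonsh, so no pygments lookup is needed; the escape fragment in the comment is illustrative:

```python
from xonsh.ansi_colors import ansi_style_by_name

style = ansi_style_by_name('default')  # built-in, so pygments is not required
print(style.get('RED'))                # an ANSI code fragment such as '0;31'
```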
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/benchmarks/src/benchmarks/sympy/sympy/utilities/iterables.py
python
multiset
(seq)
return dict(rv)
Return the hashable sequence in multiset form with values being the multiplicity of the item in the sequence. Examples ======== >>> from sympy.utilities.iterables import multiset >>> multiset('mississippi') {'i': 4, 'm': 1, 'p': 2, 's': 4} See Also ======== group
Return the hashable sequence in multiset form with values being the multiplicity of the item in the sequence.
[ "Return", "the", "hashable", "sequence", "in", "multiset", "form", "with", "values", "being", "the", "multiplicity", "of", "the", "item", "in", "the", "sequence", "." ]
def multiset(seq): """Return the hashable sequence in multiset form with values being the multiplicity of the item in the sequence. Examples ======== >>> from sympy.utilities.iterables import multiset >>> multiset('mississippi') {'i': 4, 'm': 1, 'p': 2, 's': 4} See Also ======== group """ rv = defaultdict(int) for s in seq: rv[s] += 1 return dict(rv)
[ "def", "multiset", "(", "seq", ")", ":", "rv", "=", "defaultdict", "(", "int", ")", "for", "s", "in", "seq", ":", "rv", "[", "s", "]", "+=", "1", "return", "dict", "(", "rv", ")" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/utilities/iterables.py#L196-L214
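For cross-reference, the helper is equivalent to wrapping collections.Counter in dict; a quick check against the doctest above:

```python
from collections import Counter

# multiset('mississippi') and dict(Counter('mississippi')) agree:
assert dict(Counter('mississippi')) == {'i': 4, 'm': 1, 'p': 2, 's': 4}
```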
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/babel/numbers.py
python
format_scientific
(number, format=None, locale=LC_NUMERIC)
return pattern.apply(number, locale)
Return value formatted in scientific notation for a specific locale. >>> format_scientific(10000, locale='en_US') u'1E4' The format pattern can also be specified explicitly: >>> format_scientific(1234567, u'##0E00', locale='en_US') u'1.23E06' :param number: the number to format :param format: :param locale: the `Locale` object or locale identifier
Return value formatted in scientific notation for a specific locale.
[ "Return", "value", "formatted", "in", "scientific", "notation", "for", "a", "specific", "locale", "." ]
def format_scientific(number, format=None, locale=LC_NUMERIC): """Return value formatted in scientific notation for a specific locale. >>> format_scientific(10000, locale='en_US') u'1E4' The format pattern can also be specified explicitly: >>> format_scientific(1234567, u'##0E00', locale='en_US') u'1.23E06' :param number: the number to format :param format: :param locale: the `Locale` object or locale identifier """ locale = Locale.parse(locale) if not format: format = locale.scientific_formats.get(format) pattern = parse_pattern(format) return pattern.apply(number, locale)
[ "def", "format_scientific", "(", "number", ",", "format", "=", "None", ",", "locale", "=", "LC_NUMERIC", ")", ":", "locale", "=", "Locale", ".", "parse", "(", "locale", ")", "if", "not", "format", ":", "format", "=", "locale", ".", "scientific_formats", ".", "get", "(", "format", ")", "pattern", "=", "parse_pattern", "(", "format", ")", "return", "pattern", ".", "apply", "(", "number", ",", "locale", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/babel/numbers.py#L451-L470
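The doctests above can be exercised directly against an installed Babel, assuming its public babel.numbers API matches this vendored copy:

```python
from babel.numbers import format_scientific

print(format_scientific(10000, locale='en_US'))               # 1E4
print(format_scientific(1234567, u'##0E00', locale='en_US'))  # 1.23E06
```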
nasa/CrisisMappingToolkit
0296487974d74cec6aa8be42eafbb5cd24dc6a51
app_engine/libs/bs4/__init__.py
python
BeautifulSoup.reset
(self)
[]
def reset(self): Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) self.hidden = 1 self.builder.reset() self.current_data = [] self.currentTag = None self.tagStack = [] self.preserve_whitespace_tag_stack = [] self.pushTag(self)
[ "def", "reset", "(", "self", ")", ":", "Tag", ".", "__init__", "(", "self", ",", "self", ",", "self", ".", "builder", ",", "self", ".", "ROOT_TAG_NAME", ")", "self", ".", "hidden", "=", "1", "self", ".", "builder", ".", "reset", "(", ")", "self", ".", "current_data", "=", "[", "]", "self", ".", "currentTag", "=", "None", "self", ".", "tagStack", "=", "[", "]", "self", ".", "preserve_whitespace_tag_stack", "=", "[", "]", "self", ".", "pushTag", "(", "self", ")" ]
https://github.com/nasa/CrisisMappingToolkit/blob/0296487974d74cec6aa8be42eafbb5cd24dc6a51/app_engine/libs/bs4/__init__.py#L245-L253