Dataset columns (type, and observed min-max where shown):

repo              string, length 7-55
path              string, length 4-223
url               string, length 87-315
code              string, length 75-104k
code_tokens       list
docstring         string, length 1-46.9k
docstring_tokens  list
language          string, 1 distinct value
partition         string, 3 distinct values
avg_line_len      float64, 7.91-980
pycontribs/pyversion
version/version.py
https://github.com/pycontribs/pyversion/blob/6bbb799846ed4e97e84a3f0f2dbe14685f2ddb39/version/version.py#L100-L174
def increment(version):
    """Return an incremented version string."""
    release_version = os.environ.get("RELEASE_VERSION", None)
    if release_version is not None:
        return release_version
    if isinstance(version, LegacyVersion):
        msg = """{0} is considered a legacy version and does not
        support automatic incrementing. Please bring your version
        numbering into PEP440 standards and then it can be
        automatically incremented.
        """
        raise Exception(msg.format(version))
    release_type = os.environ.get("RELEASE_TYPE", "micro")
    v = version._version

    # epoch
    epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, "!")
    pre_name, pre = VersionUtils.get_version_number(v, 3, None, "pre")
    post_name, post = VersionUtils.get_version_number(v, 4, None, "post")
    dev_name, dev = VersionUtils.get_version_number(v, 2, None, "dev")
    _, major = VersionUtils.get_version_number(v[1], 0, 0)
    _, minor = VersionUtils.get_version_number(v[1], 1, None)
    _, micro = VersionUtils.get_version_number(v[1], 2, None)

    # Handle dev/pre/post
    if release_type == "pre":
        micro, post, pre = VersionUtils.process_pre(micro, post, pre)
    if release_type == "post":
        dev, post = VersionUtils.process_post(dev, post)
    if release_type == "dev":
        dev = VersionUtils.process_dev(dev)
    if release_type == "micro":
        dev, micro, minor, post, pre = VersionUtils.process_micro(
            dev, micro, minor, post, pre
        )
    if release_type == "minor":
        dev, micro, minor, post, pre = VersionUtils.process_minor(
            dev, micro, minor, post, pre
        )
    if release_type == "major":
        dev, major, micro, minor, post, pre = VersionUtils.process_major(
            dev, major, micro, minor, post, pre
        )

    # Handle Epoch
    if release_type == "epoch":
        dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(
            dev, epoch, major, micro, minor, post, pre
        )

    local = "".join(v[5] or []) or None
    version_list = [major, minor, micro]
    if release_type not in ["epoch", "major", "minor", "micro", "pre"]:
        version_list += list(v[1][3:])
    version_string = ".".join([str(x) for x in version_list if x or x == 0])
    if epoch:
        version_string = str(epoch) + epoch_name + version_string
    if pre is not None:
        version_string = VersionUtils.calc_pre_version_string(
            pre, pre_name, version_string
        )
    if post is not None:
        version_string += "." + post_name + str(post)
    if dev is not None:
        version_string += "." + dev_name + str(dev)
    if local is not None:
        version_string += "." + str(local)
    return version_string

Return an incremented version string.
python
train
39.6
apache/incubator-mxnet
python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/contrib/onnx/mx2onnx/_op_translations.py#L341-L361
def convert_batchnorm(node, **kwargs):
    """Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization
    operator and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    momentum = float(attrs.get("momentum", 0.9))
    eps = float(attrs.get("eps", 0.001))

    bn_node = onnx.helper.make_node(
        "BatchNormalization",
        input_nodes,
        [name],
        name=name,
        epsilon=eps,
        momentum=momentum,
        # MXNet computes mean and variance per feature for batchnorm
        # Default for onnx is across all spatial features. So disabling the parameter.
        spatial=0
    )
    return [bn_node]

Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator and return the created node.
python
train
31.714286
pingali/dgit
dgitcore/plugins/common.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/plugins/common.py#L96-L102
def discover_all_plugins(self):
    """
    Load all plugins from dgit extension
    """
    for v in pkg_resources.iter_entry_points('dgit.plugins'):
        m = v.load()
        m.setup(self)

Load all plugins from dgit extension
python
valid
30.142857
biocore/burrito-fillings
bfillings/muscle_v38.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/muscle_v38.py#L531-L569
def align_unaligned_seqs(seqs, moltype=DNA, params=None):
    """Returns an Alignment object from seqs.

    seqs: SequenceCollection object, or data that can be used to build one.

    moltype: a MolType object. DNA, RNA, or PROTEIN.

    params: dict of parameters to pass in to the Muscle app controller.

    Result will be an Alignment object.
    """
    if not params:
        params = {}
    # create SequenceCollection object from seqs
    seq_collection = SequenceCollection(seqs, MolType=moltype)
    # Create mapping between abbreviated IDs and full IDs
    int_map, int_keys = seq_collection.getIntMap()
    # Create SequenceCollection from int_map.
    int_map = SequenceCollection(int_map, MolType=moltype)
    # get temporary filename
    params.update({'-out': get_tmp_filename()})
    # Create Muscle app.
    app = Muscle(InputHandler='_input_as_multiline_string',
                 params=params,
                 WorkingDir=tempfile.gettempdir())
    # Get results using int_map as input to app
    res = app(int_map.toFasta())
    # Get alignment as dict out of results
    alignment = dict(parse_fasta(res['MuscleOut']))
    # Make new dict mapping original IDs
    new_alignment = {}
    for k, v in alignment.items():
        new_alignment[int_keys[k]] = v
    # Create an Alignment object from alignment dict
    new_alignment = Alignment(new_alignment, MolType=moltype)
    # Clean up
    res.cleanUp()
    del(seq_collection, int_map, int_keys, app, res, alignment, params)
    return new_alignment

Returns an Alignment object from seqs.

seqs: SequenceCollection object, or data that can be used to build one.
moltype: a MolType object. DNA, RNA, or PROTEIN.
params: dict of parameters to pass in to the Muscle app controller.

Result will be an Alignment object.
python
train
37.179487
tensorflow/tensor2tensor
tensor2tensor/layers/common_image_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_image_attention.py#L190-L222
def dilated_attention_1d(x,
                         hparams,
                         attention_type="masked_dilated_1d",
                         q_padding="VALID",
                         kv_padding="VALID",
                         gap_size=2):
  """Dilated 1d self attention."""
  # self-attention
  x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
  with tf.variable_scope("masked_dilated_1d"):
    y = common_attention.multihead_attention(
        x,
        None,
        None,
        hparams.attention_key_channels or hparams.hidden_size,
        hparams.attention_value_channels or hparams.hidden_size,
        hparams.hidden_size,
        hparams.num_heads,
        hparams.attention_dropout,
        attention_type=attention_type,
        block_width=hparams.block_width,
        block_length=hparams.block_length,
        q_padding=q_padding,
        kv_padding=kv_padding,
        q_filter_width=hparams.q_filter_width,
        kv_filter_width=hparams.kv_filter_width,
        gap_size=gap_size,
        num_memory_blocks=hparams.num_memory_blocks,
        name="self_attention")
    if is_4d:
      y = tf.reshape(y, x_shape)
      y.set_shape([None, None, None, hparams.hidden_size])
  return y

Dilated 1d self attention.
python
train
35.727273
ARMmbed/yotta
yotta/lib/component.py
https://github.com/ARMmbed/yotta/blob/56bc1e56c602fa20307b23fe27518e9cd6c11af1/yotta/lib/component.py#L233-L247
def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
    ''' Check if this module, or any of its dependencies, has a dependency
        with the specified name in their dependencies, or in their
        targetDependencies corresponding to the specified target.

        Note that if recursive dependencies are not installed, this test
        may return a false-negative.
    '''
    # checking dependencies recursively isn't entirely straightforward, so
    # use the existing method to resolve them all before checking:
    dependencies = self.getDependenciesRecursive(
        target=target,
        test=test_dependencies
    )
    return (name in dependencies)

Check if this module, or any of its dependencies, has a dependency with the specified name in their dependencies, or in their targetDependencies corresponding to the specified target. Note that if recursive dependencies are not installed, this test may return a false-negative.
python
valid
51.8
ungarj/mapchete
mapchete/config.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/config.py#L694-L711
def raw_conf_process_pyramid(raw_conf):
    """
    Loads the process pyramid of a raw configuration.

    Parameters
    ----------
    raw_conf : dict
        Raw mapchete configuration as dictionary.

    Returns
    -------
    BufferedTilePyramid
    """
    return BufferedTilePyramid(
        raw_conf["pyramid"]["grid"],
        metatiling=raw_conf["pyramid"].get("metatiling", 1),
        pixelbuffer=raw_conf["pyramid"].get("pixelbuffer", 0)
    )

Loads the process pyramid of a raw configuration.

Parameters
----------
raw_conf : dict
    Raw mapchete configuration as dictionary.

Returns
-------
BufferedTilePyramid
python
valid
24.444444
delfick/harpoon
harpoon/actions.py
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/actions.py#L313-L342
def tag(collector, image, artifact, **kwargs):
    """Tag an image!"""
    if artifact in (None, "", NotSpecified):
        raise BadOption("Please specify a tag using the artifact option")

    if image.image_index in (None, "", NotSpecified):
        raise BadOption("Please specify an image with an image_index option")

    tag = image.image_name
    if collector.configuration["harpoon"].tag is not NotSpecified:
        tag = "{0}:{1}".format(tag, collector.configuration["harpoon"].tag)
    else:
        tag = "{0}:latest".format(tag)

    images = image.harpoon.docker_api.images()
    current_tags = chain.from_iterable(
        image_conf["RepoTags"] for image_conf in images
        if image_conf["RepoTags"] is not None)
    if tag not in current_tags:
        raise BadOption("Please build or pull the image down to your local cache before tagging it")

    for image_conf in images:
        if image_conf["RepoTags"] is not None:
            if tag in image_conf["RepoTags"]:
                image_id = image_conf["Id"]
                break

    log.info("Tagging {0} ({1}) as {2}".format(image_id, image.image_name, artifact))
    image.harpoon.docker_api.tag(image_id, repository=image.image_name, tag=artifact, force=True)

    image.tag = artifact
    Syncer().push(image)

Tag an image!
python
train
41.533333
tamasgal/km3pipe
km3pipe/core.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L531-L537
def get(self, name, default=None):
    """Return the value of the requested parameter or `default` if None."""
    value = self.parameters.get(name)
    self._processed_parameters.append(name)
    if value is None:
        return default
    return value

Return the value of the requested parameter or `default` if None.
python
train
38.857143
sigsep/sigsep-mus-eval
museval/metrics.py
https://github.com/sigsep/sigsep-mus-eval/blob/a7c9af3647f0c0bb9bbaeccec0b1a6a9e09d1e2d/museval/metrics.py#L488-L497
def _zeropad(sig, N, axis=0):
    """pads with N zeros at the end of the signal, along given axis"""
    # ensures concatenation dimension is the first
    sig = np.moveaxis(sig, axis, 0)
    # zero pad
    out = np.zeros((sig.shape[0] + N,) + sig.shape[1:])
    out[:sig.shape[0], ...] = sig
    # put back axis in place
    out = np.moveaxis(out, 0, axis)
    return out

pads with N zeros at the end of the signal, along given axis
python
train
36.3
pingali/dgit
dgitcore/datasets/validation.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/datasets/validation.py#L17-L82
def instantiate(repo, validator_name=None, filename=None, rulesfiles=None):
    """
    Instantiate the validation specification
    """

    default_validators = repo.options.get('validator', {})

    validators = {}
    if validator_name is not None:
        # Handle the case validator is specified..
        if validator_name in default_validators:
            validators = {
                validator_name: default_validators[validator_name]
            }
        else:
            validators = {
                validator_name: {
                    'files': [],
                    'rules': {},
                    'rules-files': []
                }
            }
    else:
        validators = default_validators

    #=========================================
    # Insert the file names
    #=========================================
    if filename is not None:
        matching_files = repo.find_matching_files([filename])
        if len(matching_files) == 0:
            print("Filename could not be found", filename)
            raise Exception("Invalid filename pattern")
        for v in validators:
            validators[v]['files'] = matching_files
    else:
        # Instantiate the files from the patterns specified
        for v in validators:
            if 'files' not in validators[v]:
                validators[v]['files'] = []
            elif len(validators[v]['files']) > 0:
                matching_files = repo.find_matching_files(validators[v]['files'])
                validators[v]['files'] = matching_files

    #=========================================
    # Insert the rules files..
    #=========================================
    if rulesfiles is not None:
        # Command lines...
        matching_files = repo.find_matching_files([rulesfiles])
        if len(matching_files) == 0:
            print("Could not find matching rules files ({}) for {}".format(rulesfiles, v))
            raise Exception("Invalid rules")
        for v in validators:
            validators[v]['rules-files'] = matching_files
    else:
        # Instantiate the files from the patterns specified
        for v in validators:
            if 'rules-files' not in validators[v]:
                validators[v]['rules-files'] = []
            else:
                rulesfiles = validators[v]['rules-files']
                matching_files = repo.find_matching_files(rulesfiles)
                validators[v]['rules-files'] = matching_files

    return validators

Instantiate the validation specification
python
valid
36.651515
geographika/mappyfile
docs/examples/geometry/geometry.py
https://github.com/geographika/mappyfile/blob/aecbc5e66ec06896bc4c5db41313503468829d00/docs/examples/geometry/geometry.py#L23-L45
def erosion(mapfile, dilated):
    """
    We will continue to work with the modified Mapfile.
    If we wanted to start from scratch we could simply reread it.
    """
    ll = mappyfile.find(mapfile["layers"], "name", "line")
    ll["status"] = "OFF"

    pl = mappyfile.find(mapfile["layers"], "name", "polygon")

    # make a deep copy of the polygon layer in the Map
    # so any modifications are made to this layer only
    pl2 = deepcopy(pl)
    pl2["name"] = "newpolygon"
    mapfile["layers"].append(pl2)

    dilated = dilated.buffer(-0.3)
    pl2["features"][0]["wkt"] = dilated.wkt

    style = pl["classes"][0]["styles"][0]
    style["color"] = "#999999"
    style["outlinecolor"] = "#b2b2b2"

We will continue to work with the modified Mapfile. If we wanted to start from scratch we could simply reread it.
python
train
29.956522
openclimatedata/pymagicc
pymagicc/core.py
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L903-L925
def set_emission_scenario_setup(self, scenario, config_dict):
    """Set the emissions flags correctly.

    Parameters
    ----------
    scenario : :obj:`pymagicc.io.MAGICCData`
        Scenario to run.

    config_dict : dict
        Dictionary with current input configurations which is to be validated
        and updated where necessary.

    Returns
    -------
    dict
        Updated configuration
    """
    self.write(scenario, self._scen_file_name)
    # can be lazy in this line as fix backwards key handles errors for us
    config_dict["file_emissionscenario"] = self._scen_file_name
    config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)

    return config_dict

Set the emissions flags correctly.

Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
    Scenario to run.

config_dict : dict
    Dictionary with current input configurations which is to be validated and
    updated where necessary.

Returns
-------
dict
    Updated configuration
python
train
32.956522
SmokinCaterpillar/pypet
pypet/naturalnaming.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/naturalnaming.py#L1824-L1838
def _make_child_iterator(node, with_links, current_depth=0):
    """Returns an iterator over a node's children.

    In case of using a trajectory as a run (setting 'v_crun') some sub
    branches that do not belong to the run are blinded out.
    """
    cdp1 = current_depth + 1
    if with_links:
        iterator = ((cdp1, x[0], x[1]) for x in node._children.items())
    else:
        leaves = ((cdp1, x[0], x[1]) for x in node._leaves.items())
        groups = ((cdp1, y[0], y[1]) for y in node._groups.items())
        iterator = itools.chain(groups, leaves)
    return iterator

Returns an iterator over a node's children.

In case of using a trajectory as a run (setting 'v_crun') some sub branches that do not belong to the run are blinded out.
python
test
41.333333
payu-org/payu
payu/models/um.py
https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/models/um.py#L212-L219
def date_to_um_date(date):
    """
    Convert a date object to 'year, month, day, hour, minute, second.'
    """
    assert date.hour == 0 and date.minute == 0 and date.second == 0

    return [date.year, date.month, date.day, 0, 0, 0]

Convert a date object to 'year, month, day, hour, minute, second.'
python
train
28.75
atztogo/phonopy
phonopy/structure/grid_points.py
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/structure/grid_points.py#L221-L246
def _shift2boolean(self,
                   q_mesh_shift,
                   is_gamma_center=False,
                   tolerance=1e-5):
    """
    Tolerance is used to judge zero/half grid shift.
    This value does not usually need to be changed.
    """
    if q_mesh_shift is None:
        shift = np.zeros(3, dtype='double')
    else:
        shift = np.array(q_mesh_shift, dtype='double')

    diffby2 = np.abs(shift * 2 - np.rint(shift * 2))

    if (diffby2 < 0.01).all():  # zero or half shift
        diff = np.abs(shift - np.rint(shift))
        if is_gamma_center:
            is_shift = list(diff > 0.1)
        else:  # Monkhorst-pack
            is_shift = list(np.logical_xor((diff > 0.1),
                                           (self._mesh % 2 == 0)) * 1)
    else:
        is_shift = None

    return is_shift

Tolerance is used to judge zero/half grid shift. This value does not usually need to be changed.
python
train
34.5
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/lib/input_reader/_gcs.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/lib/input_reader/_gcs.py#L379-L405
def next(self):
  """Returns the next input from this input reader, a record.

  Returns:
    The next input from this input reader in the form of a record read from
    a LevelDB file.

  Raises:
    StopIteration: The ordered set of records has been exhausted.
  """
  while True:
    if not hasattr(self, "_cur_handle") or self._cur_handle is None:
      # If there are no more files, StopIteration is raised here
      self._cur_handle = super(GCSRecordInputReader, self).next()
    if not hasattr(self, "_record_reader") or self._record_reader is None:
      self._record_reader = records.RecordsReader(self._cur_handle)

    try:
      start_time = time.time()
      content = self._record_reader.read()
      self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
      self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                           int(time.time() - start_time) * 1000)
      return content
    except EOFError:
      self._cur_handle = None
      self._record_reader = None

Returns the next input from this input reader, a record.

Returns:
  The next input from this input reader in the form of a record read from
  a LevelDB file.

Raises:
  StopIteration: The ordered set of records has been exhausted.
python
train
37.740741
72squared/redpipe
redpipe/keyspaces.py
https://github.com/72squared/redpipe/blob/e6ee518bc9f3e2fee323c8c53d08997799bd9b1b/redpipe/keyspaces.py#L1343-L1356
def rpoplpush(self, src, dst):
    """
    RPOP a value off of the ``src`` list and atomically LPUSH it
    on to the ``dst`` list. Returns the value.
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.rpoplpush(self.redis_key(src), self.redis_key(dst))

        def cb():
            f.set(self.valueparse.decode(res.result))

        pipe.on_execute(cb)
        return f

RPOP a value off of the ``src`` list and atomically LPUSH it on to the ``dst`` list. Returns the value.
python
train
30.642857
alexras/pylsdj
pylsdj/project.py
https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/project.py#L55-L82
def load_srm(filename):
    """Load a Project from an ``.srm`` file.

    :param filename: the name of the file from which to load
    :rtype: :py:class:`pylsdj.Project`
    """
    # .srm files are just decompressed projects without headers

    # In order to determine the file's size in compressed blocks, we have to
    # compress it first
    with open(filename, 'rb') as fp:
        raw_data = fp.read()

    compressed_data = filepack.compress(raw_data)

    factory = BlockFactory()
    writer = BlockWriter()
    writer.write(compressed_data, factory)

    size_in_blocks = len(factory.blocks)

    # We'll give the file a dummy name ("SRMLOAD") and version, since we know
    # neither
    name = "SRMLOAD"
    version = 0

    return Project(name, version, size_in_blocks, raw_data)

Load a Project from an ``.srm`` file.

:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
python
train
27.392857
odlgroup/odl
odl/space/weighting.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/weighting.py#L530-L552
def equiv(self, other):
    """Return True if other is an equivalent weighting.

    Returns
    -------
    equivalent : bool
        ``True`` if ``other`` is a `Weighting` instance with the same
        `Weighting.impl`, which yields the same result as this
        weighting for any input, ``False`` otherwise. This is checked
        by entry-wise comparison of arrays/constants.
    """
    # Optimization for equality
    if self == other:
        return True
    elif (not isinstance(other, Weighting) or
          self.exponent != other.exponent):
        return False
    elif isinstance(other, MatrixWeighting):
        return other.equiv(self)
    elif isinstance(other, ConstWeighting):
        return np.array_equiv(self.array, other.const)
    else:
        return np.array_equal(self.array, other.array)

Return True if other is an equivalent weighting.

Returns
-------
equivalent : bool
    ``True`` if ``other`` is a `Weighting` instance with the same
    `Weighting.impl`, which yields the same result as this weighting for any
    input, ``False`` otherwise. This is checked by entry-wise comparison of
    arrays/constants.
python
train
38.26087
note35/sinon
sinon/lib/matcher.py
https://github.com/note35/sinon/blob/f1d551b679b393d64d926a8a279320904c38d0f5/sinon/lib/matcher.py#L46-L61
def __value_compare(self, target):
    """
    Comparing result based on expectation if arg_type is "VALUE"

    Args: Anything
    Return: Boolean
    """
    if self.expectation == "__ANY__":
        return True
    elif self.expectation == "__DEFINED__":
        return True if target is not None else False
    elif self.expectation == "__TYPE__":
        return True if type(target) == self.target_type else False #pylint:disable=unidiomatic-typecheck
    elif self.expectation == "__INSTANCE__":
        return True if isinstance(target, self.target_type.__class__) else False
    else:
        return True if target == self.expectation else False

Comparing result based on expectation if arg_type is "VALUE"

Args: Anything
Return: Boolean
python
train
43.5625
tensorflow/tensorboard
tensorboard/plugins/debugger/debugger_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/debugger/debugger_plugin.py#L139-L152
def is_active(self):
  """Determines whether this plugin is active.

  This plugin is active if any health pills information is present for any
  run.

  Returns:
    A boolean. Whether this plugin is active.
  """
  return bool(
      self._grpc_port is not None and
      self._event_multiplexer and
      self._event_multiplexer.PluginRunToTagToContent(
          constants.DEBUGGER_PLUGIN_NAME))

Determines whether this plugin is active.

This plugin is active if any health pills information is present for any run.

Returns:
  A boolean. Whether this plugin is active.
python
train
29.142857
emlazzarin/acrylic
acrylic/datatable.py
https://github.com/emlazzarin/acrylic/blob/08c6702d73b9660ead1024653f4fa016f6340e46/acrylic/datatable.py#L856-L873
def writexlsx(self, path, sheetname="default"):
    """
    Writes this table to an .xlsx file at the specified path.

    If you'd like to specify a sheetname, you may do so.

    If you'd like to write one workbook with different DataTables
    for each sheet, import the `excel` function from acrylic. You
    can see that code in `utils.py`.

    Note that the outgoing file is an .xlsx file, so it'd make sense
    to name that way.
    """
    writer = ExcelRW.UnicodeWriter(path)
    writer.set_active_sheet(sheetname)
    writer.writerow(self.fields)
    writer.writerows(self)
    writer.save()

Writes this table to an .xlsx file at the specified path.

If you'd like to specify a sheetname, you may do so.

If you'd like to write one workbook with different DataTables for each sheet,
import the `excel` function from acrylic. You can see that code in `utils.py`.

Note that the outgoing file is an .xlsx file, so it'd make sense to name that way.
python
train
35.666667
OpenKMIP/PyKMIP
kmip/pie/sqltypes.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/pie/sqltypes.py#L46-L59
def process_bind_param(self, value, dialect):
    """
    Returns the integer value of the usage mask bitmask. This value is
    stored in the database.

    Args:
        value(list<enums.CryptographicUsageMask>): list of enums in the
            usage mask
        dialect(string): SQL dialect
    """
    bitmask = 0x00
    for e in value:
        bitmask = bitmask | e.value
    return bitmask

Returns the integer value of the usage mask bitmask. This value is stored in
the database.

Args:
    value(list<enums.CryptographicUsageMask>): list of enums in the usage mask
    dialect(string): SQL dialect
python
test
30.571429
saltstack/salt
salt/spm/pkgdb/sqlite3.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/pkgdb/sqlite3.py#L110-L126
def list_packages(conn=None):
    '''
    List the installed packages
    '''
    close = False
    if conn is None:
        close = True
        conn = init()

    ret = []
    data = conn.execute('SELECT package FROM packages')
    for pkg in data.fetchall():
        ret.append(pkg)
    if close:
        conn.close()
    return ret

List the installed packages
python
train
19.294118
google/grr
grr/server/grr_response_server/aff4_objects/standard.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4_objects/standard.py#L205-L225
def _ReadPartial(self, length):
  """Read as much as possible, but not more than length."""
  chunk = self.offset // self.chunksize
  chunk_offset = self.offset % self.chunksize

  # If we're past the end of the file, we don't have a chunk to read from, so
  # we can't read anymore. We return the empty string here so we can read off
  # the end of a file without raising, and get as much data as is there.
  if chunk > self.last_chunk:
    return ""

  available_to_read = min(length, self.chunksize - chunk_offset)

  fd = self._GetChunkForReading(chunk)
  fd.seek(chunk_offset)
  result = fd.read(available_to_read)
  self.offset += len(result)

  return result

Read as much as possible, but not more than length.
python
train
32.142857
numba/llvmlite
llvmlite/ir/builder.py
https://github.com/numba/llvmlite/blob/fcadf8af11947f3fd041c5d6526c5bf231564883/llvmlite/ir/builder.py#L742-L755
def store_atomic(self, value, ptr, ordering, align):
    """
    Store value to pointer, with optional guaranteed alignment:
        *ptr = name
    """
    if not isinstance(ptr.type, types.PointerType):
        raise TypeError("cannot store to value of type %s (%r): not a pointer"
                        % (ptr.type, str(ptr)))
    if ptr.type.pointee != value.type:
        raise TypeError("cannot store %s to %s: mismatching types"
                        % (value.type, ptr.type))
    st = instructions.StoreAtomicInstr(self.block, value, ptr, ordering, align)
    self._insert(st)
    return st

Store value to pointer, with optional guaranteed alignment: *ptr = name
python
train
45.785714
postlund/pyatv
pyatv/mrp/pairing.py
https://github.com/postlund/pyatv/blob/655dfcda4e2f9d1c501540e18da4f480d8bf0e70/pyatv/mrp/pairing.py#L88-L116
async def verify_credentials(self):
    """Verify credentials with device."""
    _, public_key = self.srp.initialize()

    msg = messages.crypto_pairing({
        tlv8.TLV_SEQ_NO: b'\x01',
        tlv8.TLV_PUBLIC_KEY: public_key})
    resp = await self.protocol.send_and_receive(
        msg, generate_identifier=False)

    resp = _get_pairing_data(resp)
    session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
    encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
    log_binary(_LOGGER,
               'Device',
               Public=self.credentials.ltpk,
               Encrypted=encrypted)

    encrypted_data = self.srp.verify1(
        self.credentials, session_pub_key, encrypted)
    msg = messages.crypto_pairing({
        tlv8.TLV_SEQ_NO: b'\x03',
        tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
    resp = await self.protocol.send_and_receive(
        msg, generate_identifier=False)

    # TODO: check status code

    self._output_key, self._input_key = self.srp.verify2()

Verify credentials with device.
python
train
35.827586
h2oai/datatable
datatable/utils/misc.py
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/utils/misc.py#L130-L166
def normalize_range(e, n): """ Return the range tuple normalized for an ``n``-element object. The semantics of a range is slightly different than that of a slice. In particular, a range is similar to a list in meaning (and on Py2 it was eagerly expanded into a list). Thus we do not allow the range to generate indices that would be invalid for an ``n``-array. Furthermore, we restrict the range to produce only positive or only negative indices. For example, ``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing to treat the last "-1" as the last element in the list. :param e: a range object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``, or None if the range is invalid. """ if e.step > 0: count = max(0, (e.stop - e.start - 1) // e.step + 1) else: count = max(0, (e.start - e.stop - 1) // -e.step + 1) if count == 0: return (0, 0, e.step) start = e.start finish = e.start + (count - 1) * e.step if start >= 0: if start >= n or finish < 0 or finish >= n: return None else: start += n finish += n if start < 0 or start >= n or finish < 0 or finish >= n: return None assert count >= 0 return (start, count, e.step)
[ "def", "normalize_range", "(", "e", ",", "n", ")", ":", "if", "e", ".", "step", ">", "0", ":", "count", "=", "max", "(", "0", ",", "(", "e", ".", "stop", "-", "e", ".", "start", "-", "1", ")", "//", "e", ".", "step", "+", "1", ")", "else", ":", "count", "=", "max", "(", "0", ",", "(", "e", ".", "start", "-", "e", ".", "stop", "-", "1", ")", "//", "-", "e", ".", "step", "+", "1", ")", "if", "count", "==", "0", ":", "return", "(", "0", ",", "0", ",", "e", ".", "step", ")", "start", "=", "e", ".", "start", "finish", "=", "e", ".", "start", "+", "(", "count", "-", "1", ")", "*", "e", ".", "step", "if", "start", ">=", "0", ":", "if", "start", ">=", "n", "or", "finish", "<", "0", "or", "finish", ">=", "n", ":", "return", "None", "else", ":", "start", "+=", "n", "finish", "+=", "n", "if", "start", "<", "0", "or", "start", ">=", "n", "or", "finish", "<", "0", "or", "finish", ">=", "n", ":", "return", "None", "assert", "count", ">=", "0", "return", "(", "start", ",", "count", ",", "e", ".", "step", ")" ]
Return the range tuple normalized for an ``n``-element object. The semantics of a range is slightly different than that of a slice. In particular, a range is similar to a list in meaning (and on Py2 it was eagerly expanded into a list). Thus we do not allow the range to generate indices that would be invalid for an ``n``-array. Furthermore, we restrict the range to produce only positive or only negative indices. For example, ``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing to treat the last "-1" as the last element in the list. :param e: a range object representing a selector :param n: number of elements in a sequence to which ``e`` is applied :returns: tuple ``(start, count, step)`` derived from ``e``, or None if the range is invalid.
[ "Return", "the", "range", "tuple", "normalized", "for", "an", "n", "-", "element", "object", "." ]
python
train
37.486486
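Assuming normalize_range above is in scope, these calls illustrate the semantics described in the docstring (expected results in comments):

print(normalize_range(range(2, 8, 2), 10))    # (2, 3, 2)  -> indices 2, 4, 6
print(normalize_range(range(-3, 0), 10))      # (7, 3, 1)  -> indices 7, 8, 9
print(normalize_range(range(2, -2, -1), 10))  # None: mixes positive and negative indices
print(normalize_range(range(5, 15), 10))      # None: would run past n - 1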
cthorey/pdsimage
pdsimage/PDS_Extractor.py
https://github.com/cthorey/pdsimage/blob/f71de6dfddd3d538d76da229b4b9605c40f3fbac/pdsimage/PDS_Extractor.py#L647-L657
def _control_longitude(self): ''' Control on longitude values ''' if self.lonm < 0.0: self.lonm = 360.0 + self.lonm if self.lonM < 0.0: self.lonM = 360.0 + self.lonM if self.lonm > 360.0: self.lonm = self.lonm - 360.0 if self.lonM > 360.0: self.lonM = self.lonM - 360.0
[ "def", "_control_longitude", "(", "self", ")", ":", "if", "self", ".", "lonm", "<", "0.0", ":", "self", ".", "lonm", "=", "360.0", "+", "self", ".", "lonm", "if", "self", ".", "lonM", "<", "0.0", ":", "self", ".", "lonM", "=", "360.0", "+", "self", ".", "lonM", "if", "self", ".", "lonm", ">", "360.0", ":", "self", ".", "lonm", "=", "self", ".", "lonm", "-", "360.0", "if", "self", ".", "lonM", ">", "360.0", ":", "self", ".", "lonM", "=", "self", ".", "lonM", "-", "360.0" ]
Control longitude values: wrap lonm and lonM into the [0, 360] degree range.
[ "Control", "on", "longitude", "values" ]
python
train
31.636364
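The same wrap-around logic, extracted into a standalone helper for a single value (the helper name is illustrative):

def wrap_longitude(lon):
    # Map a longitude in degrees into [0, 360] with a single wrap,
    # mirroring the checks applied to lonm/lonM above.
    if lon < 0.0:
        lon = 360.0 + lon
    if lon > 360.0:
        lon = lon - 360.0
    return lon

assert wrap_longitude(-10.0) == 350.0
assert wrap_longitude(370.0) == 10.0
assert wrap_longitude(180.0) == 180.0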
mikedh/trimesh
trimesh/creation.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/creation.py#L711-L784
def cylinder(radius=1.0, height=1.0, sections=32, segment=None, transform=None, **kwargs): """ Create a mesh of a cylinder along Z centered at the origin. Parameters ---------- radius : float The radius of the cylinder height : float The height of the cylinder sections : int How many pie wedges should the cylinder have segment : (2, 3) float Endpoints of axis, overrides transform and height transform : (4, 4) float Transform to apply **kwargs: passed to Trimesh to create cylinder Returns ---------- cylinder: trimesh.Trimesh Resulting mesh of a cylinder """ if segment is not None: segment = np.asanyarray(segment, dtype=np.float64) if segment.shape != (2, 3): raise ValueError('segment must be 2 3D points!') vector = segment[1] - segment[0] # override height with segment length height = np.linalg.norm(vector) # point in middle of line midpoint = segment[0] + (vector * 0.5) # align Z with our desired direction rotation = align_vectors([0, 0, 1], vector) # translate to midpoint of segment translation = transformations.translation_matrix(midpoint) # compound the rotation and translation transform = np.dot(translation, rotation) # create a 2D pie out of wedges theta = np.linspace(0, np.pi * 2, sections) vertices = np.column_stack((np.sin(theta), np.cos(theta))) * radius # the single vertex at the center of the circle # we're overwriting the duplicated start/end vertex vertices[0] = [0, 0] # whangle indexes into a triangulation of the pie wedges index = np.arange(1, len(vertices) + 1).reshape((-1, 1)) index[-1] = 1 faces = np.tile(index, (1, 2)).reshape(-1)[1:-1].reshape((-1, 2)) faces = np.column_stack((np.zeros(len(faces), dtype=np.int), faces)) # extrude the 2D triangulation into a Trimesh object cylinder = extrude_triangulation(vertices=vertices, faces=faces, height=height, **kwargs) # the extrusion was along +Z, so move the cylinder # center of mass back to the origin cylinder.vertices[:, 2] -= height * .5 if transform is not None: # apply a transform here before any cache stuff is generated # and would have to be dumped after the transform is applied cylinder.apply_transform(transform) return cylinder
[ "def", "cylinder", "(", "radius", "=", "1.0", ",", "height", "=", "1.0", ",", "sections", "=", "32", ",", "segment", "=", "None", ",", "transform", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "segment", "is", "not", "None", ":", "segment", "=", "np", ".", "asanyarray", "(", "segment", ",", "dtype", "=", "np", ".", "float64", ")", "if", "segment", ".", "shape", "!=", "(", "2", ",", "3", ")", ":", "raise", "ValueError", "(", "'segment must be 2 3D points!'", ")", "vector", "=", "segment", "[", "1", "]", "-", "segment", "[", "0", "]", "# override height with segment length", "height", "=", "np", ".", "linalg", ".", "norm", "(", "vector", ")", "# point in middle of line", "midpoint", "=", "segment", "[", "0", "]", "+", "(", "vector", "*", "0.5", ")", "# align Z with our desired direction", "rotation", "=", "align_vectors", "(", "[", "0", ",", "0", ",", "1", "]", ",", "vector", ")", "# translate to midpoint of segment", "translation", "=", "transformations", ".", "translation_matrix", "(", "midpoint", ")", "# compound the rotation and translation", "transform", "=", "np", ".", "dot", "(", "translation", ",", "rotation", ")", "# create a 2D pie out of wedges", "theta", "=", "np", ".", "linspace", "(", "0", ",", "np", ".", "pi", "*", "2", ",", "sections", ")", "vertices", "=", "np", ".", "column_stack", "(", "(", "np", ".", "sin", "(", "theta", ")", ",", "np", ".", "cos", "(", "theta", ")", ")", ")", "*", "radius", "# the single vertex at the center of the circle", "# we're overwriting the duplicated start/end vertex", "vertices", "[", "0", "]", "=", "[", "0", ",", "0", "]", "# whangle indexes into a triangulation of the pie wedges", "index", "=", "np", ".", "arange", "(", "1", ",", "len", "(", "vertices", ")", "+", "1", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "index", "[", "-", "1", "]", "=", "1", "faces", "=", "np", ".", "tile", "(", "index", ",", "(", "1", ",", "2", ")", ")", ".", "reshape", "(", "-", "1", ")", "[", "1", ":", "-", "1", "]", ".", "reshape", "(", "(", "-", "1", ",", "2", ")", ")", "faces", "=", "np", ".", "column_stack", "(", "(", "np", ".", "zeros", "(", "len", "(", "faces", ")", ",", "dtype", "=", "np", ".", "int", ")", ",", "faces", ")", ")", "# extrude the 2D triangulation into a Trimesh object", "cylinder", "=", "extrude_triangulation", "(", "vertices", "=", "vertices", ",", "faces", "=", "faces", ",", "height", "=", "height", ",", "*", "*", "kwargs", ")", "# the extrusion was along +Z, so move the cylinder", "# center of mass back to the origin", "cylinder", ".", "vertices", "[", ":", ",", "2", "]", "-=", "height", "*", ".5", "if", "transform", "is", "not", "None", ":", "# apply a transform here before any cache stuff is generated", "# and would have to be dumped after the transform is applied", "cylinder", ".", "apply_transform", "(", "transform", ")", "return", "cylinder" ]
Create a mesh of a cylinder along Z centered at the origin. Parameters ---------- radius : float The radius of the cylinder height : float The height of the cylinder sections : int How many pie wedges should the cylinder have segment : (2, 3) float Endpoints of axis, overrides transform and height transform : (4, 4) float Transform to apply **kwargs: passed to Trimesh to create cylinder Returns ---------- cylinder: trimesh.Trimesh Resulting mesh of a cylinder
[ "Create", "a", "mesh", "of", "a", "cylinder", "along", "Z", "centered", "at", "the", "origin", "." ]
python
train
35.054054
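A usage sketch covering both calling conventions from the docstring (requires numpy and trimesh to be installed):

import numpy as np
import trimesh

# Radius/height form: cylinder along +Z, centered at the origin.
c1 = trimesh.creation.cylinder(radius=1.0, height=2.0, sections=32)
print(c1.is_watertight, c1.bounds)

# Segment form: the endpoints override height and transform.
segment = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 5.0]])
c2 = trimesh.creation.cylinder(radius=0.5, segment=segment)
print(c2.bounds)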
rwl/godot
godot/mapping.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/mapping.py#L295-L329
def map_element(self, obj, name, event): """ Handles mapping elements to diagram components """ canvas = self.diagram.diagram_canvas parser = XDotParser() for element in event.added: logger.debug("Mapping new element [%s] to diagram node" % element) for node_mapping in self.nodes: ct = name[:-6] #strip '_items' if node_mapping.containment_trait == ct: dot_attrs = node_mapping.dot_node dot = Dot() graph_node = Node(str(id(element))) self._style_node(graph_node, dot_attrs) dot.add_node(graph_node) xdot = graph_from_dot_data(dot.create(self.program,"xdot")) diagram_nodes = parser.parse_nodes(xdot)#.get_node_list()) for dn in diagram_nodes: if dn is not None: dn.element = element # Tools for tool in node_mapping.tools: dn.tools.append(tool(dn)) canvas.add(dn) canvas.request_redraw() for element in event.removed: logger.debug("Unmapping element [%s] from diagram" % element) for component in canvas.components: if element == component.element: canvas.remove(component) canvas.request_redraw() break
[ "def", "map_element", "(", "self", ",", "obj", ",", "name", ",", "event", ")", ":", "canvas", "=", "self", ".", "diagram", ".", "diagram_canvas", "parser", "=", "XDotParser", "(", ")", "for", "element", "in", "event", ".", "added", ":", "logger", ".", "debug", "(", "\"Mapping new element [%s] to diagram node\"", "%", "element", ")", "for", "node_mapping", "in", "self", ".", "nodes", ":", "ct", "=", "name", "[", ":", "-", "6", "]", "#strip '_items'", "if", "node_mapping", ".", "containment_trait", "==", "ct", ":", "dot_attrs", "=", "node_mapping", ".", "dot_node", "dot", "=", "Dot", "(", ")", "graph_node", "=", "Node", "(", "str", "(", "id", "(", "element", ")", ")", ")", "self", ".", "_style_node", "(", "graph_node", ",", "dot_attrs", ")", "dot", ".", "add_node", "(", "graph_node", ")", "xdot", "=", "graph_from_dot_data", "(", "dot", ".", "create", "(", "self", ".", "program", ",", "\"xdot\"", ")", ")", "diagram_nodes", "=", "parser", ".", "parse_nodes", "(", "xdot", ")", "#.get_node_list())", "for", "dn", "in", "diagram_nodes", ":", "if", "dn", "is", "not", "None", ":", "dn", ".", "element", "=", "element", "# Tools", "for", "tool", "in", "node_mapping", ".", "tools", ":", "dn", ".", "tools", ".", "append", "(", "tool", "(", "dn", ")", ")", "canvas", ".", "add", "(", "dn", ")", "canvas", ".", "request_redraw", "(", ")", "for", "element", "in", "event", ".", "removed", ":", "logger", ".", "debug", "(", "\"Unmapping element [%s] from diagram\"", "%", "element", ")", "for", "component", "in", "canvas", ".", "components", ":", "if", "element", "==", "component", ".", "element", ":", "canvas", ".", "remove", "(", "component", ")", "canvas", ".", "request_redraw", "(", ")", "break" ]
Handles mapping elements to diagram components
[ "Handles", "mapping", "elements", "to", "diagram", "components" ]
python
test
43.628571
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/color/colormap.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/color/colormap.py#L160-L167
def _process_glsl_template(template, colors): """Replace $color_i by color #i in the GLSL template.""" for i in range(len(colors) - 1, -1, -1): color = colors[i] assert len(color) == 4 vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color) template = template.replace('$color_%d' % i, vec4_color) return template
[ "def", "_process_glsl_template", "(", "template", ",", "colors", ")", ":", "for", "i", "in", "range", "(", "len", "(", "colors", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "color", "=", "colors", "[", "i", "]", "assert", "len", "(", "color", ")", "==", "4", "vec4_color", "=", "'vec4(%.3f, %.3f, %.3f, %.3f)'", "%", "tuple", "(", "color", ")", "template", "=", "template", ".", "replace", "(", "'$color_%d'", "%", "i", ",", "vec4_color", ")", "return", "template" ]
Replace $color_i by color #i in the GLSL template.
[ "Replace", "$color_i", "by", "color", "#i", "in", "the", "GLSL", "template", "." ]
python
train
44.125
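Assuming the helper above is in scope, a small example; note that the reversed loop matters because '$color_1' is a prefix of '$color_10', so higher indices must be substituted first:

template = "gl_FragColor = mix($color_0, $color_1, t);"
colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0)]
print(_process_glsl_template(template, colors))
# gl_FragColor = mix(vec4(1.000, 0.000, 0.000, 1.000),
#                    vec4(0.000, 0.000, 1.000, 1.000), t);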
numenta/nupic
src/nupic/algorithms/spatial_pooler.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/spatial_pooler.py#L1177-L1190
def _bumpUpWeakColumns(self): """ This method increases the permanence values of synapses of columns whose activity level has been too low. Such columns are identified by having an overlap duty cycle that drops too much below those of their peers. The permanence values for such columns are increased. """ weakColumns = numpy.where(self._overlapDutyCycles < self._minOverlapDutyCycles)[0] for columnIndex in weakColumns: perm = self._permanences[columnIndex].astype(realDType) maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0] perm[maskPotential] += self._synPermBelowStimulusInc self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)
[ "def", "_bumpUpWeakColumns", "(", "self", ")", ":", "weakColumns", "=", "numpy", ".", "where", "(", "self", ".", "_overlapDutyCycles", "<", "self", ".", "_minOverlapDutyCycles", ")", "[", "0", "]", "for", "columnIndex", "in", "weakColumns", ":", "perm", "=", "self", ".", "_permanences", "[", "columnIndex", "]", ".", "astype", "(", "realDType", ")", "maskPotential", "=", "numpy", ".", "where", "(", "self", ".", "_potentialPools", "[", "columnIndex", "]", ">", "0", ")", "[", "0", "]", "perm", "[", "maskPotential", "]", "+=", "self", ".", "_synPermBelowStimulusInc", "self", ".", "_updatePermanencesForColumn", "(", "perm", ",", "columnIndex", ",", "raisePerm", "=", "False", ")" ]
This method increases the permanence values of synapses of columns whose activity level has been too low. Such columns are identified by having an overlap duty cycle that drops too much below those of their peers. The permanence values for such columns are increased.
[ "This", "method", "increases", "the", "permanence", "values", "of", "synapses", "of", "columns", "whose", "activity", "level", "has", "been", "too", "low", ".", "Such", "columns", "are", "identified", "by", "having", "an", "overlap", "duty", "cycle", "that", "drops", "too", "much", "below", "those", "of", "their", "peers", ".", "The", "permanence", "values", "for", "such", "columns", "are", "increased", "." ]
python
valid
53.071429
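A standalone numpy sketch of the same bump-up step on toy data (not the nupic API; realDType and the spatial pooler's instance state are replaced by plain arrays):

import numpy as np

overlap_duty = np.array([0.50, 0.01, 0.30])   # per-column overlap duty cycles
min_duty     = np.array([0.10, 0.10, 0.10])   # minimum acceptable duty cycles
perms        = np.array([[0.20, 0.00, 0.40],
                         [0.10, 0.30, 0.00],
                         [0.50, 0.20, 0.10]])
potential    = np.array([[1, 0, 1],
                         [1, 1, 0],
                         [1, 1, 1]])
inc = 0.05

for col in np.where(overlap_duty < min_duty)[0]:   # weak columns only
    mask = np.where(potential[col] > 0)[0]         # potential synapses only
    perms[col, mask] += inc
print(perms[1])   # [0.15 0.35 0.  ] -- only column 1 was bumped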
humilis/humilis-lambdautils
lambdautils/state.py
https://github.com/humilis/humilis-lambdautils/blob/58f75eb5ace23523c283708d56a9193181ea7e8e/lambdautils/state.py#L270-L324
def set_state(key, value, namespace=None, table_name=None, environment=None, layer=None, stage=None, shard_id=None, consistent=True, serializer=json.dumps, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000, ttl=None): """Set Lambda state value.""" if table_name is None: table_name = _state_table_name(environment=environment, layer=layer, stage=stage) if not table_name: msg = ("Can't produce state table name: unable to set state " "item '{}'".format(key)) logger.error(msg) raise StateTableError(msg) return dynamodb = boto3.resource("dynamodb") table = dynamodb.Table(table_name) logger.info("Putting {} -> {} in DynamoDB table {}".format(key, value, table_name)) if serializer: try: value = serializer(value) except TypeError: logger.error( "Value for state key '{}' is not json-serializable".format( key)) raise if namespace: key = "{}:{}".format(namespace, key) if shard_id: key = "{}:{}".format(shard_id, key) item = {"id": key, "value": value} if ttl: item["ttl"] = {"N": str(int(time.time() + ttl))} @retry(retry_on_exception=_is_critical_exception, wait_exponential_multiplier=500, wait_exponential_max=5000, stop_max_delay=10000) def put_item(): try: return table.put_item(Item=item) except Exception as err: if _is_dynamodb_critical_exception(err): raise CriticalError(err) else: raise resp = put_item() logger.info("Response from DynamoDB: '{}'".format(resp)) return resp
[ "def", "set_state", "(", "key", ",", "value", ",", "namespace", "=", "None", ",", "table_name", "=", "None", ",", "environment", "=", "None", ",", "layer", "=", "None", ",", "stage", "=", "None", ",", "shard_id", "=", "None", ",", "consistent", "=", "True", ",", "serializer", "=", "json", ".", "dumps", ",", "wait_exponential_multiplier", "=", "500", ",", "wait_exponential_max", "=", "5000", ",", "stop_max_delay", "=", "10000", ",", "ttl", "=", "None", ")", ":", "if", "table_name", "is", "None", ":", "table_name", "=", "_state_table_name", "(", "environment", "=", "environment", ",", "layer", "=", "layer", ",", "stage", "=", "stage", ")", "if", "not", "table_name", ":", "msg", "=", "(", "\"Can't produce state table name: unable to set state \"", "\"item '{}'\"", ".", "format", "(", "key", ")", ")", "logger", ".", "error", "(", "msg", ")", "raise", "StateTableError", "(", "msg", ")", "return", "dynamodb", "=", "boto3", ".", "resource", "(", "\"dynamodb\"", ")", "table", "=", "dynamodb", ".", "Table", "(", "table_name", ")", "logger", ".", "info", "(", "\"Putting {} -> {} in DynamoDB table {}\"", ".", "format", "(", "key", ",", "value", ",", "table_name", ")", ")", "if", "serializer", ":", "try", ":", "value", "=", "serializer", "(", "value", ")", "except", "TypeError", ":", "logger", ".", "error", "(", "\"Value for state key '{}' is not json-serializable\"", ".", "format", "(", "key", ")", ")", "raise", "if", "namespace", ":", "key", "=", "\"{}:{}\"", ".", "format", "(", "namespace", ",", "key", ")", "if", "shard_id", ":", "key", "=", "\"{}:{}\"", ".", "format", "(", "shard_id", ",", "key", ")", "item", "=", "{", "\"id\"", ":", "key", ",", "\"value\"", ":", "value", "}", "if", "ttl", ":", "item", "[", "\"ttl\"", "]", "=", "{", "\"N\"", ":", "str", "(", "int", "(", "time", ".", "time", "(", ")", "+", "ttl", ")", ")", "}", "@", "retry", "(", "retry_on_exception", "=", "_is_critical_exception", ",", "wait_exponential_multiplier", "=", "500", ",", "wait_exponential_max", "=", "5000", ",", "stop_max_delay", "=", "10000", ")", "def", "put_item", "(", ")", ":", "try", ":", "return", "table", ".", "put_item", "(", "Item", "=", "item", ")", "except", "Exception", "as", "err", ":", "if", "_is_dynamodb_critical_exception", "(", "err", ")", ":", "raise", "CriticalError", "(", "err", ")", "else", ":", "raise", "resp", "=", "put_item", "(", ")", "logger", ".", "info", "(", "\"Response from DynamoDB: '{}'\"", ".", "format", "(", "resp", ")", ")", "return", "resp" ]
Set Lambda state value.
[ "Set", "Lambda", "state", "value", "." ]
python
train
33.836364
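A standalone sketch of just the key-composition rule used above (namespace is prepended first, then shard_id, so the shard ends up outermost):

def compose_key(key, namespace=None, shard_id=None):
    if namespace:
        key = "{}:{}".format(namespace, key)
    if shard_id:
        key = "{}:{}".format(shard_id, key)
    return key

assert compose_key("last-run") == "last-run"
assert compose_key("last-run", namespace="etl") == "etl:last-run"
assert compose_key("last-run", namespace="etl",
                   shard_id="shard-0001") == "shard-0001:etl:last-run"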
fracpete/python-weka-wrapper3
python/weka/core/dataset.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/dataset.py#L235-L247
def add_instance(self, inst, index=None): """ Adds the specified instance to the dataset. :param inst: the Instance to add :type inst: Instance :param index: the 0-based index where to add the Instance :type index: int """ if index is None: self.__append_instance(inst.jobject) else: self.__insert_instance(index, inst.jobject)
[ "def", "add_instance", "(", "self", ",", "inst", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "self", ".", "__append_instance", "(", "inst", ".", "jobject", ")", "else", ":", "self", ".", "__insert_instance", "(", "index", ",", "inst", ".", "jobject", ")" ]
Adds the specified instance to the dataset. :param inst: the Instance to add :type inst: Instance :param index: the 0-based index where to add the Instance :type index: int
[ "Adds", "the", "specified", "instance", "to", "the", "dataset", "." ]
python
train
31.692308
timothyb0912/pylogit
pylogit/mixed_logit.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/mixed_logit.py#L135-L168
def add_mixl_specific_results_to_estimation_res(estimator, results_dict): """ Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict. """ # Get the probability of each sequence of choices, given the draws prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"], estimator.choice_vector, estimator.rows_to_mixers, return_type='all') # Add the various items to the results_dict. results_dict["simulated_sequence_probs"] = prob_res[0] results_dict["expanded_sequence_probs"] = prob_res[1] return results_dict
[ "def", "add_mixl_specific_results_to_estimation_res", "(", "estimator", ",", "results_dict", ")", ":", "# Get the probability of each sequence of choices, given the draws", "prob_res", "=", "mlc", ".", "calc_choice_sequence_probs", "(", "results_dict", "[", "\"long_probs\"", "]", ",", "estimator", ".", "choice_vector", ",", "estimator", ".", "rows_to_mixers", ",", "return_type", "=", "'all'", ")", "# Add the various items to the results_dict.", "results_dict", "[", "\"simulated_sequence_probs\"", "]", "=", "prob_res", "[", "0", "]", "results_dict", "[", "\"expanded_sequence_probs\"", "]", "=", "prob_res", "[", "1", "]", "return", "results_dict" ]
Stores particular items in the results dictionary that are unique to mixed logit-type models. In particular, this function calculates and adds `sequence_probs` and `expanded_sequence_probs` to the results dictionary. The `constrained_pos` object is also stored to the results_dict. Parameters ---------- estimator : an instance of the MixedEstimator class. Should contain a `choice_vector` attribute that is a 1D ndarray representing the choices made for this model's dataset. Should also contain a `rows_to_mixers` attribute that maps each row of the long format data to a unit of observation that the mixing is being performed over. results_dict : dict. This dictionary should be the dictionary returned from scipy.optimize.minimize. In particular, it should have the following `long_probs` key. Returns ------- results_dict.
[ "Stores", "particular", "items", "in", "the", "results", "dictionary", "that", "are", "unique", "to", "mixed", "logit", "-", "type", "models", ".", "In", "particular", "this", "function", "calculates", "and", "adds", "sequence_probs", "and", "expanded_sequence_probs", "to", "the", "results", "dictionary", ".", "The", "constrained_pos", "object", "is", "also", "stored", "to", "the", "results_dict", "." ]
python
train
45.235294
3DLIRIOUS/MeshLabXML
meshlabxml/files.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/files.py#L253-L279
def measure_all(fbasename=None, log=None, ml_version=ml_version): """Measures mesh geometry, aabb and topology.""" ml_script1_file = 'TEMP3D_measure_gAndT.mlx' if ml_version == '1.3.4BETA': file_out = 'TEMP3D_aabb.xyz' else: file_out = None ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version) compute.measure_geometry(ml_script1) compute.measure_topology(ml_script1) ml_script1.save_to_file(ml_script1_file) ml_script1.run_script(log=log, script_file=ml_script1_file) geometry = ml_script1.geometry topology = ml_script1.topology if ml_version == '1.3.4BETA': if log is not None: log_file = open(log, 'a') log_file.write( '***Axis Aligned Bounding Results for file "%s":\n' % fbasename) log_file.close() aabb = measure_aabb(file_out, log) else: aabb = geometry['aabb'] return aabb, geometry, topology
[ "def", "measure_all", "(", "fbasename", "=", "None", ",", "log", "=", "None", ",", "ml_version", "=", "ml_version", ")", ":", "ml_script1_file", "=", "'TEMP3D_measure_gAndT.mlx'", "if", "ml_version", "==", "'1.3.4BETA'", ":", "file_out", "=", "'TEMP3D_aabb.xyz'", "else", ":", "file_out", "=", "None", "ml_script1", "=", "mlx", ".", "FilterScript", "(", "file_in", "=", "fbasename", ",", "file_out", "=", "file_out", ",", "ml_version", "=", "ml_version", ")", "compute", ".", "measure_geometry", "(", "ml_script1", ")", "compute", ".", "measure_topology", "(", "ml_script1", ")", "ml_script1", ".", "save_to_file", "(", "ml_script1_file", ")", "ml_script1", ".", "run_script", "(", "log", "=", "log", ",", "script_file", "=", "ml_script1_file", ")", "geometry", "=", "ml_script1", ".", "geometry", "topology", "=", "ml_script1", ".", "topology", "if", "ml_version", "==", "'1.3.4BETA'", ":", "if", "log", "is", "not", "None", ":", "log_file", "=", "open", "(", "log", ",", "'a'", ")", "log_file", ".", "write", "(", "'***Axis Aligned Bounding Results for file \"%s\":\\n'", "%", "fbasename", ")", "log_file", ".", "close", "(", ")", "aabb", "=", "measure_aabb", "(", "file_out", ",", "log", ")", "else", ":", "aabb", "=", "geometry", "[", "'aabb'", "]", "return", "aabb", ",", "geometry", ",", "topology" ]
Measures mesh geometry, aabb and topology.
[ "Measures", "mesh", "geometry", "aabb", "and", "topology", "." ]
python
test
36.259259
saltstack/salt
salt/utils/network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/network.py#L200-L224
def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn
[ "def", "get_fqhostname", "(", ")", ":", "# try getaddrinfo()", "fqdn", "=", "None", "try", ":", "addrinfo", "=", "socket", ".", "getaddrinfo", "(", "socket", ".", "gethostname", "(", ")", ",", "0", ",", "socket", ".", "AF_UNSPEC", ",", "socket", ".", "SOCK_STREAM", ",", "socket", ".", "SOL_TCP", ",", "socket", ".", "AI_CANONNAME", ")", "for", "info", "in", "addrinfo", ":", "# info struct [family, socktype, proto, canonname, sockaddr]", "# On Windows `canonname` can be an empty string", "# This can cause the function to return `None`", "if", "len", "(", "info", ")", ">", "3", "and", "info", "[", "3", "]", ":", "fqdn", "=", "info", "[", "3", "]", "break", "except", "socket", ".", "gaierror", ":", "pass", "# NOTE: this used to log.error() but it was later disabled", "except", "socket", ".", "error", "as", "err", ":", "log", ".", "debug", "(", "'socket.getaddrinfo() failure while finding fqdn: %s'", ",", "err", ")", "if", "fqdn", "is", "None", ":", "fqdn", "=", "socket", ".", "getfqdn", "(", ")", "return", "fqdn" ]
Returns the fully qualified hostname
[ "Returns", "the", "fully", "qualified", "hostname" ]
python
train
34.88
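The same getaddrinfo-with-fallback pattern as a standalone, runnable sketch:

import socket

def fq_hostname():
    try:
        addrinfo = socket.getaddrinfo(
            socket.gethostname(), 0,
            socket.AF_UNSPEC, socket.SOCK_STREAM,
            socket.SOL_TCP, socket.AI_CANONNAME)
        for info in addrinfo:
            # canonname (info[3]) may be an empty string, notably on Windows
            if len(info) > 3 and info[3]:
                return info[3]
    except (socket.gaierror, socket.error):
        pass
    return socket.getfqdn()   # fallback when no canonical name was found

print(fq_hostname())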
googleapis/google-cloud-python
error_reporting/google/cloud/errorreporting_v1beta1/gapic/error_stats_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/error_reporting/google/cloud/errorreporting_v1beta1/gapic/error_stats_service_client.py#L472-L542
def delete_events( self, project_name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes all error events of a given project. Example: >>> from google.cloud import errorreporting_v1beta1 >>> >>> client = errorreporting_v1beta1.ErrorStatsServiceClient() >>> >>> project_name = client.project_path('[PROJECT]') >>> >>> response = client.delete_events(project_name) Args: project_name (str): [Required] The resource name of the Google Cloud Platform project. Written as ``projects/`` plus the `Google Cloud Platform project ID <https://support.google.com/cloud/answer/6158840>`__. Example: ``projects/my-project-123``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_events" not in self._inner_api_calls: self._inner_api_calls[ "delete_events" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_events, default_retry=self._method_configs["DeleteEvents"].retry, default_timeout=self._method_configs["DeleteEvents"].timeout, client_info=self._client_info, ) request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("project_name", project_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["delete_events"]( request, retry=retry, timeout=timeout, metadata=metadata )
[ "def", "delete_events", "(", "self", ",", "project_name", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "metadata", "=", "None", ",", ")", ":", "# Wrap the transport method to add retry and timeout logic.", "if", "\"delete_events\"", "not", "in", "self", ".", "_inner_api_calls", ":", "self", ".", "_inner_api_calls", "[", "\"delete_events\"", "]", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "wrap_method", "(", "self", ".", "transport", ".", "delete_events", ",", "default_retry", "=", "self", ".", "_method_configs", "[", "\"DeleteEvents\"", "]", ".", "retry", ",", "default_timeout", "=", "self", ".", "_method_configs", "[", "\"DeleteEvents\"", "]", ".", "timeout", ",", "client_info", "=", "self", ".", "_client_info", ",", ")", "request", "=", "error_stats_service_pb2", ".", "DeleteEventsRequest", "(", "project_name", "=", "project_name", ")", "if", "metadata", "is", "None", ":", "metadata", "=", "[", "]", "metadata", "=", "list", "(", "metadata", ")", "try", ":", "routing_header", "=", "[", "(", "\"project_name\"", ",", "project_name", ")", "]", "except", "AttributeError", ":", "pass", "else", ":", "routing_metadata", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "routing_header", ".", "to_grpc_metadata", "(", "routing_header", ")", "metadata", ".", "append", "(", "routing_metadata", ")", "return", "self", ".", "_inner_api_calls", "[", "\"delete_events\"", "]", "(", "request", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")" ]
Deletes all error events of a given project. Example: >>> from google.cloud import errorreporting_v1beta1 >>> >>> client = errorreporting_v1beta1.ErrorStatsServiceClient() >>> >>> project_name = client.project_path('[PROJECT]') >>> >>> response = client.delete_events(project_name) Args: project_name (str): [Required] The resource name of the Google Cloud Platform project. Written as ``projects/`` plus the `Google Cloud Platform project ID <https://support.google.com/cloud/answer/6158840>`__. Example: ``projects/my-project-123``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Deletes", "all", "error", "events", "of", "a", "given", "project", "." ]
python
train
42.070423
dpkp/kafka-python
kafka/record/util.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/record/util.py#L63-L85
def size_of_varint(value): """ Number of bytes needed to encode an integer in variable-length format. """ value = (value << 1) ^ (value >> 63) if value <= 0x7f: return 1 if value <= 0x3fff: return 2 if value <= 0x1fffff: return 3 if value <= 0xfffffff: return 4 if value <= 0x7ffffffff: return 5 if value <= 0x3ffffffffff: return 6 if value <= 0x1ffffffffffff: return 7 if value <= 0xffffffffffffff: return 8 if value <= 0x7fffffffffffffff: return 9 return 10
[ "def", "size_of_varint", "(", "value", ")", ":", "value", "=", "(", "value", "<<", "1", ")", "^", "(", "value", ">>", "63", ")", "if", "value", "<=", "0x7f", ":", "return", "1", "if", "value", "<=", "0x3fff", ":", "return", "2", "if", "value", "<=", "0x1fffff", ":", "return", "3", "if", "value", "<=", "0xfffffff", ":", "return", "4", "if", "value", "<=", "0x7ffffffff", ":", "return", "5", "if", "value", "<=", "0x3ffffffffff", ":", "return", "6", "if", "value", "<=", "0x1ffffffffffff", ":", "return", "7", "if", "value", "<=", "0xffffffffffffff", ":", "return", "8", "if", "value", "<=", "0x7fffffffffffffff", ":", "return", "9", "return", "10" ]
Number of bytes needed to encode an integer in variable-length format.
[ "Number", "of", "bytes", "needed", "to", "encode", "an", "integer", "in", "variable", "-", "length", "format", "." ]
python
train
24.391304
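Assuming size_of_varint above is in scope, this sketch cross-checks the size table against a matching zigzag encoder (the encoder here is illustrative, not kafka-python's):

def encode_zigzag_varint(value):
    value = (value << 1) ^ (value >> 63)   # same zigzag step as above
    out = b""
    while value & ~0x7f:
        out += bytes([(value & 0x7f) | 0x80])
        value >>= 7
    return out + bytes([value])

for v in (0, -1, 63, 64, 300, -12345678, 2**62):
    assert size_of_varint(v) == len(encode_zigzag_varint(v)), v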
angr/angr
angr/state_plugins/heap/heap_freelist.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/heap/heap_freelist.py#L59-L64
def next_chunk(self): """ Returns the chunk immediately following (and adjacent to) this one. """ raise NotImplementedError("%s not implemented for %s" % (self.next_chunk.__func__.__name__, self.__class__.__name__))
[ "def", "next_chunk", "(", "self", ")", ":", "raise", "NotImplementedError", "(", "\"%s not implemented for %s\"", "%", "(", "self", ".", "next_chunk", ".", "__func__", ".", "__name__", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Returns the chunk immediately following (and adjacent to) this one.
[ "Returns", "the", "chunk", "immediately", "following", "(", "and", "adjacent", "to", ")", "this", "one", "." ]
python
train
51.166667
lreis2415/PyGeoC
pygeoc/hydro.py
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/hydro.py#L103-L109
def get_cell_length(flow_model): """Get flow direction induced cell length dict. Args: flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported. """ assert flow_model.lower() in FlowModelConst.d8_lens return FlowModelConst.d8_lens.get(flow_model.lower())
[ "def", "get_cell_length", "(", "flow_model", ")", ":", "assert", "flow_model", ".", "lower", "(", ")", "in", "FlowModelConst", ".", "d8_lens", "return", "FlowModelConst", ".", "d8_lens", ".", "get", "(", "flow_model", ".", "lower", "(", ")", ")" ]
Get the flow-direction-induced cell length dict. Args: flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
[ "Get", "flow", "direction", "induced", "cell", "length", "dict", ".", "Args", ":", "flow_model", ":", "Currently", "TauDEM", "ArcGIS", "and", "Whitebox", "are", "supported", "." ]
python
train
45
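A standalone sketch of the lookup being wrapped; the TauDEM table below is an assumption for illustration (odd codes cardinal, even codes diagonal in TauDEM's D8 encoding), not the actual FlowModelConst data:

import math

# Hypothetical stand-in for FlowModelConst.d8_lens: per-model tables mapping
# a D8 direction code to its cell-length multiplier (1 or sqrt(2)).
D8_LENS = {
    "taudem": {1: 1.0, 3: 1.0, 5: 1.0, 7: 1.0,
               2: math.sqrt(2), 4: math.sqrt(2),
               6: math.sqrt(2), 8: math.sqrt(2)},
}

def get_cell_length(flow_model):
    assert flow_model.lower() in D8_LENS
    return D8_LENS[flow_model.lower()]

print(get_cell_length("TauDEM")[2])   # sqrt(2) for a diagonal neighbor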
CalebBell/ht
ht/conv_internal.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_internal.py#L1024-L1063
def turbulent_Nunner(Re, Pr, fd, fd_smooth): r'''Calculates internal convection Nusselt number for turbulent flows in pipe according to [2]_ as shown in [1]_. .. math:: Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]} Parameters ---------- Re : float Reynolds number, [-] Pr : float Prandtl number, [-] fd : float Darcy friction factor [-] fd_smooth : float Darcy friction factor of a smooth pipe [-] Returns ------- Nu : float Nusselt number, [-] Notes ----- Valid for Pr ≅ 0.7; bad results for Pr > 1. Examples -------- >>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005) 101.15841010919947 References ---------- .. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [2] W. Nunner, "Warmeiibergang und Druckabfall in Rauhen Rohren," VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956 ''' return Re*Pr*fd/8./(1 + 1.5*Re**-0.125*Pr**(-1/6.)*(Pr*fd/fd_smooth - 1.))
[ "def", "turbulent_Nunner", "(", "Re", ",", "Pr", ",", "fd", ",", "fd_smooth", ")", ":", "return", "Re", "*", "Pr", "*", "fd", "/", "8.", "/", "(", "1", "+", "1.5", "*", "Re", "**", "-", "0.125", "*", "Pr", "**", "(", "-", "1", "/", "6.", ")", "*", "(", "Pr", "*", "fd", "/", "fd_smooth", "-", "1.", ")", ")" ]
r'''Calculates internal convection Nusselt number for turbulent flows in pipe according to [2]_ as shown in [1]_. .. math:: Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]} Parameters ---------- Re : float Reynolds number, [-] Pr : float Prandtl number, [-] fd : float Darcy friction factor [-] fd_smooth : float Darcy friction factor of a smooth pipe [-] Returns ------- Nu : float Nusselt number, [-] Notes ----- Valid for Pr ≅ 0.7; bad results for Pr > 1. Examples -------- >>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005) 101.15841010919947 References ---------- .. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [2] W. Nunner, "Warmeiibergang und Druckabfall in Rauhen Rohren," VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956
[ "r", "Calculates", "internal", "convection", "Nusselt", "number", "for", "turbulent", "flows", "in", "pipe", "according", "to", "[", "2", "]", "_", "as", "shown", "in", "[", "1", "]", "_", "." ]
python
train
27.075
coldfix/udiskie
udiskie/config.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/config.py#L159-L174
def match_config(filters, device, kind, default): """ Matches devices against multiple :class:`DeviceFilter`s. :param list filters: device filters :param Device device: device to be mounted :param str kind: value kind :param default: default value :returns: value of the first matching filter """ if device is None: return default matches = (f.value(kind, device) for f in filters if f.has_value(kind) and f.match(device)) return next(matches, default)
[ "def", "match_config", "(", "filters", ",", "device", ",", "kind", ",", "default", ")", ":", "if", "device", "is", "None", ":", "return", "default", "matches", "=", "(", "f", ".", "value", "(", "kind", ",", "device", ")", "for", "f", "in", "filters", "if", "f", ".", "has_value", "(", "kind", ")", "and", "f", ".", "match", "(", "device", ")", ")", "return", "next", "(", "matches", ",", "default", ")" ]
Matches devices against multiple :class:`DeviceFilter`s. :param list filters: device filters :param Device device: device to be mounted :param str kind: value kind :param default: default value :returns: value of the first matching filter
[ "Matches", "devices", "against", "multiple", ":", "class", ":", "DeviceFilter", "s", "." ]
python
train
32.5
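Assuming match_config above is in scope, a toy stand-in for DeviceFilter shows the first-match-wins behavior (the Filter class here is hypothetical, exposing only the three methods match_config relies on):

class Filter:
    def __init__(self, values, predicate):
        self._values, self._predicate = values, predicate
    def has_value(self, kind):
        return kind in self._values
    def value(self, kind, device):
        return self._values[kind]
    def match(self, device):
        return self._predicate(device)

filters = [Filter({"options": ["ro"]}, lambda dev: dev.startswith("sd")),
           Filter({"options": ["rw"]}, lambda dev: True)]
print(match_config(filters, "sdb1", "options", []))     # ['ro'] -- first match wins
print(match_config(filters, "mmcblk0", "options", []))  # ['rw']
print(match_config(filters, None, "options", []))       # [] -- default, no device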
spdx/tools-python
spdx/parsers/tagvaluebuilders.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvaluebuilders.py#L1074-L1090
def set_lic_text(self, doc, text): """Sets license extracted text. Raises SPDXValueError if text is not free form text. Raises OrderError if no license ID defined. """ if self.has_extr_lic(doc): if not self.extr_text_set: self.extr_text_set = True if validations.validate_is_free_form_text(text): self.extr_lic(doc).text = str_from_text(text) return True else: raise SPDXValueError('ExtractedLicense::text') else: raise CardinalityError('ExtractedLicense::text') else: raise OrderError('ExtractedLicense::text')
[ "def", "set_lic_text", "(", "self", ",", "doc", ",", "text", ")", ":", "if", "self", ".", "has_extr_lic", "(", "doc", ")", ":", "if", "not", "self", ".", "extr_text_set", ":", "self", ".", "extr_text_set", "=", "True", "if", "validations", ".", "validate_is_free_form_text", "(", "text", ")", ":", "self", ".", "extr_lic", "(", "doc", ")", ".", "text", "=", "str_from_text", "(", "text", ")", "return", "True", "else", ":", "raise", "SPDXValueError", "(", "'ExtractedLicense::text'", ")", "else", ":", "raise", "CardinalityError", "(", "'ExtractedLicense::text'", ")", "else", ":", "raise", "OrderError", "(", "'ExtractedLicense::text'", ")" ]
Sets license extracted text. Raises SPDXValueError if text is not free form text. Raises OrderError if no license ID defined.
[ "Sets", "license", "extracted", "text", ".", "Raises", "SPDXValueError", "if", "text", "is", "not", "free", "form", "text", ".", "Raises", "OrderError", "if", "no", "license", "ID", "defined", "." ]
python
valid
41.352941
SwissDataScienceCenter/renku-python
renku/cli/rerun.py
https://github.com/SwissDataScienceCenter/renku-python/blob/691644d695b055a01e0ca22b2620e55bbd928c0d/renku/cli/rerun.py#L68-L77
def show_inputs(client, workflow): """Show workflow inputs and exit.""" for input_ in workflow.inputs: click.echo( '{id}: {default}'.format( id=input_.id, default=_format_default(client, input_.default), ) ) sys.exit(0)
[ "def", "show_inputs", "(", "client", ",", "workflow", ")", ":", "for", "input_", "in", "workflow", ".", "inputs", ":", "click", ".", "echo", "(", "'{id}: {default}'", ".", "format", "(", "id", "=", "input_", ".", "id", ",", "default", "=", "_format_default", "(", "client", ",", "input_", ".", "default", ")", ",", ")", ")", "sys", ".", "exit", "(", "0", ")" ]
Show workflow inputs and exit.
[ "Show", "workflow", "inputs", "and", "exit", "." ]
python
train
29.4
MediaFire/mediafire-python-open-sdk
mediafire/client.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/client.py#L255-L280
def get_folder_contents_iter(self, uri): """Return iterator for directory contents. uri -- mediafire URI Example: for item in get_folder_contents_iter('mf:///Documents'): print(item) """ resource = self.get_resource_by_uri(uri) if not isinstance(resource, Folder): raise NotAFolderError(uri) folder_key = resource['folderkey'] for item in self._folder_get_content_iter(folder_key): if 'filename' in item: # Work around https://mediafire.mantishub.com/view.php?id=5 # TODO: remove in 1.0 if ".patch." in item['filename']: continue yield File(item) elif 'name' in item: yield Folder(item)
[ "def", "get_folder_contents_iter", "(", "self", ",", "uri", ")", ":", "resource", "=", "self", ".", "get_resource_by_uri", "(", "uri", ")", "if", "not", "isinstance", "(", "resource", ",", "Folder", ")", ":", "raise", "NotAFolderError", "(", "uri", ")", "folder_key", "=", "resource", "[", "'folderkey'", "]", "for", "item", "in", "self", ".", "_folder_get_content_iter", "(", "folder_key", ")", ":", "if", "'filename'", "in", "item", ":", "# Work around https://mediafire.mantishub.com/view.php?id=5", "# TODO: remove in 1.0", "if", "\".patch.\"", "in", "item", "[", "'filename'", "]", ":", "continue", "yield", "File", "(", "item", ")", "elif", "'name'", "in", "item", ":", "yield", "Folder", "(", "item", ")" ]
Return iterator for directory contents. uri -- mediafire URI Example: for item in get_folder_contents_iter('mf:///Documents'): print(item)
[ "Return", "iterator", "for", "directory", "contents", "." ]
python
train
30.576923
475Cumulus/TBone
tbone/db/models.py
https://github.com/475Cumulus/TBone/blob/5a6672d8bbac449a0ab9e99560609f671fe84d4d/tbone/db/models.py#L336-L378
async def create_collection(db, model_class: MongoCollectionMixin): ''' Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class :param db: A database handle :type db: motor.motor_asyncio.AsyncIOMotorClient :param model_class: The model to create :type model_class: Subclass of ``Model`` mixed with ``MongoCollectionMixin`` ''' name = model_class.get_collection_name() if name: try: # create collection coll = await db.create_collection(name, **model_class._meta.creation_args) except CollectionInvalid: # collection already exists coll = db[name] # create indices if hasattr(model_class._meta, 'indices') and isinstance(model_class._meta.indices, list): for index in model_class._meta.indices: try: index_kwargs = { 'name': index.get('name', '_'.join([x[0] for x in index['fields']])), 'unique': index.get('unique', False), 'sparse': index.get('sparse', False), 'expireAfterSeconds': index.get('expireAfterSeconds', None), 'background': True } if 'partialFilterExpression' in index: index_kwargs['partialFilterExpression'] = index.get('partialFilterExpression', {}) await db[name].create_index( index['fields'], **index_kwargs ) except OperationFailure as ex: pass # index already exists ? TODO: do something with this return coll return None
[ "async", "def", "create_collection", "(", "db", ",", "model_class", ":", "MongoCollectionMixin", ")", ":", "name", "=", "model_class", ".", "get_collection_name", "(", ")", "if", "name", ":", "try", ":", "# create collection", "coll", "=", "await", "db", ".", "create_collection", "(", "name", ",", "*", "*", "model_class", ".", "_meta", ".", "creation_args", ")", "except", "CollectionInvalid", ":", "# collection already exists", "coll", "=", "db", "[", "name", "]", "# create indices", "if", "hasattr", "(", "model_class", ".", "_meta", ",", "'indices'", ")", "and", "isinstance", "(", "model_class", ".", "_meta", ".", "indices", ",", "list", ")", ":", "for", "index", "in", "model_class", ".", "_meta", ".", "indices", ":", "try", ":", "index_kwargs", "=", "{", "'name'", ":", "index", ".", "get", "(", "'name'", ",", "'_'", ".", "join", "(", "[", "x", "[", "0", "]", "for", "x", "in", "index", "[", "'fields'", "]", "]", ")", ")", ",", "'unique'", ":", "index", ".", "get", "(", "'unique'", ",", "False", ")", ",", "'sparse'", ":", "index", ".", "get", "(", "'sparse'", ",", "False", ")", ",", "'expireAfterSeconds'", ":", "index", ".", "get", "(", "'expireAfterSeconds'", ",", "None", ")", ",", "'background'", ":", "True", "}", "if", "'partialFilterExpression'", "in", "index", ":", "index_kwargs", "[", "'partialFilterExpression'", "]", "=", "index", ".", "get", "(", "'partialFilterExpression'", ",", "{", "}", ")", "await", "db", "[", "name", "]", ".", "create_index", "(", "index", "[", "'fields'", "]", ",", "*", "*", "index_kwargs", ")", "except", "OperationFailure", "as", "ex", ":", "pass", "# index already exists ? TODO: do something with this", "return", "coll", "return", "None" ]
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class :param db: A database handle :type db: motor.motor_asyncio.AsyncIOMotorClient :param model_class: The model to create :type model_class: Subclass of ``Model`` mixed with ``MongoCollectionMixin``
[ "Creates", "a", "MongoDB", "collection", "and", "all", "the", "declared", "indices", "in", "the", "model", "s", "Meta", "class" ]
python
train
40.511628
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L303-L319
def remove_sister(self, sister=None): """ Removes a sister node. It has the same effect as **`TreeNode.up.remove_child(sister)`** If a sister node is not supplied, the first sister will be deleted and returned. :argument sister: A node instance :return: The node removed """ sisters = self.get_sisters() if len(sisters) > 0: if sister is None: sister = sisters.pop(0) return self.up.remove_child(sister)
[ "def", "remove_sister", "(", "self", ",", "sister", "=", "None", ")", ":", "sisters", "=", "self", ".", "get_sisters", "(", ")", "if", "len", "(", "sisters", ")", ">", "0", ":", "if", "sister", "is", "None", ":", "sister", "=", "sisters", ".", "pop", "(", "0", ")", "return", "self", ".", "up", ".", "remove_child", "(", "sister", ")" ]
Removes a sister node. It has the same effect as **`TreeNode.up.remove_child(sister)`** If a sister node is not supplied, the first sister will be deleted and returned. :argument sister: A node instance :return: The node removed
[ "Removes", "a", "sister", "node", ".", "It", "has", "the", "same", "effect", "as", "**", "TreeNode", ".", "up", ".", "remove_child", "(", "sister", ")", "**" ]
python
train
30
tanghaibao/goatools
goatools/rpt/goea_nt_xfrm.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/rpt/goea_nt_xfrm.py#L25-L30
def get_study_items(self): """Get all study items (e.g., geneids).""" study_items = set() for rec in self.goea_results: study_items |= rec.study_items return study_items
[ "def", "get_study_items", "(", "self", ")", ":", "study_items", "=", "set", "(", ")", "for", "rec", "in", "self", ".", "goea_results", ":", "study_items", "|=", "rec", ".", "study_items", "return", "study_items" ]
Get all study items (e.g., geneids).
[ "Get", "all", "study", "items", "(", "e", ".", "g", ".", "geneids", ")", "." ]
python
train
34.666667
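The same set-union aggregation on toy records (SimpleNamespace stands in for a GOEA result object):

from types import SimpleNamespace

goea_results = [SimpleNamespace(study_items={"geneA", "geneB"}),
                SimpleNamespace(study_items={"geneB", "geneC"})]
study_items = set()
for rec in goea_results:
    study_items |= rec.study_items
print(sorted(study_items))   # ['geneA', 'geneB', 'geneC']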
mozilla/amo-validator
validator/errorbundler.py
https://github.com/mozilla/amo-validator/blob/0251bfbd7d93106e01ecdb6de5fcd1dc1a180664/validator/errorbundler.py#L186-L255
def _save_message(self, stack, type_, message, context=None, from_merge=False): 'Stores a message in the appropriate message stack.' uid = uuid.uuid4().hex message['uid'] = uid # Get the context for the message (if there's a context available) if context is not None: if isinstance(context, tuple): message['context'] = context else: message['context'] = ( context.get_context(line=message['line'], column=message['column'])) else: message['context'] = None if self.package_stack: if not isinstance(message['file'], list): message['file'] = [message['file']] message['file'] = self.package_stack + message['file'] # Test that if for_appversions is set that we're only applying to # supported add-ons. THIS IS THE LAST FILTER BEFORE THE MESSAGE IS # ADDED TO THE STACK! if message['for_appversions']: if not self.supports_version(message['for_appversions']): if self.instant: print '(Instant error discarded)' self._print_message(type_, message, verbose=True) return elif self.version_requirements: # If there was no for_appversions but there were version # requirements detailed in the decorator, use the ones from the # decorator. message['for_appversions'] = self.version_requirements # Save the message to the stack. stack.append(message) # Mark the tier that the error occurred at. if message['tier'] is None: message['tier'] = self.tier # Build out the compatibility summary if possible. if message['compatibility_type'] and not from_merge: self.compat_summary['%ss' % message['compatibility_type']] += 1 # Build out the message tree entry. if message['id']: tree = self.message_tree last_id = None for eid in message['id']: if last_id is not None: tree = tree[last_id] if eid not in tree: tree[eid] = {'__errors': 0, '__warnings': 0, '__notices': 0, '__messages': []} tree[eid]['__%s' % type_] += 1 last_id = eid tree[last_id]['__messages'].append(uid) # If instant mode is turned on, output the message immediately. if self.instant: self._print_message(type_, message, verbose=True)
[ "def", "_save_message", "(", "self", ",", "stack", ",", "type_", ",", "message", ",", "context", "=", "None", ",", "from_merge", "=", "False", ")", ":", "uid", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "message", "[", "'uid'", "]", "=", "uid", "# Get the context for the message (if there's a context available)", "if", "context", "is", "not", "None", ":", "if", "isinstance", "(", "context", ",", "tuple", ")", ":", "message", "[", "'context'", "]", "=", "context", "else", ":", "message", "[", "'context'", "]", "=", "(", "context", ".", "get_context", "(", "line", "=", "message", "[", "'line'", "]", ",", "column", "=", "message", "[", "'column'", "]", ")", ")", "else", ":", "message", "[", "'context'", "]", "=", "None", "if", "self", ".", "package_stack", ":", "if", "not", "isinstance", "(", "message", "[", "'file'", "]", ",", "list", ")", ":", "message", "[", "'file'", "]", "=", "[", "message", "[", "'file'", "]", "]", "message", "[", "'file'", "]", "=", "self", ".", "package_stack", "+", "message", "[", "'file'", "]", "# Test that if for_appversions is set that we're only applying to", "# supported add-ons. THIS IS THE LAST FILTER BEFORE THE MESSAGE IS", "# ADDED TO THE STACK!", "if", "message", "[", "'for_appversions'", "]", ":", "if", "not", "self", ".", "supports_version", "(", "message", "[", "'for_appversions'", "]", ")", ":", "if", "self", ".", "instant", ":", "print", "'(Instant error discarded)'", "self", ".", "_print_message", "(", "type_", ",", "message", ",", "verbose", "=", "True", ")", "return", "elif", "self", ".", "version_requirements", ":", "# If there was no for_appversions but there were version", "# requirements detailed in the decorator, use the ones from the", "# decorator.", "message", "[", "'for_appversions'", "]", "=", "self", ".", "version_requirements", "# Save the message to the stack.", "stack", ".", "append", "(", "message", ")", "# Mark the tier that the error occurred at.", "if", "message", "[", "'tier'", "]", "is", "None", ":", "message", "[", "'tier'", "]", "=", "self", ".", "tier", "# Build out the compatibility summary if possible.", "if", "message", "[", "'compatibility_type'", "]", "and", "not", "from_merge", ":", "self", ".", "compat_summary", "[", "'%ss'", "%", "message", "[", "'compatibility_type'", "]", "]", "+=", "1", "# Build out the message tree entry.", "if", "message", "[", "'id'", "]", ":", "tree", "=", "self", ".", "message_tree", "last_id", "=", "None", "for", "eid", "in", "message", "[", "'id'", "]", ":", "if", "last_id", "is", "not", "None", ":", "tree", "=", "tree", "[", "last_id", "]", "if", "eid", "not", "in", "tree", ":", "tree", "[", "eid", "]", "=", "{", "'__errors'", ":", "0", ",", "'__warnings'", ":", "0", ",", "'__notices'", ":", "0", ",", "'__messages'", ":", "[", "]", "}", "tree", "[", "eid", "]", "[", "'__%s'", "%", "type_", "]", "+=", "1", "last_id", "=", "eid", "tree", "[", "last_id", "]", "[", "'__messages'", "]", ".", "append", "(", "uid", ")", "# If instant mode is turned on, output the message immediately.", "if", "self", ".", "instant", ":", "self", ".", "_print_message", "(", "type_", ",", "message", ",", "verbose", "=", "True", ")" ]
Stores a message in the appropriate message stack.
[ "Stores", "a", "message", "in", "the", "appropriate", "message", "stack", "." ]
python
train
38.8
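The nested message-tree bookkeeping in _save_message above is the least obvious part: counters live alongside child keys in the same dict, so every level of the id path gets incremented. A minimal standalone sketch; the '__' counter names and id-path walk mirror the record, while record_message and the sample ids are hypothetical.

import uuid

message_tree = {}

def record_message(message_id, type_):
    """Walk the id path, creating counter nodes as needed."""
    uid = uuid.uuid4().hex
    tree = message_tree
    last_id = None
    for eid in message_id:
        if last_id is not None:
            tree = tree[last_id]
        if eid not in tree:
            tree[eid] = {'__errors': 0, '__warnings': 0,
                         '__notices': 0, '__messages': []}
        tree[eid]['__%s' % type_] += 1
        last_id = eid
    tree[last_id]['__messages'].append(uid)

record_message(['testcases', 'packed', 'corrupt'], 'errors')
record_message(['testcases', 'packed', 'corrupt'], 'errors')
print(message_tree['testcases']['__errors'])  # 2: counts roll up along the path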
BernardFW/bernard
src/bernard/platforms/facebook/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L437-L470
async def receive_events(self, request: HttpRequest):
    """
    Events received from Facebook
    """
    body = await request.read()
    s = self.settings()

    try:
        content = ujson.loads(body)
    except ValueError:
        return json_response({
            'error': True,
            'message': 'Cannot decode body'
        }, status=400)

    secret = s['app_secret']
    actual_sig = request.headers['X-Hub-Signature']
    expected_sig = sign_message(body, secret)

    if not hmac.compare_digest(actual_sig, expected_sig):
        return json_response({
            'error': True,
            'message': 'Invalid signature',
        }, status=401)

    for entry in content['entry']:
        for raw_message in entry.get('messaging', []):
            message = FacebookMessage(raw_message, self)
            await self.handle_event(message)

    return json_response({
        'ok': True,
    })
[ "async", "def", "receive_events", "(", "self", ",", "request", ":", "HttpRequest", ")", ":", "body", "=", "await", "request", ".", "read", "(", ")", "s", "=", "self", ".", "settings", "(", ")", "try", ":", "content", "=", "ujson", ".", "loads", "(", "body", ")", "except", "ValueError", ":", "return", "json_response", "(", "{", "'error'", ":", "True", ",", "'message'", ":", "'Cannot decode body'", "}", ",", "status", "=", "400", ")", "secret", "=", "s", "[", "'app_secret'", "]", "actual_sig", "=", "request", ".", "headers", "[", "'X-Hub-Signature'", "]", "expected_sig", "=", "sign_message", "(", "body", ",", "secret", ")", "if", "not", "hmac", ".", "compare_digest", "(", "actual_sig", ",", "expected_sig", ")", ":", "return", "json_response", "(", "{", "'error'", ":", "True", ",", "'message'", ":", "'Invalid signature'", ",", "}", ",", "status", "=", "401", ")", "for", "entry", "in", "content", "[", "'entry'", "]", ":", "for", "raw_message", "in", "entry", ".", "get", "(", "'messaging'", ",", "[", "]", ")", ":", "message", "=", "FacebookMessage", "(", "raw_message", ",", "self", ")", "await", "self", ".", "handle_event", "(", "message", ")", "return", "json_response", "(", "{", "'ok'", ":", "True", ",", "}", ")" ]
Events received from Facebook
[ "Events", "received", "from", "Facebook" ]
python
train
29.088235
jpscaletti/pyceo
pyceo/params.py
https://github.com/jpscaletti/pyceo/blob/7f37eaf8e557d25f8e54634176139e0aad84b8df/pyceo/params.py#L20-L30
def param(name, help=""):
    """Decorator that adds a parameter to the wrapped command or function."""
    def decorator(func):
        params = getattr(func, "params", [])
        _param = Param(name, help)
        # Insert at the beginning so the apparent order is preserved
        params.insert(0, _param)
        func.params = params
        return func
    return decorator
[ "def", "param", "(", "name", ",", "help", "=", "\"\"", ")", ":", "def", "decorator", "(", "func", ")", ":", "params", "=", "getattr", "(", "func", ",", "\"params\"", ",", "[", "]", ")", "_param", "=", "Param", "(", "name", ",", "help", ")", "# Insert at the beginning so the apparent order is preserved", "params", ".", "insert", "(", "0", ",", "_param", ")", "func", ".", "params", "=", "params", "return", "func", "return", "decorator" ]
Decorator that adds a parameter to the wrapped command or function.
[ "Decorator", "that", "add", "a", "parameter", "to", "the", "wrapped", "command", "or", "function", "." ]
python
train
33.636364
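A hypothetical usage sketch for the param decorator above; Param is stubbed with a namedtuple and the decorator is repeated so the snippet runs standalone (in pyceo both come from the library itself).

from collections import namedtuple

Param = namedtuple("Param", ["name", "help"])  # stand-in for pyceo's Param

def param(name, help=""):
    def decorator(func):
        params = getattr(func, "params", [])
        # insert at the front so stacked decorators read top-down
        params.insert(0, Param(name, help))
        func.params = params
        return func
    return decorator

@param("path", help="file to load")
@param("verbose", help="print progress")
def load(path, verbose=False):
    pass

# Decorators apply bottom-up, but insert(0, ...) keeps the apparent order:
print([p.name for p in load.params])  # ['path', 'verbose']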
cltk/cltk
cltk/corpus/utils/formatter.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/utils/formatter.py#L165-L170
def assemble_tlg_author_filepaths():
    """Reads TLG index and builds a list of absolute filepaths."""
    plaintext_dir_rel = '~/cltk_data/greek/text/tlg/plaintext/'
    plaintext_dir = os.path.expanduser(plaintext_dir_rel)
    filepaths = [os.path.join(plaintext_dir, x + '.TXT') for x in TLG_INDEX]
    return filepaths
[ "def", "assemble_tlg_author_filepaths", "(", ")", ":", "plaintext_dir_rel", "=", "'~/cltk_data/greek/text/tlg/plaintext/'", "plaintext_dir", "=", "os", ".", "path", ".", "expanduser", "(", "plaintext_dir_rel", ")", "filepaths", "=", "[", "os", ".", "path", ".", "join", "(", "plaintext_dir", ",", "x", "+", "'.TXT'", ")", "for", "x", "in", "TLG_INDEX", "]", "return", "filepaths" ]
Reads TLG index and builds a list of absolute filepaths.
[ "Reads", "TLG", "index", "and", "builds", "a", "list", "of", "absolute", "filepaths", "." ]
python
train
53
ff0000/scarlet
scarlet/assets/crops.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/assets/crops.py#L158-L185
def create_crop(self, name, file_obj, x=None, x2=None, y=None, y2=None):
    """
    Generate Version for an Image.
    value has to be a serverpath relative to MEDIA_ROOT.

    Returns the spec for the crop that was created.
    """
    if name not in self._registry:
        return

    file_obj.seek(0)
    im = Image.open(file_obj)
    config = self._registry[name]
    if x is not None and x2 and y is not None and y2 and not config.editable:
        # You can't ask for something special
        # for non editable images
        return

    im = config.rotate_by_exif(im)
    crop_spec = config.get_crop_spec(im, x=x, x2=x2, y=y, y2=y2)
    image = config.process_image(im, crop_spec=crop_spec)
    if image:
        crop_name = utils.get_size_filename(file_obj.name, name)
        self._save_file(image, crop_name)
    return crop_spec
[ "def", "create_crop", "(", "self", ",", "name", ",", "file_obj", ",", "x", "=", "None", ",", "x2", "=", "None", ",", "y", "=", "None", ",", "y2", "=", "None", ")", ":", "if", "name", "not", "in", "self", ".", "_registry", ":", "return", "file_obj", ".", "seek", "(", "0", ")", "im", "=", "Image", ".", "open", "(", "file_obj", ")", "config", "=", "self", ".", "_registry", "[", "name", "]", "if", "x", "is", "not", "None", "and", "x2", "and", "y", "is", "not", "None", "and", "y2", "and", "not", "config", ".", "editable", ":", "# You can't ask for something special", "# for non editable images", "return", "im", "=", "config", ".", "rotate_by_exif", "(", "im", ")", "crop_spec", "=", "config", ".", "get_crop_spec", "(", "im", ",", "x", "=", "x", ",", "x2", "=", "x2", ",", "y", "=", "y", ",", "y2", "=", "y2", ")", "image", "=", "config", ".", "process_image", "(", "im", ",", "crop_spec", "=", "crop_spec", ")", "if", "image", ":", "crop_name", "=", "utils", ".", "get_size_filename", "(", "file_obj", ".", "name", ",", "name", ")", "self", ".", "_save_file", "(", "image", ",", "crop_name", ")", "return", "crop_spec" ]
Generate Version for an Image. value has to be a serverpath relative to MEDIA_ROOT. Returns the spec for the crop that was created.
[ "Generate", "Version", "for", "an", "Image", ".", "value", "has", "to", "be", "a", "serverpath", "relative", "to", "MEDIA_ROOT", "." ]
python
train
33.071429
quodlibet/mutagen
mutagen/_senf/_fsnative.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_fsnative.py#L49-L56
def _swap_bytes(data):
    """swaps bytes for 16 bit, leaves remaining trailing bytes alone"""
    a, b = data[1::2], data[::2]
    data = bytearray().join(bytearray(x) for x in zip(a, b))
    if len(b) > len(a):
        data += b[-1:]
    return bytes(data)
[ "def", "_swap_bytes", "(", "data", ")", ":", "a", ",", "b", "=", "data", "[", "1", ":", ":", "2", "]", ",", "data", "[", ":", ":", "2", "]", "data", "=", "bytearray", "(", ")", ".", "join", "(", "bytearray", "(", "x", ")", "for", "x", "in", "zip", "(", "a", ",", "b", ")", ")", "if", "len", "(", "b", ")", ">", "len", "(", "a", ")", ":", "data", "+=", "b", "[", "-", "1", ":", "]", "return", "bytes", "(", "data", ")" ]
swaps bytes for 16 bit, leaves remaining trailing bytes alone
[ "swaps", "bytes", "for", "16", "bit", "leaves", "remaining", "trailing", "bytes", "alone" ]
python
train
31.5
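A small self-contained check of _swap_bytes above (the helper is repeated so the snippet runs on its own): byte pairs are swapped and a lone trailing byte is kept.

def _swap_bytes(data):
    a, b = data[1::2], data[::2]
    data = bytearray().join(bytearray(x) for x in zip(a, b))
    if len(b) > len(a):
        data += b[-1:]
    return bytes(data)

print(_swap_bytes(b"abcde"))  # b'badce' -- 'ab'->'ba', 'cd'->'dc', 'e' kept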
c0fec0de/anytree
anytree/walker.py
https://github.com/c0fec0de/anytree/blob/775477e206a75e697983e70dae6372b5a7e42dcf/anytree/walker.py#L10-L85
def walk(self, start, end):
    """
    Walk from `start` node to `end` node.

    Returns:
        (upwards, common, downwards): `upwards` is a list of nodes to go upward to.
        `common` top node. `downwards` is a list of nodes to go downward to.

    Raises:
        WalkError: on no common root node.

    >>> from anytree import Node, RenderTree, AsciiStyle
    >>> f = Node("f")
    >>> b = Node("b", parent=f)
    >>> a = Node("a", parent=b)
    >>> d = Node("d", parent=b)
    >>> c = Node("c", parent=d)
    >>> e = Node("e", parent=d)
    >>> g = Node("g", parent=f)
    >>> i = Node("i", parent=g)
    >>> h = Node("h", parent=i)
    >>> print(RenderTree(f, style=AsciiStyle()))
    Node('/f')
    |-- Node('/f/b')
    |   |-- Node('/f/b/a')
    |   +-- Node('/f/b/d')
    |       |-- Node('/f/b/d/c')
    |       +-- Node('/f/b/d/e')
    +-- Node('/f/g')
        +-- Node('/f/g/i')
            +-- Node('/f/g/i/h')

    Create a walker:

    >>> w = Walker()

    This class is made for walking:

    >>> w.walk(f, f)
    ((), Node('/f'), ())
    >>> w.walk(f, b)
    ((), Node('/f'), (Node('/f/b'),))
    >>> w.walk(b, f)
    ((Node('/f/b'),), Node('/f'), ())
    >>> w.walk(h, e)
    ((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
    >>> w.walk(d, e)
    ((), Node('/f/b/d'), (Node('/f/b/d/e'),))

    For a proper walking the nodes need to be part of the same tree:

    >>> w.walk(Node("a"), Node("b"))
    Traceback (most recent call last):
        ...
    anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
    """
    s = start.path
    e = end.path
    if start.root != end.root:
        msg = "%r and %r are not part of the same tree." % (start, end)
        raise WalkError(msg)
    # common
    c = Walker.__calc_common(s, e)
    assert c[0] is start.root
    len_c = len(c)
    # up
    if start is c[-1]:
        up = tuple()
    else:
        up = tuple(reversed(s[len_c:]))
    # down
    if end is c[-1]:
        down = tuple()
    else:
        down = e[len_c:]
    return up, c[-1], down
[ "def", "walk", "(", "self", ",", "start", ",", "end", ")", ":", "s", "=", "start", ".", "path", "e", "=", "end", ".", "path", "if", "start", ".", "root", "!=", "end", ".", "root", ":", "msg", "=", "\"%r and %r are not part of the same tree.\"", "%", "(", "start", ",", "end", ")", "raise", "WalkError", "(", "msg", ")", "# common", "c", "=", "Walker", ".", "__calc_common", "(", "s", ",", "e", ")", "assert", "c", "[", "0", "]", "is", "start", ".", "root", "len_c", "=", "len", "(", "c", ")", "# up", "if", "start", "is", "c", "[", "-", "1", "]", ":", "up", "=", "tuple", "(", ")", "else", ":", "up", "=", "tuple", "(", "reversed", "(", "s", "[", "len_c", ":", "]", ")", ")", "# down", "if", "end", "is", "c", "[", "-", "1", "]", ":", "down", "=", "tuple", "(", ")", "else", ":", "down", "=", "e", "[", "len_c", ":", "]", "return", "up", ",", "c", "[", "-", "1", "]", ",", "down" ]
Walk from `start` node to `end` node.

Returns:
    (upwards, common, downwards): `upwards` is a list of nodes to go upward to.
    `common` top node. `downwards` is a list of nodes to go downward to.

Raises:
    WalkError: on no common root node.

>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
|   |-- Node('/f/b/a')
|   +-- Node('/f/b/d')
|       |-- Node('/f/b/d/c')
|       +-- Node('/f/b/d/e')
+-- Node('/f/g')
    +-- Node('/f/g/i')
        +-- Node('/f/g/i/h')

Create a walker:

>>> w = Walker()

This class is made for walking:

>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))

For a proper walking the nodes need to be part of the same tree:

>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
    ...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
[ "Walk", "from", "start", "node", "to", "end", "node", "." ]
python
train
30.355263
Qiskit/qiskit-terra
qiskit/qasm/qasmlexer.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/qasm/qasmlexer.py#L52-L55
def input(self, data):
    """Set the input text data."""
    self.data = data
    self.lexer.input(data)
[ "def", "input", "(", "self", ",", "data", ")", ":", "self", ".", "data", "=", "data", "self", ".", "lexer", ".", "input", "(", "data", ")" ]
Set the input text data.
[ "Set", "the", "input", "text", "data", "." ]
python
test
28.5
SystemRDL/systemrdl-compiler
systemrdl/core/backports.py
https://github.com/SystemRDL/systemrdl-compiler/blob/6ae64f2bb6ecbbe9db356e20e8ac94e85bdeed3a/systemrdl/core/backports.py#L9-L57
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs):
    """Run command with arguments and return a CompletedProcess instance.

    The returned instance will have attributes args, returncode, stdout and
    stderr. By default, stdout and stderr are not captured, and those
    attributes will be None. Pass stdout=PIPE and/or stderr=PIPE in order to
    capture them.

    If check is True and the exit code was non-zero, it raises a
    CalledProcessError. The CalledProcessError object will have the return
    code in the returncode attribute, and output & stderr attributes if those
    streams were captured.

    If timeout is given, and the process takes too long, a TimeoutExpired
    exception will be raised.

    There is an optional argument "input", allowing you to pass a string to
    the subprocess's stdin. If you use this argument you may not also use the
    Popen constructor's "stdin" argument, as it will be used internally.

    The other arguments are the same as for the Popen constructor.

    If universal_newlines=True is passed, the "input" argument must be a
    string and stdout/stderr in the returned object will be strings rather
    than bytes.
    """
    #pylint: disable=redefined-builtin
    if input is not None:
        if 'stdin' in kwargs:
            raise ValueError('stdin and input arguments may not both be used.')
        kwargs['stdin'] = subprocess.PIPE

    with subprocess.Popen(*popenargs, **kwargs) as process:
        try:
            stdout, stderr = process.communicate(input, timeout=timeout)
        except subprocess.TimeoutExpired:
            process.kill()
            stdout, stderr = process.communicate()
            raise subprocess.TimeoutExpired(process.args, timeout,
                                            output=stdout, stderr=stderr)
        except:
            process.kill()
            process.wait()
            raise
        retcode = process.poll()
        if check and retcode:
            raise subprocess.CalledProcessError(retcode, process.args,
                                                output=stdout, stderr=stderr)
    return CompletedProcess(process.args, retcode, stdout, stderr)
[ "def", "subprocess_run", "(", "*", "popenargs", ",", "input", "=", "None", ",", "timeout", "=", "None", ",", "check", "=", "False", ",", "*", "*", "kwargs", ")", ":", "#pylint: disable=redefined-builtin", "if", "input", "is", "not", "None", ":", "if", "'stdin'", "in", "kwargs", ":", "raise", "ValueError", "(", "'stdin and input arguments may not both be used.'", ")", "kwargs", "[", "'stdin'", "]", "=", "subprocess", ".", "PIPE", "with", "subprocess", ".", "Popen", "(", "*", "popenargs", ",", "*", "*", "kwargs", ")", "as", "process", ":", "try", ":", "stdout", ",", "stderr", "=", "process", ".", "communicate", "(", "input", ",", "timeout", "=", "timeout", ")", "except", "subprocess", ".", "TimeoutExpired", ":", "process", ".", "kill", "(", ")", "stdout", ",", "stderr", "=", "process", ".", "communicate", "(", ")", "raise", "subprocess", ".", "TimeoutExpired", "(", "process", ".", "args", ",", "timeout", ",", "output", "=", "stdout", ",", "stderr", "=", "stderr", ")", "except", ":", "process", ".", "kill", "(", ")", "process", ".", "wait", "(", ")", "raise", "retcode", "=", "process", ".", "poll", "(", ")", "if", "check", "and", "retcode", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "retcode", ",", "process", ".", "args", ",", "output", "=", "stdout", ",", "stderr", "=", "stderr", ")", "return", "CompletedProcess", "(", "process", ".", "args", ",", "retcode", ",", "stdout", ",", "stderr", ")" ]
Run command with arguments and return a CompletedProcess instance.

The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.

If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.

If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.

There is an optional argument "input", allowing you to pass a string to the
subprocess's stdin. If you use this argument you may not also use the Popen
constructor's "stdin" argument, as it will be used internally.

The other arguments are the same as for the Popen constructor.

If universal_newlines=True is passed, the "input" argument must be a string
and stdout/stderr in the returned object will be strings rather than bytes.
[ "Run", "command", "with", "arguments", "and", "return", "a", "CompletedProcess", "instance", "." ]
python
train
43.653061
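A hedged usage sketch, assuming the subprocess_run backport above is in scope; on Python 3.5+ the call should behave like subprocess.run with the same arguments.

import subprocess
import sys

# run a child interpreter and capture its stdout
result = subprocess_run([sys.executable, '-c', 'print("hi")'],
                        stdout=subprocess.PIPE, check=True)
print(result.returncode)  # 0
print(result.stdout)      # b'hi\n' (newline may vary by platform)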
tjcsl/ion
intranet/apps/events/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/events/views.py#L94-L119
def join_event_view(request, id):
    """Join event page. If a POST request, actually add or remove the attendance of the current
    user. Otherwise, display a page with confirmation.

    id: event id

    """
    event = get_object_or_404(Event, id=id)

    if request.method == "POST":
        if not event.show_attending:
            return redirect("events")
        if "attending" in request.POST:
            attending = request.POST.get("attending")
            attending = (attending == "true")

            if attending:
                event.attending.add(request.user)
            else:
                event.attending.remove(request.user)

            return redirect("events")

    context = {"event": event, "is_events_admin": request.user.has_admin_permission('events')}
    return render(request, "events/join_event.html", context)
[ "def", "join_event_view", "(", "request", ",", "id", ")", ":", "event", "=", "get_object_or_404", "(", "Event", ",", "id", "=", "id", ")", "if", "request", ".", "method", "==", "\"POST\"", ":", "if", "not", "event", ".", "show_attending", ":", "return", "redirect", "(", "\"events\"", ")", "if", "\"attending\"", "in", "request", ".", "POST", ":", "attending", "=", "request", ".", "POST", ".", "get", "(", "\"attending\"", ")", "attending", "=", "(", "attending", "==", "\"true\"", ")", "if", "attending", ":", "event", ".", "attending", ".", "add", "(", "request", ".", "user", ")", "else", ":", "event", ".", "attending", ".", "remove", "(", "request", ".", "user", ")", "return", "redirect", "(", "\"events\"", ")", "context", "=", "{", "\"event\"", ":", "event", ",", "\"is_events_admin\"", ":", "request", ".", "user", ".", "has_admin_permission", "(", "'events'", ")", "}", "return", "render", "(", "request", ",", "\"events/join_event.html\"", ",", "context", ")" ]
Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.

id: event id
[ "Join", "event", "page", ".", "If", "a", "POST", "request", "actually", "add", "or", "remove", "the", "attendance", "of", "the", "current", "user", ".", "Otherwise", "display", "a", "page", "with", "confirmation", "." ]
python
train
31.730769
LudovicRousseau/pyscard
smartcard/Examples/wx/apdumanager/SampleAPDUManagerPanel.py
https://github.com/LudovicRousseau/pyscard/blob/62e675028086c75656444cc21d563d9f08ebf8e7/smartcard/Examples/wx/apdumanager/SampleAPDUManagerPanel.py#L95-L100
def OnSelectReader(self, reader):
    """Called when a reader is selected by clicking on the
    reader tree control or toolbar."""
    SimpleSCardAppEventObserver.OnSelectReader(self, reader)
    self.feedbacktext.SetLabel('Selected reader: ' + repr(reader))
    self.transmitbutton.Disable()
[ "def", "OnSelectReader", "(", "self", ",", "reader", ")", ":", "SimpleSCardAppEventObserver", ".", "OnSelectReader", "(", "self", ",", "reader", ")", "self", ".", "feedbacktext", ".", "SetLabel", "(", "'Selected reader: '", "+", "repr", "(", "reader", ")", ")", "self", ".", "transmitbutton", ".", "Disable", "(", ")" ]
Called when a reader is selected by clicking on the reader tree control or toolbar.
[ "Called", "when", "a", "reader", "is", "selected", "by", "clicking", "on", "the", "reader", "tree", "control", "or", "toolbar", "." ]
python
train
51.333333
zomux/deepy
deepy/core/graph.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/core/graph.py#L158-L194
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False):
    """
    Load parameters from file to fill all blocks sequentially.
    :type blocks: list of deepy.layers.Block
    """
    if not os.path.exists(path):
        raise Exception("model {} does not exist".format(path))
    # Decide which parameters to load
    normal_params = sum([nn.parameters for nn in blocks], [])
    all_params = sum([nn.all_parameters for nn in blocks], [])
    # Load parameters
    if path.endswith(".gz"):
        opener = gzip.open if path.lower().endswith('.gz') else open
        handle = opener(path, 'rb')
        saved_params = pickle.load(handle)
        handle.close()
        # Write parameters
        if len(all_params) != len(saved_params):
            logging.warning(
                "parameters in the network: {}, parameters in the dumped model: {}".format(
                    len(all_params), len(saved_params)))
        for target, source in zip(all_params, saved_params):
            if not exclude_free_params or target not in normal_params:
                target.set_value(source)
    elif path.endswith(".npz"):
        arrs = np.load(path)
        # Write parameters
        if len(all_params) != len(arrs.keys()):
            logging.warning(
                "parameters in the network: {}, parameters in the dumped model: {}".format(
                    len(all_params), len(arrs.keys())))
        for target, idx in zip(all_params, range(len(arrs.keys()))):
            if not exclude_free_params or target not in normal_params:
                source = arrs['arr_%d' % idx]
                target.set_value(source)
    else:
        raise Exception("File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'" % path)
[ "def", "fill_parameters", "(", "self", ",", "path", ",", "blocks", ",", "exclude_free_params", "=", "False", ",", "check_parameters", "=", "False", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "Exception", "(", "\"model {} does not exist\"", ".", "format", "(", "path", ")", ")", "# Decide which parameters to load", "normal_params", "=", "sum", "(", "[", "nn", ".", "parameters", "for", "nn", "in", "blocks", "]", ",", "[", "]", ")", "all_params", "=", "sum", "(", "[", "nn", ".", "all_parameters", "for", "nn", "in", "blocks", "]", ",", "[", "]", ")", "# Load parameters", "if", "path", ".", "endswith", "(", "\".gz\"", ")", ":", "opener", "=", "gzip", ".", "open", "if", "path", ".", "lower", "(", ")", ".", "endswith", "(", "'.gz'", ")", "else", "open", "handle", "=", "opener", "(", "path", ",", "'rb'", ")", "saved_params", "=", "pickle", ".", "load", "(", "handle", ")", "handle", ".", "close", "(", ")", "# Write parameters", "if", "len", "(", "all_params", ")", "!=", "len", "(", "saved_params", ")", ":", "logging", ".", "warning", "(", "\"parameters in the network: {}, parameters in the dumped model: {}\"", ".", "format", "(", "len", "(", "all_params", ")", ",", "len", "(", "saved_params", ")", ")", ")", "for", "target", ",", "source", "in", "zip", "(", "all_params", ",", "saved_params", ")", ":", "if", "not", "exclude_free_params", "or", "target", "not", "in", "normal_params", ":", "target", ".", "set_value", "(", "source", ")", "elif", "path", ".", "endswith", "(", "\".npz\"", ")", ":", "arrs", "=", "np", ".", "load", "(", "path", ")", "# Write parameters", "if", "len", "(", "all_params", ")", "!=", "len", "(", "arrs", ".", "keys", "(", ")", ")", ":", "logging", ".", "warning", "(", "\"parameters in the network: {}, parameters in the dumped model: {}\"", ".", "format", "(", "len", "(", "all_params", ")", ",", "len", "(", "arrs", ".", "keys", "(", ")", ")", ")", ")", "for", "target", ",", "idx", "in", "zip", "(", "all_params", ",", "range", "(", "len", "(", "arrs", ".", "keys", "(", ")", ")", ")", ")", ":", "if", "not", "exclude_free_params", "or", "target", "not", "in", "normal_params", ":", "source", "=", "arrs", "[", "'arr_%d'", "%", "idx", "]", "target", ".", "set_value", "(", "source", ")", "else", ":", "raise", "Exception", "(", "\"File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'\"", "%", "path", ")" ]
Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block
[ "Load", "parameters", "from", "file", "to", "fill", "all", "blocks", "sequentially", ".", ":", "type", "blocks", ":", "list", "of", "deepy", ".", "layers", ".", "Block" ]
python
test
54.972973
coghost/izen
izen/helper.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/helper.py#L729-L762
def crc16(cmd, use_byte=False):
    """CRC16 checksum.

    - If ``use_byte`` is enabled, a ``bytes`` value is returned.

    :param cmd: the command without its CRC checksum
    :type cmd:
    :param use_byte: whether to return a bytes value
    :type use_byte:
    :return: the CRC value
    :rtype:
    """
    crc = 0xFFFF
    # the CRC16 calculation needs bytes
    if hasattr(cmd, 'encode'):
        cmd = bytes.fromhex(cmd)
    for _ in cmd:
        c = _ & 0x00FF
        crc ^= c
        for i in range(8):
            if crc & 0x0001 > 0:
                crc >>= 1
                crc ^= 0xA001
            else:
                crc >>= 1
    # Modbus CRC16 swaps the high/low bytes
    t = [(crc & 0x00FF), (crc >> 8 & 0xFF)]
    crc = '%02X%02X' % (t[0], t[1])
    if use_byte:
        crc = bytes.fromhex(crc)
    return crc
[ "def", "crc16", "(", "cmd", ",", "use_byte", "=", "False", ")", ":", "crc", "=", "0xFFFF", "# crc16 计算方法, 需要使用 byte", "if", "hasattr", "(", "cmd", ",", "'encode'", ")", ":", "cmd", "=", "bytes", ".", "fromhex", "(", "cmd", ")", "for", "_", "in", "cmd", ":", "c", "=", "_", "&", "0x00FF", "crc", "^=", "c", "for", "i", "in", "range", "(", "8", ")", ":", "if", "crc", "&", "0x0001", ">", "0", ":", "crc", ">>=", "1", "crc", "^=", "0xA001", "else", ":", "crc", ">>=", "1", "# modbus crc16计算时,需要高/低位倒置", "t", "=", "[", "(", "crc", "&", "0x00FF", ")", ",", "(", "crc", ">>", "8", "&", "0xFF", ")", "]", "crc", "=", "'%02X%02X'", "%", "(", "t", "[", "0", "]", ",", "t", "[", "1", "]", ")", "if", "use_byte", ":", "crc", "=", "bytes", ".", "fromhex", "(", "crc", ")", "return", "crc" ]
CRC16 checksum.

- If ``use_byte`` is enabled, a ``bytes`` value is returned.

:param cmd: the command without its CRC checksum
:type cmd:
:param use_byte: whether to return a bytes value
:type use_byte:
:return: the CRC value
:rtype:
[ "CRC16", "检验", "-", "启用", "use_byte", "则返回", "bytes", "类型", "." ]
python
train
20.647059
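An illustrative call of crc16 above on a common Modbus read request (slave 1, function 3, start address 0, register count 2), assuming the function is in scope; the CRC digits are printed rather than asserted here. The helper already emits the low byte first, matching Modbus framing.

frame = '010300000002'
print(crc16(frame))                               # hex-string CRC, low byte first
full = bytes.fromhex(frame) + crc16(frame, use_byte=True)
print(full.hex())                                 # complete frame ready to send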
juju/theblues
theblues/charmstore.py
https://github.com/juju/theblues/blob/f4431f29e43d04fc32f38f4f86cea45cd4e6ae98/theblues/charmstore.py#L148-L159
def entities(self, entity_ids):
    '''Get the default data for entities.

    @param entity_ids A list of entity ids either as strings or references.
    '''
    url = '%s/meta/any?include=id&' % self.url
    for entity_id in entity_ids:
        url += 'id=%s&' % _get_path(entity_id)
    # Remove the trailing '&' from the URL.
    url = url[:-1]
    data = self._get(url)
    return data.json()
[ "def", "entities", "(", "self", ",", "entity_ids", ")", ":", "url", "=", "'%s/meta/any?include=id&'", "%", "self", ".", "url", "for", "entity_id", "in", "entity_ids", ":", "url", "+=", "'id=%s&'", "%", "_get_path", "(", "entity_id", ")", "# Remove the trailing '&' from the URL.", "url", "=", "url", "[", ":", "-", "1", "]", "data", "=", "self", ".", "_get", "(", "url", ")", "return", "data", ".", "json", "(", ")" ]
Get the default data for entities.

@param entity_ids A list of entity ids either as strings or references.
[ "Get", "the", "default", "data", "for", "entities", "." ]
python
train
35.5
keon/algorithms
algorithms/linkedlist/remove_duplicates.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/linkedlist/remove_duplicates.py#L6-L19
def remove_dups(head):
    """
    Time Complexity: O(N)
    Space Complexity: O(N)
    """
    hashset = set()
    prev = Node()
    while head:
        if head.val in hashset:
            prev.next = head.next
        else:
            hashset.add(head.val)
            prev = head
        head = head.next
[ "def", "remove_dups", "(", "head", ")", ":", "hashset", "=", "set", "(", ")", "prev", "=", "Node", "(", ")", "while", "head", ":", "if", "head", ".", "val", "in", "hashset", ":", "prev", ".", "next", "=", "head", ".", "next", "else", ":", "hashset", ".", "add", "(", "head", ".", "val", ")", "prev", "=", "head", "head", "=", "head", ".", "next" ]
Time Complexity: O(N)
Space Complexity: O(N)
[ "Time", "Complexity", ":", "O", "(", "N", ")", "Space", "Complexity", ":", "O", "(", "N", ")" ]
python
train
21.071429
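A minimal sketch around remove_dups above; this Node stub (val/next with defaults) is an assumption standing in for the repo's linked-list node class, and the function body is repeated so the snippet runs standalone.

class Node:  # hypothetical stand-in for the repo's Node
    def __init__(self, val=None, next=None):
        self.val = val
        self.next = next

def remove_dups(head):
    hashset = set()
    prev = Node()
    while head:
        if head.val in hashset:
            prev.next = head.next  # splice out the duplicate
        else:
            hashset.add(head.val)
            prev = head
        head = head.next

# build 1 -> 2 -> 1 -> 3, dedupe in place, then walk it
head = Node(1, Node(2, Node(1, Node(3))))
remove_dups(head)
vals = []
while head:
    vals.append(head.val)
    head = head.next
print(vals)  # [1, 2, 3]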
kodexlab/reliure
reliure/engine.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/engine.py#L489-L531
def select(self, comp_name, options=None):
    """ Select the component that will be played (with given options).

    `options` will be passed to :func:`.Optionable.parse_options` if the
    component is a subclass of :class:`Optionable`.

    .. Warning:: this function also sets up the options (if given) of the
        selected component. Use :func:`clear_selections` to restore both
        selection and component's options.

    This method may be called at 'play time', before calling :func:`play`
    to run all selected components.

    :param comp_name: name of the component to select
    :type comp_name: str
    :param options: options to set on the component
    :type options: dict
    """
    self._logger.info("select comp '%s' for block '%s' (options: %s)" % (comp_name, self._name, options))
    if comp_name not in self._components:
        raise ValueError("'%s' has no component '%s' (components are: %s)"\
             % (self._name, comp_name, ", ".join(self.component_names())))
    if options is None:
        options = {}
    # get the component
    component = self._components[comp_name]
    # check that the options make sense
    if not isinstance(component, Optionable) and len(options):
        raise ValueError("the component %s is not optionable, you can't provide options..." % comp_name)
    # add component as selected, aware of multiple
    if comp_name not in self._selected:
        if not self.multiple and len(self._selected):
            assert len(self._selected) == 1
            self._selected[0] = comp_name
        else:
            self._selected.append(comp_name)
    else:
        # TODO the component has already been selected
        pass
    # component might be a function or any callable
    # only Optionable will get options
    if isinstance(component, Optionable):
        component.set_options_values(options, parse=True, strict=True)
[ "def", "select", "(", "self", ",", "comp_name", ",", "options", "=", "None", ")", ":", "self", ".", "_logger", ".", "info", "(", "\"select comp '%s' for block '%s' (options: %s)\"", "%", "(", "comp_name", ",", "self", ".", "_name", ",", "options", ")", ")", "if", "comp_name", "not", "in", "self", ".", "_components", ":", "raise", "ValueError", "(", "\"'%s' has no component '%s' (components are: %s)\"", "%", "(", "self", ".", "_name", ",", "comp_name", ",", "\", \"", ".", "join", "(", "self", ".", "component_names", "(", ")", ")", ")", ")", "if", "options", "is", "None", ":", "options", "=", "{", "}", "# get the componsent", "component", "=", "self", ".", "_components", "[", "comp_name", "]", "# check options make sens", "if", "not", "isinstance", "(", "component", ",", "Optionable", ")", "and", "len", "(", "options", ")", ":", "raise", "ValueError", "(", "\"the component %s is not optionable you can't provide options...\"", "%", "comp_name", ")", "# add component as selected, aware of multiple", "if", "comp_name", "not", "in", "self", ".", "_selected", ":", "if", "not", "self", ".", "multiple", "and", "len", "(", "self", ".", "_selected", ")", ":", "assert", "len", "(", "self", ".", "_selected", ")", "==", "1", "self", ".", "_selected", "[", "0", "]", "=", "comp_name", "else", ":", "self", ".", "_selected", ".", "append", "(", "comp_name", ")", "else", ":", "# TODO the component has already been selected", "pass", "# component might be a function or any callable", "# only Optionable will get options", "if", "isinstance", "(", "component", ",", "Optionable", ")", ":", "component", ".", "set_options_values", "(", "options", ",", "parse", "=", "True", ",", "strict", "=", "True", ")" ]
Select the component that will be played (with given options).

`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.

.. Warning:: this function also sets up the options (if given) of the
    selected component. Use :func:`clear_selections` to restore both
    selection and component's options.

This method may be called at 'play time', before calling :func:`play` to
run all selected components.

:param comp_name: name of the component to select
:type comp_name: str
:param options: options to set on the component
:type options: dict
[ "Select", "the", "components", "that", "will", "by", "played", "(", "with", "given", "options", ")", "." ]
python
train
46.302326
synw/dataswim
dataswim/data/export.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/export.py#L41-L56
def to_javascript_(self, table_name: str="data") -> str:
    """Convert the main dataframe to javascript code

    :param table_name: javascript variable name, defaults to "data"
    :param table_name: str, optional
    :return: a javascript constant with the data
    :rtype: str

    :example: ``ds.to_javascript_("myconst")``
    """
    try:
        renderer = pytablewriter.JavaScriptTableWriter
        data = self._build_export(renderer, table_name)
        return data
    except Exception as e:
        self.err(e, "Can not convert data to javascript code")
[ "def", "to_javascript_", "(", "self", ",", "table_name", ":", "str", "=", "\"data\"", ")", "->", "str", ":", "try", ":", "renderer", "=", "pytablewriter", ".", "JavaScriptTableWriter", "data", "=", "self", ".", "_build_export", "(", "renderer", ",", "table_name", ")", "return", "data", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "\"Can not convert data to javascript code\"", ")" ]
Convert the main dataframe to javascript code

:param table_name: javascript variable name, defaults to "data"
:param table_name: str, optional
:return: a javascript constant with the data
:rtype: str

:example: ``ds.to_javascript_("myconst")``
[ "Convert", "the", "main", "dataframe", "to", "javascript", "code" ]
python
train
37.6875
pkgw/pwkit
pwkit/io.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/io.py#L616-L642
def rmtree (self, errors='warn'):
    """Recursively delete this directory and its contents. The *errors* keyword
    specifies how errors are handled:

    "warn" (the default)
      Print a warning to standard error.

    "ignore"
      Ignore errors.

    """
    import shutil

    if errors == 'ignore':
        ignore_errors = True
        onerror = None
    elif errors == 'warn':
        ignore_errors = False
        from .cli import warn

        def onerror (func, path, exc_info):
            warn ('couldn\'t rmtree %s: in %s of %s: %s', self, func.__name__,
                  path, exc_info[1])
    else:
        raise ValueError ('unexpected "errors" keyword %r' % (errors,))

    shutil.rmtree (text_type (self), ignore_errors=ignore_errors, onerror=onerror)
    return self
[ "def", "rmtree", "(", "self", ",", "errors", "=", "'warn'", ")", ":", "import", "shutil", "if", "errors", "==", "'ignore'", ":", "ignore_errors", "=", "True", "onerror", "=", "None", "elif", "errors", "==", "'warn'", ":", "ignore_errors", "=", "False", "from", ".", "cli", "import", "warn", "def", "onerror", "(", "func", ",", "path", ",", "exc_info", ")", ":", "warn", "(", "'couldn\\'t rmtree %s: in %s of %s: %s'", ",", "self", ",", "func", ".", "__name__", ",", "path", ",", "exc_info", "[", "1", "]", ")", "else", ":", "raise", "ValueError", "(", "'unexpected \"errors\" keyword %r'", "%", "(", "errors", ",", ")", ")", "shutil", ".", "rmtree", "(", "text_type", "(", "self", ")", ",", "ignore_errors", "=", "ignore_errors", ",", "onerror", "=", "onerror", ")", "return", "self" ]
Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:

"warn" (the default)
    Print a warning to standard error.

"ignore"
    Ignore errors.
[ "Recursively", "delete", "this", "directory", "and", "its", "contents", ".", "The", "*", "errors", "*", "keyword", "specifies", "how", "errors", "are", "handled", ":" ]
python
train
31.37037
twisted/txacme
src/txacme/challenges/_libcloud.py
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/challenges/_libcloud.py#L27-L43
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
    """
    Run a task in a worker, delivering the result as a ``Deferred`` in the
    reactor thread.
    """
    deferred = Deferred()

    def wrapped_work():
        try:
            result = work(*args, **kwargs)
        except BaseException:
            f = Failure()
            deliver(lambda: deferred.errback(f))
        else:
            deliver(lambda: deferred.callback(result))
    worker.do(wrapped_work)
    return deferred
[ "def", "_defer_to_worker", "(", "deliver", ",", "worker", ",", "work", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "deferred", "=", "Deferred", "(", ")", "def", "wrapped_work", "(", ")", ":", "try", ":", "result", "=", "work", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "BaseException", ":", "f", "=", "Failure", "(", ")", "deliver", "(", "lambda", ":", "deferred", ".", "errback", "(", "f", ")", ")", "else", ":", "deliver", "(", "lambda", ":", "deferred", ".", "callback", "(", "result", ")", ")", "worker", ".", "do", "(", "wrapped_work", ")", "return", "deferred" ]
Run a task in a worker, delivering the result as a ``Deferred`` in the reactor thread.
[ "Run", "a", "task", "in", "a", "worker", "delivering", "the", "result", "as", "a", "Deferred", "in", "the", "reactor", "thread", "." ]
python
train
28.529412
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_aaa.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_aaa.py#L496-L508
def ldap_server_host_use_vrf(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    ldap_server = ET.SubElement(config, "ldap-server",
                                xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(ldap_server, "host")
    hostname_key = ET.SubElement(host, "hostname")
    hostname_key.text = kwargs.pop('hostname')
    use_vrf = ET.SubElement(host, "use-vrf")
    use_vrf.text = kwargs.pop('use_vrf')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "ldap_server_host_use_vrf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ldap_server", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ldap-server\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "host", "=", "ET", ".", "SubElement", "(", "ldap_server", ",", "\"host\"", ")", "hostname_key", "=", "ET", ".", "SubElement", "(", "host", ",", "\"hostname\"", ")", "hostname_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'hostname'", ")", "use_vrf", "=", "ET", ".", "SubElement", "(", "host", ",", "\"use-vrf\"", ")", "use_vrf", ".", "text", "=", "kwargs", ".", "pop", "(", "'use_vrf'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
42.769231
awslabs/aws-sam-cli
samcli/commands/local/lib/swagger/integration_uri.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/swagger/integration_uri.py#L253-L267
def _is_sub_intrinsic(data):
    """
    Is this input data a Fn::Sub intrinsic function

    Parameters
    ----------
    data
        Data to check

    Returns
    -------
    bool
        True if the data is a Fn::Sub intrinsic function
    """
    return isinstance(data, dict) and len(data) == 1 and LambdaUri._FN_SUB in data
[ "def", "_is_sub_intrinsic", "(", "data", ")", ":", "return", "isinstance", "(", "data", ",", "dict", ")", "and", "len", "(", "data", ")", "==", "1", "and", "LambdaUri", ".", "_FN_SUB", "in", "data" ]
Is this input data a Fn::Sub intrinsic function

Parameters
----------
data
    Data to check

Returns
-------
bool
    True if the data is a Fn::Sub intrinsic function
[ "Is", "this", "input", "data", "a", "Fn", "::", "Sub", "intrinsic", "function" ]
python
train
24.066667
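A standalone check of the Fn::Sub detection logic above; the module-level helper name and the assumption that LambdaUri._FN_SUB holds the literal CloudFormation key "Fn::Sub" are both hypothetical here.

_FN_SUB = "Fn::Sub"  # assumed value of LambdaUri._FN_SUB

def is_sub_intrinsic(data):
    # a Fn::Sub intrinsic is a single-key dict with exactly that key
    return isinstance(data, dict) and len(data) == 1 and _FN_SUB in data

print(is_sub_intrinsic({"Fn::Sub": "arn:aws:lambda:${AWS::Region}:fn"}))  # True
print(is_sub_intrinsic({"Fn::Sub": "...", "Extra": 1}))                   # False
print(is_sub_intrinsic("Fn::Sub"))                                        # False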
kajala/django-jutil
jutil/command.py
https://github.com/kajala/django-jutil/blob/2abd93ebad51042744eaeb1ee1074ed0eb55ad0c/jutil/command.py#L89-L123
def parse_date_range_arguments(options: dict, default_range='last_month') -> (datetime, datetime, list):
    """
    :param options:
    :param default_range: Default datetime range to return if no other selected
    :return: begin, end, [(begin1,end1), (begin2,end2), ...]
    """
    begin, end = get_date_range_by_name(default_range)
    for range_name in TIME_RANGE_NAMES:
        if options.get(range_name):
            begin, end = get_date_range_by_name(range_name)
    if options.get('begin'):
        t = parse(options['begin'], default=datetime(2000, 1, 1))
        begin = pytz.utc.localize(t)
        end = now()
    if options.get('end'):
        end = pytz.utc.localize(parse(options['end'], default=datetime(2000, 1, 1)))

    step_type = None
    after_end = end
    for step_name in TIME_STEP_NAMES:
        if options.get(step_name):
            step_type = getattr(rrule, step_name.upper())
            if rrule.DAILY == step_type:
                after_end += timedelta(days=1)
            if rrule.WEEKLY == step_type:
                after_end += timedelta(days=7)
            if rrule.MONTHLY == step_type:
                after_end += timedelta(days=31)
    steps = None
    if step_type:
        begins = [t for t in rrule.rrule(step_type, dtstart=begin, until=after_end)]
        steps = [(begins[i], begins[i+1]) for i in range(len(begins)-1)]
    if steps is None:
        steps = [(begin, end)]
    return begin, end, steps
[ "def", "parse_date_range_arguments", "(", "options", ":", "dict", ",", "default_range", "=", "'last_month'", ")", "->", "(", "datetime", ",", "datetime", ",", "list", ")", ":", "begin", ",", "end", "=", "get_date_range_by_name", "(", "default_range", ")", "for", "range_name", "in", "TIME_RANGE_NAMES", ":", "if", "options", ".", "get", "(", "range_name", ")", ":", "begin", ",", "end", "=", "get_date_range_by_name", "(", "range_name", ")", "if", "options", ".", "get", "(", "'begin'", ")", ":", "t", "=", "parse", "(", "options", "[", "'begin'", "]", ",", "default", "=", "datetime", "(", "2000", ",", "1", ",", "1", ")", ")", "begin", "=", "pytz", ".", "utc", ".", "localize", "(", "t", ")", "end", "=", "now", "(", ")", "if", "options", ".", "get", "(", "'end'", ")", ":", "end", "=", "pytz", ".", "utc", ".", "localize", "(", "parse", "(", "options", "[", "'end'", "]", ",", "default", "=", "datetime", "(", "2000", ",", "1", ",", "1", ")", ")", ")", "step_type", "=", "None", "after_end", "=", "end", "for", "step_name", "in", "TIME_STEP_NAMES", ":", "if", "options", ".", "get", "(", "step_name", ")", ":", "step_type", "=", "getattr", "(", "rrule", ",", "step_name", ".", "upper", "(", ")", ")", "if", "rrule", ".", "DAILY", "==", "step_type", ":", "after_end", "+=", "timedelta", "(", "days", "=", "1", ")", "if", "rrule", ".", "WEEKLY", "==", "step_type", ":", "after_end", "+=", "timedelta", "(", "days", "=", "7", ")", "if", "rrule", ".", "MONTHLY", "==", "step_type", ":", "after_end", "+=", "timedelta", "(", "days", "=", "31", ")", "steps", "=", "None", "if", "step_type", ":", "begins", "=", "[", "t", "for", "t", "in", "rrule", ".", "rrule", "(", "step_type", ",", "dtstart", "=", "begin", ",", "until", "=", "after_end", ")", "]", "steps", "=", "[", "(", "begins", "[", "i", "]", ",", "begins", "[", "i", "+", "1", "]", ")", "for", "i", "in", "range", "(", "len", "(", "begins", ")", "-", "1", ")", "]", "if", "steps", "is", "None", ":", "steps", "=", "[", "(", "begin", ",", "end", ")", "]", "return", "begin", ",", "end", ",", "steps" ]
:param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...]
[ ":", "param", "options", ":", ":", "param", "default_range", ":", "Default", "datetime", "range", "to", "return", "if", "no", "other", "selected", ":", "return", ":", "begin", "end", "[", "(", "begin1", "end1", ")", "(", "begin2", "end2", ")", "...", "]" ]
python
train
40.514286
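A hedged usage sketch, assuming jutil is installed; 'weekly' is taken to be one of the step flags, since the getattr(rrule, step_name.upper()) lookup implies daily/weekly/monthly names.

from jutil.command import parse_date_range_arguments

options = {'begin': '2020-01-01', 'end': '2020-02-01', 'weekly': True}
begin, end, steps = parse_date_range_arguments(options)
for step_begin, step_end in steps:
    # each pair is one weekly window inside the overall range
    print(step_begin.date(), '->', step_end.date())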
aws/aws-dynamodb-encryption-python
src/dynamodb_encryption_sdk/internal/formatting/deserialize/__init__.py
https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/deserialize/__init__.py#L51-L61
def decode_value(stream):
    """Decode the contents of a value from a serialized stream.

    :param stream: Source data stream
    :type stream: io.BytesIO
    :returns: Decoded value
    :rtype: bytes
    """
    length = decode_length(stream)
    (value,) = unpack_value(">{:d}s".format(length), stream)
    return value
[ "def", "decode_value", "(", "stream", ")", ":", "length", "=", "decode_length", "(", "stream", ")", "(", "value", ",", ")", "=", "unpack_value", "(", "\">{:d}s\"", ".", "format", "(", "length", ")", ",", "stream", ")", "return", "value" ]
Decode the contents of a value from a serialized stream.

:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
[ "Decode", "the", "contents", "of", "a", "value", "from", "a", "serialized", "stream", "." ]
python
train
28.545455
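A sketch of the wire format decode_value above consumes, under two stated assumptions: that the length prefix decode_length reads is a 4-byte big-endian unsigned int, and that unpack_value is essentially struct.unpack applied to bytes read from the stream.

import io
import struct

def decode_value_sketch(stream):
    # assumed prefix: 4-byte big-endian unsigned length
    (length,) = struct.unpack(">I", stream.read(4))
    # then exactly that many bytes of payload
    (value,) = struct.unpack(">{:d}s".format(length), stream.read(length))
    return value

stream = io.BytesIO(struct.pack(">I", 5) + b"hello" + b"rest")
print(decode_value_sketch(stream))  # b'hello'; the cursor stops before 'rest'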
SheffieldML/GPy
GPy/core/gp.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/gp.py#L714-L721
def posterior_covariance_between_points(self, X1, X2):
    """
    Computes the posterior covariance between points.

    :param X1: some input observations
    :param X2: other input observations
    """
    return self.posterior.covariance_between_points(self.kern, self.X, X1, X2)
[ "def", "posterior_covariance_between_points", "(", "self", ",", "X1", ",", "X2", ")", ":", "return", "self", ".", "posterior", ".", "covariance_between_points", "(", "self", ".", "kern", ",", "self", ".", "X", ",", "X1", ",", "X2", ")" ]
Computes the posterior covariance between points.

:param X1: some input observations
:param X2: other input observations
[ "Computes", "the", "posterior", "covariance", "between", "points", "." ]
python
train
37.5
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L867-L880
def addView(self, viewType):
    """
    Adds a new view of the inputted view type.

    :param      viewType | <subclass of XView>

    :return     <XView> || None
    """
    if not viewType:
        return None

    view = viewType.createInstance(self, self.viewWidget())
    self.addTab(view, view.windowTitle())
    return view
[ "def", "addView", "(", "self", ",", "viewType", ")", ":", "if", "not", "viewType", ":", "return", "None", "view", "=", "viewType", ".", "createInstance", "(", "self", ",", "self", ".", "viewWidget", "(", ")", ")", "self", ".", "addTab", "(", "view", ",", "view", ".", "windowTitle", "(", ")", ")", "return", "view" ]
Adds a new view of the inputted view type.

:param      viewType | <subclass of XView>

:return     <XView> || None
[ "Adds", "a", "new", "view", "of", "the", "inputed", "view", "type", ".", ":", "param", "viewType", "|", "<subclass", "of", "XView", ">", ":", "return", "<XView", ">", "||", "None" ]
python
train
27.285714
afilipovich/gglsbl
gglsbl/storage.py
https://github.com/afilipovich/gglsbl/blob/89c4665bd6487a3689ccb6b1f3e53ff85e056103/gglsbl/storage.py#L361-L371
def dump_hash_prefix_values(self):
    """Export all hash prefix values.

    Returns a list of known hash prefix values
    """
    q = '''SELECT distinct value from hash_prefix'''
    output = []
    with self.get_cursor() as dbc:
        dbc.execute(q)
        output = [bytes(r[0]) for r in dbc.fetchall()]
    return output
[ "def", "dump_hash_prefix_values", "(", "self", ")", ":", "q", "=", "'''SELECT distinct value from hash_prefix'''", "output", "=", "[", "]", "with", "self", ".", "get_cursor", "(", ")", "as", "dbc", ":", "dbc", ".", "execute", "(", "q", ")", "output", "=", "[", "bytes", "(", "r", "[", "0", "]", ")", "for", "r", "in", "dbc", ".", "fetchall", "(", ")", "]", "return", "output" ]
Export all hash prefix values.

Returns a list of known hash prefix values
[ "Export", "all", "hash", "prefix", "values", "." ]
python
train
32.181818
insightindustry/validator-collection
validator_collection/checkers.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/checkers.py#L1406-L1450
def is_domain(value, **kwargs):
    """Indicate whether ``value`` is a valid domain.

    .. caution::

      This validator does not verify that ``value`` **exists** as a domain.
      It merely verifies that its contents *might* exist as a domain.

    .. note::

      This validator checks to validate that ``value`` resembles a valid
      domain name. It is - generally - compliant with
      `RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
      `RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
      in a number of key ways:

        * Including authentication (e.g. ``username:password@domain.dev``)
          will fail validation.
        * Including a path (e.g. ``domain.dev/path/to/file``) will fail
          validation.
        * Including a port (e.g. ``domain.dev:8080``) will fail validation.

      If you are hoping to validate a more complete URL, we recommend that
      you see :func:`url <validator_collection.validators.url>`.

    :param value: The value to evaluate.

    :param allow_ips: If ``True``, will succeed when validating IP addresses.
      If ``False``, will fail if ``value`` is an IP address. Defaults to
      ``False``.
    :type allow_ips: :class:`bool <python:bool>`

    :returns: ``True`` if ``value`` is valid, ``False`` if it is not.
    :rtype: :class:`bool <python:bool>`

    :raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
      or duplicates keyword parameters passed to the underlying validator

    """
    try:
        value = validators.domain(value, **kwargs)
    except SyntaxError as error:
        raise error
    except Exception:
        return False

    return True
[ "def", "is_domain", "(", "value", ",", "*", "*", "kwargs", ")", ":", "try", ":", "value", "=", "validators", ".", "domain", "(", "value", ",", "*", "*", "kwargs", ")", "except", "SyntaxError", "as", "error", ":", "raise", "error", "except", "Exception", ":", "return", "False", "return", "True" ]
Indicate whether ``value`` is a valid domain.

.. caution::

  This validator does not verify that ``value`` **exists** as a domain. It
  merely verifies that its contents *might* exist as a domain.

.. note::

  This validator checks to validate that ``value`` resembles a valid domain
  name. It is - generally - compliant with
  `RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
  `RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges in
  a number of key ways:

    * Including authentication (e.g. ``username:password@domain.dev``) will
      fail validation.
    * Including a path (e.g. ``domain.dev/path/to/file``) will fail
      validation.
    * Including a port (e.g. ``domain.dev:8080``) will fail validation.

  If you are hoping to validate a more complete URL, we recommend that you
  see :func:`url <validator_collection.validators.url>`.

:param value: The value to evaluate.

:param allow_ips: If ``True``, will succeed when validating IP addresses.
  If ``False``, will fail if ``value`` is an IP address. Defaults to
  ``False``.
:type allow_ips: :class:`bool <python:bool>`

:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`

:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters
  or duplicates keyword parameters passed to the underlying validator
[ "Indicate", "whether", "value", "is", "a", "valid", "domain", "." ]
python
train
36.088889
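A hedged usage sketch of is_domain above, assuming validator-collection is installed: invalid input returns False rather than raising, which is the point of the checker wrappers in this module.

from validator_collection import checkers

print(checkers.is_domain('example.com'))                 # True
print(checkers.is_domain('not a domain!'))               # False
print(checkers.is_domain('127.0.0.1'))                   # False by default
print(checkers.is_domain('127.0.0.1', allow_ips=True))   # True per the docstring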
Nukesor/pueue
pueue/daemon/daemon.py
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/daemon.py#L308-L317
def pipe_to_process(self, payload):
    """Send something to stdin of a specific process."""
    message = payload['input']
    key = payload['key']

    if not self.process_handler.is_running(key):
        return {'message': 'No running process for this key',
                'status': 'error'}

    self.process_handler.send_to_process(message, key)
    return {'message': 'Message sent',
            'status': 'success'}
[ "def", "pipe_to_process", "(", "self", ",", "payload", ")", ":", "message", "=", "payload", "[", "'input'", "]", "key", "=", "payload", "[", "'key'", "]", "if", "not", "self", ".", "process_handler", ".", "is_running", "(", "key", ")", ":", "return", "{", "'message'", ":", "'No running process for this key'", ",", "'status'", ":", "'error'", "}", "self", ".", "process_handler", ".", "send_to_process", "(", "message", ",", "key", ")", "return", "{", "'message'", ":", "'Message sent'", ",", "'status'", ":", "'success'", "}" ]
Send something to stdin of a specific process.
[ "Send", "something", "to", "stdin", "of", "a", "specific", "process", "." ]
python
train
44.8
ns1/ns1-python
ns1/zones.py
https://github.com/ns1/ns1-python/blob/f3e1d90a3b76a1bd18f855f2c622a8a49d4b585e/ns1/zones.py#L67-L74
def search(self, q=None, has_geo=False, callback=None, errback=None):
    """
    Search within a zone for specific metadata. Zone must already be loaded.
    """
    if not self.data:
        raise ZoneException('zone not loaded')
    return self._rest.search(self.zone, q, has_geo, callback, errback)
[ "def", "search", "(", "self", ",", "q", "=", "None", ",", "has_geo", "=", "False", ",", "callback", "=", "None", ",", "errback", "=", "None", ")", ":", "if", "not", "self", ".", "data", ":", "raise", "ZoneException", "(", "'zone not loaded'", ")", "return", "self", ".", "_rest", ".", "search", "(", "self", ".", "zone", ",", "q", ",", "has_geo", ",", "callback", ",", "errback", ")" ]
Search within a zone for specific metadata. Zone must already be loaded.
[ "Search", "within", "a", "zone", "for", "specific", "metadata", ".", "Zone", "must", "already", "be", "loaded", "." ]
python
train
40
gboeing/osmnx
osmnx/plot.py
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/plot.py#L763-L809
def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
    """
    Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
    attributes.

    Parameters
    ----------
    edge : GeoSeries
        a row from the gdf_edges GeoDataFrame
    edge_color : string
        color of the edge lines
    edge_width : numeric
        width of the edge lines
    edge_opacity : numeric
        opacity of the edge lines
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked, if
        None, no popup

    Returns
    -------
    pl : folium.PolyLine
    """
    # check if we were able to import folium successfully
    if not folium:
        raise ImportError('The folium package must be installed to use this optional feature.')

    # locations is a list of points for the polyline
    # folium takes coords in lat,lon but geopandas provides them in lon,lat
    # so we have to flip them around
    locations = list([(lat, lon) for lon, lat in edge['geometry'].coords])

    # if popup_attribute is None, then create no pop-up
    if popup_attribute is None:
        popup = None
    else:
        # folium doesn't interpret html in the html argument (weird), so can't
        # do newlines without an iframe
        popup_text = json.dumps(edge[popup_attribute])
        popup = folium.Popup(html=popup_text)

    # create a folium polyline with attributes
    pl = folium.PolyLine(locations=locations, popup=popup, color=edge_color,
                         weight=edge_width, opacity=edge_opacity)
    return pl
[ "def", "make_folium_polyline", "(", "edge", ",", "edge_color", ",", "edge_width", ",", "edge_opacity", ",", "popup_attribute", "=", "None", ")", ":", "# check if we were able to import folium successfully", "if", "not", "folium", ":", "raise", "ImportError", "(", "'The folium package must be installed to use this optional feature.'", ")", "# locations is a list of points for the polyline", "# folium takes coords in lat,lon but geopandas provides them in lon,lat", "# so we have to flip them around", "locations", "=", "list", "(", "[", "(", "lat", ",", "lon", ")", "for", "lon", ",", "lat", "in", "edge", "[", "'geometry'", "]", ".", "coords", "]", ")", "# if popup_attribute is None, then create no pop-up", "if", "popup_attribute", "is", "None", ":", "popup", "=", "None", "else", ":", "# folium doesn't interpret html in the html argument (weird), so can't", "# do newlines without an iframe", "popup_text", "=", "json", ".", "dumps", "(", "edge", "[", "popup_attribute", "]", ")", "popup", "=", "folium", ".", "Popup", "(", "html", "=", "popup_text", ")", "# create a folium polyline with attributes", "pl", "=", "folium", ".", "PolyLine", "(", "locations", "=", "locations", ",", "popup", "=", "popup", ",", "color", "=", "edge_color", ",", "weight", "=", "edge_width", ",", "opacity", "=", "edge_opacity", ")", "return", "pl" ]
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.

Parameters
----------
edge : GeoSeries
    a row from the gdf_edges GeoDataFrame
edge_color : string
    color of the edge lines
edge_width : numeric
    width of the edge lines
edge_opacity : numeric
    opacity of the edge lines
popup_attribute : string
    edge attribute to display in a pop-up when an edge is clicked, if None,
    no popup

Returns
-------
pl : folium.PolyLine
[ "Turn", "a", "row", "from", "the", "gdf_edges", "GeoDataFrame", "into", "a", "folium", "PolyLine", "with", "attributes", "." ]
python
train
33.234043
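A minimal usage sketch for make_folium_polyline, assuming osmnx, folium, shapely and pandas are installed; the street geometry, the 'name' attribute and the file name are made-up placeholders, not part of the record above.

# hedged sketch: feed a hand-built edge into make_folium_polyline
import folium
import pandas as pd
from shapely.geometry import LineString
from osmnx.plot import make_folium_polyline

# stand-in for one row of osmnx's gdf_edges: any Series with a 'geometry'
# LineString and the popup attribute should work
edge = pd.Series({
    'geometry': LineString([(-122.33, 47.61), (-122.32, 47.62)]),  # lon, lat
    'name': 'Example Street',
})

m = folium.Map(location=[47.61, -122.33], zoom_start=15)
pl = make_folium_polyline(edge, edge_color='#cc0000', edge_width=2,
                          edge_opacity=1, popup_attribute='name')
pl.add_to(m)
m.save('edge.html')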
SeattleTestbed/seash
seash_helper.py
https://github.com/SeattleTestbed/seash/blob/40f9d2285662ff8b61e0468b4196acee089b273b/seash_helper.py#L909-L1044
def print_vessel_errors(retdict):
  """
  <Purpose>
    Prints out any errors that occurred while performing an action on
    vessels, in a human readable way.

    Errors will be printed out in the following format:
      description [reason]
      Affected vessels: nodelist

    To define a new error, add the following entry to ERROR_RESPONSES in
    this function:
      'error_identifier': {
        'error': 'description for the error',
        'reason': 'reason for the error' (optional).

    'error_identifier'
      This is the substring of the error that can be used to identify it.
      Longer identifiers will have a higher priority over shorter
      identifiers. For example, authentication errors could be identified
      using the string 'Insufficient Permissions'.
    'error'
      This is where you put the description for the error to show to the
      user.
    'reason' (optional)
      This is where you put clarification for the error to ease the user.
      Additionally, you may put things that they could do to fix the error
      here, if applicable. If you don't want to show a reason, don't
      include this key in the dictionary. Examples when you would not put
      a reason is if you received a timeout, since the user can't do
      anything to fix them.

  <Arguments>
    retdict:
      A list of longnames mapped against a tuple (Success?, Message/Errortext).

  <Side Effects>
    Prints error messages onto the screen. See documentation for
    ERROR_RESPONSES for more information.

  <Exceptions>
    Exception

  <Return>
    None
  """
  ERROR_RESPONSES = {
    "Node Manager error 'Insufficient Permissions'": {
      'error': "You lack sufficient permissions to perform this action.",
      'reason': "Did you release the resource(s) by accident?"},
    'timed out': {
      'error': 'Connection timed out.'},
    "file not found": {
      'error': "The specified file(s) could not be found.",
      'reason': "Please check the filename."},
    "Node Manager error 'Programming language platform is not supported.'": {
      'error': "Requested platform is not supported by the target vessel."},
    }

  # A dictionary mapping error identifiers to a list of vessels that share
  # that error.
  error_longnames = {}

  for longname in retdict:
    # if the first item is true, then there is no error.
    if not retdict[longname][0]:
      matches = []
      # Loop until we find the response
      for error_string in ERROR_RESPONSES:
        if error_string.lower() in retdict[longname][1].lower():
          # This is the first match
          if not matches:
            matches = [error_string]
          else:
            # This is a better match, forget about the previous matches
            if len(error_string) > len(matches[0]):
              matches = [error_string]
            elif len(error_string) == len(matches[0]):
              matches.append(error_string)

      # If there isn't a match, use the error string as an error identifier
      if not matches:
        errorid = retdict[longname][1]
      else:
        # There should not be more than 1 match for any error.
        # If there is, log the error to a file.
        if len(matches) != 1:
          errfile = open('seasherrors.txt', 'a')
          errorstring = "Multiple matches with same priority:" + '\n'.join(matches)
          errfile.write(errorstring)
          errfile.close()
          raise Exception(errorstring)
        errorid = matches[0]

      # Create the longname list if it doesn't already exist
      if errorid not in error_longnames:
        error_longnames[errorid] = []
      error_longnames[errorid].append(longname)

  # Print the errors out
  for errorid in error_longnames:
    # Represent the list of nodes in a human readable way.
    nodestring = ''
    for node in error_longnames[errorid]:
      # This is the first node
      if node == error_longnames[errorid][0]:
        divider = ''
      # This is a node in the middle
      elif node != error_longnames[errorid][-1]:
        divider = ', '
      # This is the last node
      else:
        # We will always have at least 2 nodes at this point, since if there
        # is only one node, it will be treated as the first node. Therefore,
        # we only have two cases, where there are exactly 2 nodes, or more than
        # 2 nodes.
        # If we have two nodes, we want: "node_1 and node_2".
        # Otherwise, we want: "node_1, node_2, ..., and node_n".
        divider = " and "
        if len(error_longnames[errorid]) > 2:
          divider = ',' + divider
      nodestring += divider + node

    if errorid in ERROR_RESPONSES:
      print ERROR_RESPONSES[errorid]['error'],
      if 'reason' in ERROR_RESPONSES[errorid]:
        print ERROR_RESPONSES[errorid]['reason']
      else:
        # Caret is still on the same line as the list of nodes
        print
    else:
      # Unknown error.
      print "An error occurred: " + errorid
    print "Affected vessels:", nodestring + '.'
[ "def", "print_vessel_errors", "(", "retdict", ")", ":", "ERROR_RESPONSES", "=", "{", "\"Node Manager error 'Insufficient Permissions'\"", ":", "{", "'error'", ":", "\"You lack sufficient permissions to perform this action.\"", ",", "'reason'", ":", "\"Did you release the resource(s) by accident?\"", "}", ",", "'timed out'", ":", "{", "'error'", ":", "'Connection timed out.'", "}", ",", "\"file not found\"", ":", "{", "'error'", ":", "\"The specified file(s) could not be found.\"", ",", "'reason'", ":", "\"Please check the filename.\"", "}", ",", "\"Node Manager error 'Programming language platform is not supported.'\"", ":", "{", "'error'", ":", "\"Requested platform is not supported by the target vessel.\"", "}", ",", "}", "# A dictionary mapping error identifiers to a list of vessels that share", "# that error.", "error_longnames", "=", "{", "}", "for", "longname", "in", "retdict", ":", "# if the first item is true, then there is no error.", "if", "not", "retdict", "[", "longname", "]", "[", "0", "]", ":", "matches", "=", "[", "]", "# Loop until we find the response", "for", "error_string", "in", "ERROR_RESPONSES", ":", "if", "error_string", ".", "lower", "(", ")", "in", "retdict", "[", "longname", "]", "[", "1", "]", ".", "lower", "(", ")", ":", "# This is the first match", "if", "not", "matches", ":", "matches", "=", "[", "error_string", "]", "else", ":", "# This is a better match, forget about the previous matches", "if", "len", "(", "error_string", ")", ">", "len", "(", "matches", "[", "0", "]", ")", ":", "matches", "=", "[", "error_string", "]", "elif", "len", "(", "error_string", ")", "==", "len", "(", "matches", "[", "0", "]", ")", ":", "matches", ".", "append", "(", "error_string", ")", "# If there isn't a match, use the error string as an error identifier", "if", "not", "matches", ":", "errorid", "=", "retdict", "[", "longname", "]", "[", "1", "]", "else", ":", "# There should not be more than 1 match for any error.", "# If there is, log the error to a file.", "if", "len", "(", "matches", ")", "!=", "1", ":", "errfile", "=", "open", "(", "'seasherrors.txt'", ",", "'a'", ")", "errorstring", "=", "\"Multiple matches with same priority:\"", "+", "'\\n'", ".", "join", "(", "matches", ")", "errfile", ".", "write", "(", "errorstring", ")", "errfile", ".", "close", "(", ")", "raise", "Exception", "(", "errorstring", ")", "errorid", "=", "matches", "[", "0", "]", "# Create the longname list if it doesn't already exist", "if", "errorid", "not", "in", "error_longnames", ":", "error_longnames", "[", "errorid", "]", "=", "[", "]", "error_longnames", "[", "errorid", "]", ".", "append", "(", "longname", ")", "# Print the errors out", "for", "errorid", "in", "error_longnames", ":", "# Represent the list of nodes in a human readable way.", "nodestring", "=", "''", "for", "node", "in", "error_longnames", "[", "errorid", "]", ":", "# This is the first node", "if", "node", "==", "error_longnames", "[", "errorid", "]", "[", "0", "]", ":", "divider", "=", "''", "# This is a node in the middle", "elif", "node", "!=", "error_longnames", "[", "errorid", "]", "[", "-", "1", "]", ":", "divider", "=", "', '", "# This is the last node", "else", ":", "# We will always have at least 2 nodes at this point, since if there", "# is only one node, it will be treated as the first node. Therefore,", "# we only have two cases, where there are exactly 2 nodes, or more than", "# 2 nodes.", "# If we have two nodes, we want: \"node_1 and node_2\".", "# Otherwise, we want: \"node_1, node_2, ..., and node_n\".", "divider", "=", "\" and \"", "if", "len", "(", "error_longnames", "[", "errorid", "]", ")", ">", "2", ":", "divider", "=", "','", "+", "divider", "nodestring", "+=", "divider", "+", "node", "if", "errorid", "in", "ERROR_RESPONSES", ":", "print", "ERROR_RESPONSES", "[", "errorid", "]", "[", "'error'", "]", ",", "if", "'reason'", "in", "ERROR_RESPONSES", "[", "errorid", "]", ":", "print", "ERROR_RESPONSES", "[", "errorid", "]", "[", "'reason'", "]", "else", ":", "# Caret is still on the same line as the list of nodes", "print", "else", ":", "# Unknown error.", "print", "\"An error occurred: \"", "+", "errorid", "print", "\"Affected vessels:\"", ",", "nodestring", "+", "'.'" ]
<Purpose>
  Prints out any errors that occurred while performing an action on
  vessels, in a human readable way.

  Errors will be printed out in the following format:
    description [reason]
    Affected vessels: nodelist

  To define a new error, add the following entry to ERROR_RESPONSES in
  this function:
    'error_identifier': {
      'error': 'description for the error',
      'reason': 'reason for the error' (optional).

  'error_identifier'
    This is the substring of the error that can be used to identify it.
    Longer identifiers will have a higher priority over shorter identifiers.
    For example, authentication errors could be identified using the
    string 'Insufficient Permissions'.
  'error'
    This is where you put the description for the error to show to the user.
  'reason' (optional)
    This is where you put clarification for the error to ease the user.
    Additionally, you may put things that they could do to fix the error
    here, if applicable. If you don't want to show a reason, don't include
    this key in the dictionary. Examples when you would not put a reason
    is if you received a timeout, since the user can't do anything to fix
    them.

<Arguments>
  retdict:
    A list of longnames mapped against a tuple (Success?, Message/Errortext).

<Side Effects>
  Prints error messages onto the screen. See documentation for
  ERROR_RESPONSES for more information.

<Exceptions>
  Exception

<Return>
  None
[ "<Purpose", ">", "Prints", "out", "any", "errors", "that", "occurred", "while", "performing", "an", "action", "on", "vessels", "in", "a", "human", "readable", "way", "." ]
python
train
35.838235
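A hedged sketch of the retdict shape print_vessel_errors() expects; the longnames and error strings below are invented for illustration, and the import assumes seash_helper is on the path (the code itself is Python 2).

from seash_helper import print_vessel_errors

retdict = {
    'node_a:v1': (True, ''),   # success entries are ignored
    'node_b:v2': (False, "Node Manager error 'Insufficient Permissions'"),
    'node_c:v3': (False, 'Connection timed out'),
}
print_vessel_errors(retdict)
# Expected output (roughly):
#   You lack sufficient permissions to perform this action. Did you release the resource(s) by accident?
#   Affected vessels: node_b:v2.
#   Connection timed out.
#   Affected vessels: node_c:v3.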
gem/oq-engine
openquake/hazardlib/gsim/nga_east.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/nga_east.py#L683-L700
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
    """
    Gets the vector of mean and variance of tau values corresponding to
    the specific model and returns them as dictionaries
    """
    self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
    self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
    t_bar = {}
    t_std = {}
    for imt in imt_list:
        t_bar[imt] = []
        t_std[imt] = []
        for mag, key in zip(self.magnitude_limits, self.tau_keys):
            t_bar[imt].append(
                TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
            t_std[imt].append(
                TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
    return t_bar, t_std
[ "def", "_get_tau_vector", "(", "self", ",", "tau_mean", ",", "tau_std", ",", "imt_list", ")", ":", "self", ".", "magnitude_limits", "=", "MAG_LIMS_KEYS", "[", "self", ".", "tau_model", "]", "[", "\"mag\"", "]", "self", ".", "tau_keys", "=", "MAG_LIMS_KEYS", "[", "self", ".", "tau_model", "]", "[", "\"keys\"", "]", "t_bar", "=", "{", "}", "t_std", "=", "{", "}", "for", "imt", "in", "imt_list", ":", "t_bar", "[", "imt", "]", "=", "[", "]", "t_std", "[", "imt", "]", "=", "[", "]", "for", "mag", ",", "key", "in", "zip", "(", "self", ".", "magnitude_limits", ",", "self", ".", "tau_keys", ")", ":", "t_bar", "[", "imt", "]", ".", "append", "(", "TAU_EXECUTION", "[", "self", ".", "tau_model", "]", "(", "imt", ",", "mag", ",", "tau_mean", ")", ")", "t_std", "[", "imt", "]", ".", "append", "(", "TAU_EXECUTION", "[", "self", ".", "tau_model", "]", "(", "imt", ",", "mag", ",", "tau_std", ")", ")", "return", "t_bar", ",", "t_std" ]
Gets the vector of mean and variance of tau values corresponding to the specific model and returns them as dictionaries
[ "Gets", "the", "vector", "of", "mean", "and", "variance", "of", "tau", "values", "corresponding", "to", "the", "specific", "model", "and", "returns", "them", "as", "dictionaries" ]
python
train
42.333333
MacHu-GWU/windtalker-project
windtalker/files.py
https://github.com/MacHu-GWU/windtalker-project/blob/1dcff7c3692d5883cf1b55d1ea745723cfc6c3ce/windtalker/files.py#L45-L79
def transform(src, dst, converter, overwrite=False, stream=True, chunksize=1024**2, **kwargs):
    """
    A file stream transform IO utility function.

    :param src: original file path
    :param dst: destination file path
    :param converter: binary content converter function
    :param overwrite: default False; if False, raise an error when dst
      already exists
    :param stream: default True, if True, use stream IO mode, chunksize has
      to be specified.
    :param chunksize: default 1MB
    """
    if not overwrite:  # pragma: no cover
        if Path(dst).exists():
            raise EnvironmentError("'%s' already exists!" % dst)

    with open(src, "rb") as f_input:
        with open(dst, "wb") as f_output:
            if stream:
                # fix chunksize to a reasonable range
                if chunksize > 1024 ** 2 * 10:
                    chunksize = 1024 ** 2 * 10
                elif chunksize < 1024 ** 2:
                    chunksize = 1024 ** 2

                # write file
                while 1:
                    content = f_input.read(chunksize)
                    if content:
                        f_output.write(converter(content, **kwargs))
                    else:
                        break
            else:  # pragma: no cover
                f_output.write(converter(f_input.read(), **kwargs))
[ "def", "transform", "(", "src", ",", "dst", ",", "converter", ",", "overwrite", "=", "False", ",", "stream", "=", "True", ",", "chunksize", "=", "1024", "**", "2", ",", "*", "*", "kwargs", ")", ":", "if", "not", "overwrite", ":", "# pragma: no cover", "if", "Path", "(", "dst", ")", ".", "exists", "(", ")", ":", "raise", "EnvironmentError", "(", "\"'%s' already exists!\"", "%", "dst", ")", "with", "open", "(", "src", ",", "\"rb\"", ")", "as", "f_input", ":", "with", "open", "(", "dst", ",", "\"wb\"", ")", "as", "f_output", ":", "if", "stream", ":", "# fix chunksize to a reasonable range", "if", "chunksize", ">", "1024", "**", "2", "*", "10", ":", "chunksize", "=", "1024", "**", "2", "*", "10", "elif", "chunksize", "<", "1024", "**", "2", ":", "chunksize", "=", "1024", "**", "2", "# write file", "while", "1", ":", "content", "=", "f_input", ".", "read", "(", "chunksize", ")", "if", "content", ":", "f_output", ".", "write", "(", "converter", "(", "content", ",", "*", "*", "kwargs", ")", ")", "else", ":", "break", "else", ":", "# pragma: no cover", "f_output", ".", "write", "(", "converter", "(", "f_input", ".", "read", "(", ")", ",", "*", "*", "kwargs", ")", ")" ]
A file stream transform IO utility function.

:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False; if False, raise an error when dst already exists
:param stream: default True, if True, use stream IO mode, chunksize has to be specified.
:param chunksize: default 1MB
[ "A", "file", "stream", "transform", "IO", "utility", "function", "." ]
python
train
36.742857
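A usage sketch for transform(): a byte-level XOR function plays the converter role, and extra keyword arguments (here, key) are forwarded to it chunk by chunk. The file names and key value are placeholders.

from windtalker.files import transform

def xor_converter(content, key=0x42):
    # operate on the raw bytes of one chunk
    return bytes(b ^ key for b in content)

transform('plain.bin', 'scrambled.bin', xor_converter,
          overwrite=True, chunksize=1024 ** 2, key=0x42)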
mojaie/chorus
chorus/molutil.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L25-L34
def assign_descriptors(mol):
    """
    Throws:
        RuntimeError: if minify_ring failed
    """
    topology.recognize(mol)
    descriptor.assign_valence(mol)
    descriptor.assign_rotatable(mol)
    topology.minify_ring(mol)
    descriptor.assign_aromatic(mol)
[ "def", "assign_descriptors", "(", "mol", ")", ":", "topology", ".", "recognize", "(", "mol", ")", "descriptor", ".", "assign_valence", "(", "mol", ")", "descriptor", ".", "assign_rotatable", "(", "mol", ")", "topology", ".", "minify_ring", "(", "mol", ")", "descriptor", ".", "assign_aromatic", "(", "mol", ")" ]
Throws:
    RuntimeError: if minify_ring failed
[ "Throws", ":", "RuntimeError", ":", "if", "minify_ring", "failed" ]
python
train
25.7
hazelcast/hazelcast-python-client
hazelcast/cluster.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/cluster.py#L277-L289
def get_members(self, selector):
    """
    Returns the members that satisfy the given selector.

    :param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
    :return: (List), List of members.
    """
    members = []
    for member in self.get_member_list():
        if selector.select(member):
            members.append(member)
    return members
[ "def", "get_members", "(", "self", ",", "selector", ")", ":", "members", "=", "[", "]", "for", "member", "in", "self", ".", "get_member_list", "(", ")", ":", "if", "selector", ".", "select", "(", "member", ")", ":", "members", ".", "append", "(", "member", ")", "return", "members" ]
Returns the members that satisfy the given selector.

:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
[ "Returns", "the", "members", "that", "satisfy", "the", "given", "selector", "." ]
python
train
32.692308
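A minimal sketch of a selector compatible with get_members(); it simply accepts every member, and it assumes a Hazelcast cluster is reachable with default client settings.

import hazelcast

class AllMembersSelector(object):
    def select(self, member):
        return True  # keep every member

client = hazelcast.HazelcastClient()
members = client.cluster.get_members(AllMembersSelector())
print(len(members))
client.shutdown()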
ontio/ontology-python-sdk
ontology/account/account.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/account/account.py#L229-L245
def get_private_key_from_wif(wif: str) -> bytes:
    """
    This interface is used to decode a WIF-encoded ECDSA private key.

    :param wif: a WIF-encoded private key.
    :return: an ECDSA private key in the form of bytes.
    """
    if wif is None or wif == "":
        raise Exception("none wif")
    data = base58.b58decode(wif)
    if len(data) != 38 or data[0] != 0x80 or data[33] != 0x01:
        raise Exception("wif wrong")
    checksum = Digest.hash256(data[0:34])
    for i in range(4):
        if data[len(data) - 4 + i] != checksum[i]:
            raise Exception("wif wrong")
    return data[1:33]
[ "def", "get_private_key_from_wif", "(", "wif", ":", "str", ")", "->", "bytes", ":", "if", "wif", "is", "None", "or", "wif", "is", "\"\"", ":", "raise", "Exception", "(", "\"none wif\"", ")", "data", "=", "base58", ".", "b58decode", "(", "wif", ")", "if", "len", "(", "data", ")", "!=", "38", "or", "data", "[", "0", "]", "!=", "0x80", "or", "data", "[", "33", "]", "!=", "0x01", ":", "raise", "Exception", "(", "\"wif wrong\"", ")", "checksum", "=", "Digest", ".", "hash256", "(", "data", "[", "0", ":", "34", "]", ")", "for", "i", "in", "range", "(", "4", ")", ":", "if", "data", "[", "len", "(", "data", ")", "-", "4", "+", "i", "]", "!=", "checksum", "[", "i", "]", ":", "raise", "Exception", "(", "\"wif wrong\"", ")", "return", "data", "[", "1", ":", "33", "]" ]
This interface is used to decode a WIF-encoded ECDSA private key.

:param wif: a WIF-encoded private key.
:return: an ECDSA private key in the form of bytes.
[ "This", "interface", "is", "used", "to", "decode", "a", "WIF", "encode", "ECDSA", "private", "key", "." ]
python
train
38.588235
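A hedged round-trip sketch; it assumes the function is exposed as a static method on Account, and the WIF literal is the commonly cited compressed-format sample from the Bitcoin wiki (substitute your own key in practice).

from ontology.account.account import Account

# sample compressed-format WIF (Bitcoin wiki example); placeholder only
wif = 'KwdMAjGmerYanjeui5SHS7JkmpZvVipYvB2LJGU1ZxJwYvP98617'
private_key = Account.get_private_key_from_wif(wif)
assert len(private_key) == 32  # 32-byte ECDSA private key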
bcbio/bcbio-nextgen
bcbio/variation/germline.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/germline.py#L91-L123
def filter_to_pass_and_reject(in_file, paired, out_dir=None):
    """Filter VCF to only those with a strict PASS/REJECT: somatic + germline.

    Removes low quality calls filtered but also labeled with REJECT.
    """
    from bcbio.heterogeneity import bubbletree
    out_file = "%s-prfilter.vcf.gz" % utils.splitext_plus(in_file)[0]
    if out_dir:
        out_file = os.path.join(out_dir, os.path.basename(out_file))
    if not utils.file_uptodate(out_file, in_file):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            max_depth = bubbletree.max_normal_germline_depth(in_file, bubbletree.PARAMS, paired)
            tx_out_plain = tx_out_file.replace(".vcf.gz", ".vcf")
            with contextlib.closing(cyvcf2.VCF(in_file)) as reader:
                reader = _add_db_to_header(reader)
                with contextlib.closing(cyvcf2.Writer(tx_out_plain, reader)) as writer:
                    for rec in reader:
                        filters = rec.FILTER.split(";") if rec.FILTER else []
                        other_filters = [x for x in filters if x not in ["PASS", ".", "REJECT"]]
                        if len(other_filters) == 0 or bubbletree.is_info_germline(rec):
                            # Germline, check if we should include based on frequencies
                            if "REJECT" in filters or bubbletree.is_info_germline(rec):
                                stats = bubbletree._is_possible_loh(rec, reader, bubbletree.PARAMS, paired,
                                                                    use_status=True, max_normal_depth=max_depth)
                                if stats:
                                    rec.FILTER = "PASS"
                                    rec.INFO["DB"] = True
                                    writer.write_record(rec)
                            # Somatic, always include
                            else:
                                writer.write_record(rec)
            vcfutils.bgzip_and_index(tx_out_plain, paired.tumor_data["config"])
    return out_file
[ "def", "filter_to_pass_and_reject", "(", "in_file", ",", "paired", ",", "out_dir", "=", "None", ")", ":", "from", "bcbio", ".", "heterogeneity", "import", "bubbletree", "out_file", "=", "\"%s-prfilter.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "in_file", ")", "[", "0", "]", "if", "out_dir", ":", "out_file", "=", "os", ".", "path", ".", "join", "(", "out_dir", ",", "os", ".", "path", ".", "basename", "(", "out_file", ")", ")", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "in_file", ")", ":", "with", "file_transaction", "(", "paired", ".", "tumor_data", ",", "out_file", ")", "as", "tx_out_file", ":", "max_depth", "=", "bubbletree", ".", "max_normal_germline_depth", "(", "in_file", ",", "bubbletree", ".", "PARAMS", ",", "paired", ")", "tx_out_plain", "=", "tx_out_file", ".", "replace", "(", "\".vcf.gz\"", ",", "\".vcf\"", ")", "with", "contextlib", ".", "closing", "(", "cyvcf2", ".", "VCF", "(", "in_file", ")", ")", "as", "reader", ":", "reader", "=", "_add_db_to_header", "(", "reader", ")", "with", "contextlib", ".", "closing", "(", "cyvcf2", ".", "Writer", "(", "tx_out_plain", ",", "reader", ")", ")", "as", "writer", ":", "for", "rec", "in", "reader", ":", "filters", "=", "rec", ".", "FILTER", ".", "split", "(", "\";\"", ")", "if", "rec", ".", "FILTER", "else", "[", "]", "other_filters", "=", "[", "x", "for", "x", "in", "filters", "if", "x", "not", "in", "[", "\"PASS\"", ",", "\".\"", ",", "\"REJECT\"", "]", "]", "if", "len", "(", "other_filters", ")", "==", "0", "or", "bubbletree", ".", "is_info_germline", "(", "rec", ")", ":", "# Germline, check if we should include based on frequencies", "if", "\"REJECT\"", "in", "filters", "or", "bubbletree", ".", "is_info_germline", "(", "rec", ")", ":", "stats", "=", "bubbletree", ".", "_is_possible_loh", "(", "rec", ",", "reader", ",", "bubbletree", ".", "PARAMS", ",", "paired", ",", "use_status", "=", "True", ",", "max_normal_depth", "=", "max_depth", ")", "if", "stats", ":", "rec", ".", "FILTER", "=", "\"PASS\"", "rec", ".", "INFO", "[", "\"DB\"", "]", "=", "True", "writer", ".", "write_record", "(", "rec", ")", "# Somatic, always include", "else", ":", "writer", ".", "write_record", "(", "rec", ")", "vcfutils", ".", "bgzip_and_index", "(", "tx_out_plain", ",", "paired", ".", "tumor_data", "[", "\"config\"", "]", ")", "return", "out_file" ]
Filter VCF to only those with a strict PASS/REJECT: somatic + germline. Removes low quality calls filtered but also labeled with REJECT.
[ "Filter", "VCF", "to", "only", "those", "with", "a", "strict", "PASS", "/", "REJECT", ":", "somatic", "+", "germline", "." ]
python
train
61.969697
idlesign/django-sitetree
sitetree/templatetags/sitetree.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/templatetags/sitetree.py#L12-L35
def sitetree_tree(parser, token):
    """Parses sitetree tag parameters.

    Two notation types are possible:

        1. Two arguments:
           {% sitetree_tree from "mytree" %}
           Used to render tree for "mytree" site tree.

        2. Four arguments:
           {% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
           Used to render tree for "mytree" site tree using specific
           template "sitetree/mytree.html"

    """
    tokens = token.split_contents()
    use_template = detect_clause(parser, 'template', tokens)
    tokens_num = len(tokens)

    if tokens_num in (3, 5):
        tree_alias = parser.compile_filter(tokens[2])
        return sitetree_treeNode(tree_alias, use_template)
    else:
        raise template.TemplateSyntaxError(
            '%r tag requires two arguments. E.g. {%% sitetree_tree from "mytree" %%}.' % tokens[0])
[ "def", "sitetree_tree", "(", "parser", ",", "token", ")", ":", "tokens", "=", "token", ".", "split_contents", "(", ")", "use_template", "=", "detect_clause", "(", "parser", ",", "'template'", ",", "tokens", ")", "tokens_num", "=", "len", "(", "tokens", ")", "if", "tokens_num", "in", "(", "3", ",", "5", ")", ":", "tree_alias", "=", "parser", ".", "compile_filter", "(", "tokens", "[", "2", "]", ")", "return", "sitetree_treeNode", "(", "tree_alias", ",", "use_template", ")", "else", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "'%r tag requires two arguments. E.g. {%% sitetree_tree from \"mytree\" %%}.'", "%", "tokens", "[", "0", "]", ")" ]
Parses sitetree tag parameters.

Two notation types are possible:

    1. Two arguments:
       {% sitetree_tree from "mytree" %}
       Used to render tree for "mytree" site tree.

    2. Four arguments:
       {% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
       Used to render tree for "mytree" site tree using specific template "sitetree/mytree.html"
[ "Parses", "sitetree", "tag", "parameters", "." ]
python
test
35.916667
secdev/scapy
scapy/layers/tls/session.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/session.py#L699-L710
def compute_tls13_resumption_secret(self):
    """
    self.handshake_messages should be ClientHello...ClientFinished.
    """
    if self.connection_end == "server":
        hkdf = self.prcs.hkdf
    elif self.connection_end == "client":
        hkdf = self.pwcs.hkdf
    rs = hkdf.derive_secret(self.tls13_master_secret,
                            b"resumption master secret",
                            b"".join(self.handshake_messages))
    self.tls13_derived_secrets["resumption_secret"] = rs
[ "def", "compute_tls13_resumption_secret", "(", "self", ")", ":", "if", "self", ".", "connection_end", "==", "\"server\"", ":", "hkdf", "=", "self", ".", "prcs", ".", "hkdf", "elif", "self", ".", "connection_end", "==", "\"client\"", ":", "hkdf", "=", "self", ".", "pwcs", ".", "hkdf", "rs", "=", "hkdf", ".", "derive_secret", "(", "self", ".", "tls13_master_secret", ",", "b\"resumption master secret\"", ",", "b\"\"", ".", "join", "(", "self", ".", "handshake_messages", ")", ")", "self", ".", "tls13_derived_secrets", "[", "\"resumption_secret\"", "]", "=", "rs" ]
self.handshake_messages should be ClientHello...ClientFinished.
[ "self", ".", "handshake_messages", "should", "be", "ClientHello", "...", "ClientFinished", "." ]
python
train
44.333333
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/ext.py
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/ext.py#L40-L43
def init_app(self, app):
    """Flask application initialization."""
    app.cli.add_command(upgrader_cmd)
    app.extensions['invenio-upgrader'] = self
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "app", ".", "cli", ".", "add_command", "(", "upgrader_cmd", ")", "app", ".", "extensions", "[", "'invenio-upgrader'", "]", "=", "self" ]
Flask application initialization.
[ "Flask", "application", "initialization", "." ]
python
train
40.25
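A sketch of the standard Flask extension pattern this init_app() follows; the InvenioUpgrader class name is inferred from the package name and should be treated as an assumption.

from flask import Flask
from invenio_upgrader.ext import InvenioUpgrader  # assumed class name

app = Flask(__name__)
ext = InvenioUpgrader()
ext.init_app(app)
assert 'invenio-upgrader' in app.extensions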
Azure/azure-cli-extensions
src/express-route/azext_express_route/vendored_sdks/network_management_client.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/express-route/azext_express_route/vendored_sdks/network_management_client.py#L772-L782
def express_route_connections(self):
    """Instance depends on the API version:

       * 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
    """
    api_version = self._get_api_version('express_route_connections')
    if api_version == '2018-08-01':
        from .v2018_08_01.operations import ExpressRouteConnectionsOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
[ "def", "express_route_connections", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'express_route_connections'", ")", "if", "api_version", "==", "'2018-08-01'", ":", "from", ".", "v2018_08_01", ".", "operations", "import", "ExpressRouteConnectionsOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version:

* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
train
63.454545
AnthonyBloomer/daftlistings
daftlistings/listing.py
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/listing.py#L121-L140
def upcoming_viewings(self):
    """
    Returns an array of upcoming viewings for a property.

    :return: a list of upcoming viewing strings
    """
    upcoming_viewings = []
    try:
        if self._data_from_search:
            viewings = self._data_from_search.find_all(
                'div', {'class': 'smi-onview-text'})
        else:
            viewings = []
    except Exception as e:
        if self._debug:
            logging.error(
                "Error getting upcoming_viewings. Error message: " + e.args[0])
        return
    for viewing in viewings:
        upcoming_viewings.append(viewing.text.strip())
    return upcoming_viewings
[ "def", "upcoming_viewings", "(", "self", ")", ":", "upcoming_viewings", "=", "[", "]", "try", ":", "if", "self", ".", "_data_from_search", ":", "viewings", "=", "self", ".", "_data_from_search", ".", "find_all", "(", "'div'", ",", "{", "'class'", ":", "'smi-onview-text'", "}", ")", "else", ":", "viewings", "=", "[", "]", "except", "Exception", "as", "e", ":", "if", "self", ".", "_debug", ":", "logging", ".", "error", "(", "\"Error getting upcoming_viewings. Error message: \"", "+", "e", ".", "args", "[", "0", "]", ")", "return", "for", "viewing", "in", "viewings", ":", "upcoming_viewings", ".", "append", "(", "viewing", ".", "text", ".", "strip", "(", ")", ")", "return", "upcoming_viewings" ]
Returns an array of upcoming viewings for a property.

:return: a list of upcoming viewing strings
[ "Returns", "an", "array", "of", "upcoming", "viewings", "for", "a", "property", ".", ":", "return", ":" ]
python
train
33.9
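A hedged usage sketch; it assumes the simple Daft().search() flow from the daftlistings README and that upcoming_viewings is exposed as a property on each listing (the method can return None when scraping fails, hence the `or []`).

from daftlistings import Daft

daft = Daft()
listings = daft.search()
for listing in listings:
    for viewing in (listing.upcoming_viewings or []):
        print(viewing)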
CZ-NIC/yangson
yangson/__main__.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/__main__.py#L34-L165
def main(ylib: str = None,
         path: str = None,
         scope: ValidationScope = ValidationScope.all,
         ctype: ContentType = ContentType.config,
         set_id: bool = False,
         tree: bool = False,
         no_types: bool = False,
         digest: bool = False,
         validate: str = None) -> int:
    """Entry-point for a validation script.

    Args:
        ylib: Name of the file with YANG library
        path: Colon-separated list of directories to search for YANG modules.
        scope: Validation scope (syntax, semantics or all).
        ctype: Content type of the data instance (config, nonconfig or all)
        set_id: If `True`, print module set id.
        tree: If `True`, print schema tree.
        no_types: If `True`, don't print types in schema tree.
        digest: If `True`, print schema digest.
        validate: Name of file to validate against the schema.

    Returns:
        Numeric return code (0=no error, 2=YANG error, 1=other)
    """
    if ylib is None:
        parser = argparse.ArgumentParser(
            prog="yangson",
            description="Validate JSON data against a YANG data model.")
        parser.add_argument(
            "-V", "--version", action="version",
            version=f"%(prog)s {pkg_resources.get_distribution('yangson').version}")
        parser.add_argument(
            "ylib", metavar="YLIB",
            help=("name of the file with description of the data model"
                  " in JSON-encoded YANG library format [RFC 7895]"))
        parser.add_argument(
            "-p", "--path",
            help=("colon-separated list of directories to search"
                  " for YANG modules"))
        grp = parser.add_mutually_exclusive_group()
        grp.add_argument(
            "-i", "--id", action="store_true",
            help="print module set id")
        grp.add_argument(
            "-t", "--tree", action="store_true",
            help="print schema tree as ASCII art")
        grp.add_argument(
            "-d", "--digest", action="store_true",
            help="print schema digest in JSON format")
        grp.add_argument(
            "-v", "--validate", metavar="INST",
            help="name of the file with JSON-encoded instance data")
        parser.add_argument(
            "-s", "--scope", choices=["syntax", "semantics", "all"],
            default="all", help="validation scope (default: %(default)s)")
        parser.add_argument(
            "-c", "--ctype", type=str, choices=["config", "nonconfig", "all"],
            default="config",
            help="content type of the data instance (default: %(default)s)")
        parser.add_argument(
            "-n", "--no-types", action="store_true",
            help="suppress type info in tree output")
        args = parser.parse_args()
        ylib: str = args.ylib
        path: Optional[str] = args.path
        scope = ValidationScope[args.scope]
        ctype = ContentType[args.ctype]
        set_id: bool = args.id
        tree: bool = args.tree
        no_types = args.no_types
        digest: bool = args.digest
        validate: str = args.validate
    try:
        with open(ylib, encoding="utf-8") as infile:
            yl = infile.read()
    except (FileNotFoundError, PermissionError,
            json.decoder.JSONDecodeError) as e:
        print("YANG library:", str(e), file=sys.stderr)
        return 1
    sp = path if path else os.environ.get("YANG_MODPATH", ".")
    try:
        dm = DataModel(yl, tuple(sp.split(":")))
    except BadYangLibraryData as e:
        print("Invalid YANG library:", str(e), file=sys.stderr)
        return 2
    except FeaturePrerequisiteError as e:
        print("Unsupported pre-requisite feature:", str(e), file=sys.stderr)
        return 2
    except MultipleImplementedRevisions as e:
        print("Multiple implemented revisions:", str(e), file=sys.stderr)
        return 2
    except ModuleNotFound as e:
        print("Module not found:", str(e), file=sys.stderr)
        return 2
    except ModuleNotRegistered as e:
        print("Module not registered:", str(e), file=sys.stderr)
        return 2
    if set_id:
        print(dm.module_set_id())
        return 0
    if tree:
        print(dm.ascii_tree(no_types))
        return 0
    if digest:
        print(dm.schema_digest())
        return 0
    if not validate:
        return 0
    try:
        with open(validate, encoding="utf-8") as infile:
            itxt = json.load(infile)
    except (FileNotFoundError, PermissionError,
            json.decoder.JSONDecodeError) as e:
        print("Instance data:", str(e), file=sys.stderr)
        return 1
    try:
        i = dm.from_raw(itxt)
    except RawMemberError as e:
        print("Illegal object member:", str(e), file=sys.stderr)
        return 3
    except RawTypeError as e:
        print("Invalid type:", str(e), file=sys.stderr)
        return 3
    try:
        i.validate(scope, ctype)
    except SchemaError as e:
        print("Schema error:", str(e), file=sys.stderr)
        return 3
    except SemanticError as e:
        print("Semantic error:", str(e), file=sys.stderr)
        return 3
    except YangTypeError as e:
        print("Invalid type:", str(e), file=sys.stderr)
        return 3
    return 0
[ "def", "main", "(", "ylib", ":", "str", "=", "None", ",", "path", ":", "str", "=", "None", ",", "scope", ":", "ValidationScope", "=", "ValidationScope", ".", "all", ",", "ctype", ":", "ContentType", "=", "ContentType", ".", "config", ",", "set_id", ":", "bool", "=", "False", ",", "tree", ":", "bool", "=", "False", ",", "no_types", ":", "bool", "=", "False", ",", "digest", ":", "bool", "=", "False", ",", "validate", ":", "str", "=", "None", ")", "->", "int", ":", "if", "ylib", "is", "None", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "\"yangson\"", ",", "description", "=", "\"Validate JSON data against a YANG data model.\"", ")", "parser", ".", "add_argument", "(", "\"-V\"", ",", "\"--version\"", ",", "action", "=", "\"version\"", ",", "version", "=", "f\"%(prog)s {pkg_resources.get_distribution('yangson').version}\"", ")", "parser", ".", "add_argument", "(", "\"ylib\"", ",", "metavar", "=", "\"YLIB\"", ",", "help", "=", "(", "\"name of the file with description of the data model\"", "\" in JSON-encoded YANG library format [RFC 7895]\"", ")", ")", "parser", ".", "add_argument", "(", "\"-p\"", ",", "\"--path\"", ",", "help", "=", "(", "\"colon-separated list of directories to search\"", "\" for YANG modules\"", ")", ")", "grp", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "grp", ".", "add_argument", "(", "\"-i\"", ",", "\"--id\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"print module set id\"", ")", "grp", ".", "add_argument", "(", "\"-t\"", ",", "\"--tree\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"print schema tree as ASCII art\"", ")", "grp", ".", "add_argument", "(", "\"-d\"", ",", "\"--digest\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"print schema digest in JSON format\"", ")", "grp", ".", "add_argument", "(", "\"-v\"", ",", "\"--validate\"", ",", "metavar", "=", "\"INST\"", ",", "help", "=", "\"name of the file with JSON-encoded instance data\"", ")", "parser", ".", "add_argument", "(", "\"-s\"", ",", "\"--scope\"", ",", "choices", "=", "[", "\"syntax\"", ",", "\"semantics\"", ",", "\"all\"", "]", ",", "default", "=", "\"all\"", ",", "help", "=", "\"validation scope (default: %(default)s)\"", ")", "parser", ".", "add_argument", "(", "\"-c\"", ",", "\"--ctype\"", ",", "type", "=", "str", ",", "choices", "=", "[", "\"config\"", ",", "\"nonconfig\"", ",", "\"all\"", "]", ",", "default", "=", "\"config\"", ",", "help", "=", "\"content type of the data instance (default: %(default)s)\"", ")", "parser", ".", "add_argument", "(", "\"-n\"", ",", "\"--no-types\"", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"suppress type info in tree output\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "ylib", ":", "str", "=", "args", ".", "ylib", "path", ":", "Optional", "[", "str", "]", "=", "args", ".", "path", "scope", "=", "ValidationScope", "[", "args", ".", "scope", "]", "ctype", "=", "ContentType", "[", "args", ".", "ctype", "]", "set_id", ":", "bool", "=", "args", ".", "id", "tree", ":", "bool", "=", "args", ".", "tree", "no_types", "=", "args", ".", "no_types", "digest", ":", "bool", "=", "args", ".", "digest", "validate", ":", "str", "=", "args", ".", "validate", "try", ":", "with", "open", "(", "ylib", ",", "encoding", "=", "\"utf-8\"", ")", "as", "infile", ":", "yl", "=", "infile", ".", "read", "(", ")", "except", "(", "FileNotFoundError", ",", "PermissionError", ",", "json", ".", "decoder", ".", "JSONDecodeError", ")", "as", "e", ":", "print", "(", "\"YANG library:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "1", "sp", "=", "path", "if", "path", "else", "os", ".", "environ", ".", "get", "(", "\"YANG_MODPATH\"", ",", "\".\"", ")", "try", ":", "dm", "=", "DataModel", "(", "yl", ",", "tuple", "(", "sp", ".", "split", "(", "\":\"", ")", ")", ")", "except", "BadYangLibraryData", "as", "e", ":", "print", "(", "\"Invalid YANG library:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "2", "except", "FeaturePrerequisiteError", "as", "e", ":", "print", "(", "\"Unsupported pre-requisite feature:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "2", "except", "MultipleImplementedRevisions", "as", "e", ":", "print", "(", "\"Multiple implemented revisions:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "2", "except", "ModuleNotFound", "as", "e", ":", "print", "(", "\"Module not found:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "2", "except", "ModuleNotRegistered", "as", "e", ":", "print", "(", "\"Module not registered:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "2", "if", "set_id", ":", "print", "(", "dm", ".", "module_set_id", "(", ")", ")", "return", "0", "if", "tree", ":", "print", "(", "dm", ".", "ascii_tree", "(", "no_types", ")", ")", "return", "0", "if", "digest", ":", "print", "(", "dm", ".", "schema_digest", "(", ")", ")", "return", "0", "if", "not", "validate", ":", "return", "0", "try", ":", "with", "open", "(", "validate", ",", "encoding", "=", "\"utf-8\"", ")", "as", "infile", ":", "itxt", "=", "json", ".", "load", "(", "infile", ")", "except", "(", "FileNotFoundError", ",", "PermissionError", ",", "json", ".", "decoder", ".", "JSONDecodeError", ")", "as", "e", ":", "print", "(", "\"Instance data:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "1", "try", ":", "i", "=", "dm", ".", "from_raw", "(", "itxt", ")", "except", "RawMemberError", "as", "e", ":", "print", "(", "\"Illegal object member:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "3", "except", "RawTypeError", "as", "e", ":", "print", "(", "\"Invalid type:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "3", "try", ":", "i", ".", "validate", "(", "scope", ",", "ctype", ")", "except", "SchemaError", "as", "e", ":", "print", "(", "\"Schema error:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "3", "except", "SemanticError", "as", "e", ":", "print", "(", "\"Semantic error:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "3", "except", "YangTypeError", "as", "e", ":", "print", "(", "\"Invalid type:\"", ",", "str", "(", "e", ")", ",", "file", "=", "sys", ".", "stderr", ")", "return", "3", "return", "0" ]
Entry-point for a validation script.

Args:
    ylib: Name of the file with YANG library
    path: Colon-separated list of directories to search for YANG modules.
    scope: Validation scope (syntax, semantics or all).
    ctype: Content type of the data instance (config, nonconfig or all)
    set_id: If `True`, print module set id.
    tree: If `True`, print schema tree.
    no_types: If `True`, don't print types in schema tree.
    digest: If `True`, print schema digest.
    validate: Name of file to validate against the schema.

Returns:
    Numeric return code (0=no error, 2=YANG error, 1=other)
[ "Entry", "-", "point", "for", "a", "validation", "script", "." ]
python
train
38.674242
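Because every flag maps to a keyword argument, main() can also be driven programmatically, bypassing argparse; the file names below are placeholders.

import sys
from yangson.__main__ import main

rc = main(ylib='yang-library.json', path='yang-modules',
          validate='instance.json')
sys.exit(rc)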
spyder-ide/conda-manager
conda_manager/api/download_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/download_api.py#L124-L161
def queryProxy(self, query):
    """Override Qt method."""
    # Query is a QNetworkProxyQuery
    valid_proxies = []

    query_scheme = query.url().scheme()
    query_host = query.url().host()
    query_scheme_host = '{0}://{1}'.format(query_scheme, query_host)
    proxy_servers = process_proxy_servers(self.proxy_servers)
    # print(proxy_servers)

    if proxy_servers:
        for key in proxy_servers:
            proxy_settings = proxy_servers[key]

            if key == 'http' and query_scheme == 'http':
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
            elif key == 'https' and query_scheme == 'https':
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)

            if key == query_scheme_host:
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
    else:
        valid_proxies.append(QNetworkProxy(QNetworkProxy.DefaultProxy))

    # print('factoy', query.url().toString())
    # print(valid_proxies)
    # for pr in valid_proxies:
    #     user = pr.user()
    #     password = pr.password()
    #     host = pr.hostName()
    #     port = pr.port()
    #     print(query.url(), user, password, host, port)
    # print('\n')

    return valid_proxies
[ "def", "queryProxy", "(", "self", ",", "query", ")", ":", "# Query is a QNetworkProxyQuery", "valid_proxies", "=", "[", "]", "query_scheme", "=", "query", ".", "url", "(", ")", ".", "scheme", "(", ")", "query_host", "=", "query", ".", "url", "(", ")", ".", "host", "(", ")", "query_scheme_host", "=", "'{0}://{1}'", ".", "format", "(", "query_scheme", ",", "query_host", ")", "proxy_servers", "=", "process_proxy_servers", "(", "self", ".", "proxy_servers", ")", "# print(proxy_servers)", "if", "proxy_servers", ":", "for", "key", "in", "proxy_servers", ":", "proxy_settings", "=", "proxy_servers", "[", "key", "]", "if", "key", "==", "'http'", "and", "query_scheme", "==", "'http'", ":", "proxy", "=", "self", ".", "_create_proxy", "(", "proxy_settings", ")", "valid_proxies", ".", "append", "(", "proxy", ")", "elif", "key", "==", "'https'", "and", "query_scheme", "==", "'https'", ":", "proxy", "=", "self", ".", "_create_proxy", "(", "proxy_settings", ")", "valid_proxies", ".", "append", "(", "proxy", ")", "if", "key", "==", "query_scheme_host", ":", "proxy", "=", "self", ".", "_create_proxy", "(", "proxy_settings", ")", "valid_proxies", ".", "append", "(", "proxy", ")", "else", ":", "valid_proxies", ".", "append", "(", "QNetworkProxy", "(", "QNetworkProxy", ".", "DefaultProxy", ")", ")", "# print('factoy', query.url().toString())", "# print(valid_proxies)", "# for pr in valid_proxies:", "# user = pr.user()", "# password = pr.password()", "# host = pr.hostName()", "# port = pr.port()", "# print(query.url(), user, password, host, port)", "# print('\\n')", "return", "valid_proxies" ]
Override Qt method.
[ "Override", "Qt", "method", "." ]
python
train
37.236842
ambitioninc/rabbitmq-admin
rabbitmq_admin/api.py
https://github.com/ambitioninc/rabbitmq-admin/blob/ff65054115f19991da153f0e4f4e45e526545fea/rabbitmq_admin/api.py#L140-L149
def get_channel(self, name):
    """
    Details about an individual channel.

    :param name: The channel name
    :type name: str
    """
    return self._api_get('/api/channels/{0}'.format(
        urllib.parse.quote_plus(name)
    ))
[ "def", "get_channel", "(", "self", ",", "name", ")", ":", "return", "self", ".", "_api_get", "(", "'/api/channels/{0}'", ".", "format", "(", "urllib", ".", "parse", ".", "quote_plus", "(", "name", ")", ")", ")" ]
Details about an individual channel.

:param name: The channel name
:type name: str
[ "Details", "about", "an", "individual", "channel", "." ]
python
train
26.1
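A hedged usage sketch; the AdminAPI constructor shown follows the project README, and the channel name is a placeholder in RabbitMQ's usual "host:port -> host:port (n)" form.

from rabbitmq_admin import AdminAPI

api = AdminAPI(url='http://127.0.0.1:15672', auth=('guest', 'guest'))
channel = api.get_channel('127.0.0.1:5672 -> 127.0.0.1:55555 (1)')
print(channel)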
rwl/pylon
pylon/dyn.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/dyn.py#L132-L198
def generatorInit(self, U0):
    """ Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
    Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
    electa/teaching/matdyn/} for more information.

    @rtype: tuple
    @return: Initial generator conditions.
    """
    j = 0 + 1j
    generators = self.dyn_generators

    Efd0 = zeros(len(generators))
    Xgen0 = zeros((len(generators), 4))

    typ1 = [g._i for g in generators if g.model == CLASSICAL]
    typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]

    # Generator type 1: classical model
    x_tr = array([g.x_tr for g in generators])

    omega0 = ones(len(typ1)) * 2 * pi * self.freq

    # Initial machine armature currents.
    Sg = array([g.p + j * g.q for g in generators])
    Ia0 = conj(Sg[typ1]) / conj(U0) / self.base_mva

    # Initial Steady-state internal EMF.
    Eq_tr0 = U0[typ1] + j * x_tr * Ia0
    delta0 = angle(Eq_tr0)
    Eq_tr0 = abs(Eq_tr0)
    Xgen0[typ1, :] = c_[delta0, omega0, Eq_tr0]

    # Generator type 2: 4th order model
    xd = array([g.xd for g in generators])
    xq = array([g.xq for g in generators])
    xd_tr = array([g.xd_tr for g in generators])
    xq_tr = array([g.xq_tr for g in generators])

    omega0 = ones(len(typ2)) * 2 * pi * self.freq

    # Initial machine armature currents.
    Ia0 = conj(Sg[typ2]) / conj(U0[typ2]) / self.base_mva
    phi0 = angle(Ia0)

    # Initial Steady-state internal EMF.
    Eq0 = U0[typ2] + j * xq * Ia0
    delta0 = angle(Eq0)

    # Machine currents in dq frame.
    Id0 = -abs(Ia0) * sin(delta0 - phi0)
    Iq0 = abs(Ia0) * cos(delta0 - phi0)

    # Field voltage.
    Efd0[typ2] = abs(Eq0) - (xd - xq) * Id0

    # Initial Transient internal EMF.
    Eq_tr0 = Efd0[typ2] + (xd - xd_tr) * Id0
    Ed_tr0 = -(xq - xq_tr) * Iq0

    Xgen0[typ2, :] = c_[delta0, omega0, Eq_tr0, Ed_tr0]

    # Generator type 3:

    # Generator type 4:

    return Efd0, Xgen0
[ "def", "generatorInit", "(", "self", ",", "U0", ")", ":", "j", "=", "0", "+", "1j", "generators", "=", "self", ".", "dyn_generators", "Efd0", "=", "zeros", "(", "len", "(", "generators", ")", ")", "Xgen0", "=", "zeros", "(", "(", "len", "(", "generators", ")", ",", "4", ")", ")", "typ1", "=", "[", "g", ".", "_i", "for", "g", "in", "generators", "if", "g", ".", "model", "==", "CLASSICAL", "]", "typ2", "=", "[", "g", ".", "_i", "for", "g", "in", "generators", "if", "g", ".", "model", "==", "FOURTH_ORDER", "]", "# Generator type 1: classical model", "x_tr", "=", "array", "(", "[", "g", ".", "x_tr", "for", "g", "in", "generators", "]", ")", "omega0", "=", "ones", "(", "len", "(", "typ1", ")", ")", "*", "2", "*", "pi", "*", "self", ".", "freq", "# Initial machine armature currents.", "Sg", "=", "array", "(", "[", "g", ".", "p", "+", "j", "*", "g", ".", "q", "for", "g", "in", "generators", "]", ")", "Ia0", "=", "conj", "(", "Sg", "[", "typ1", "]", ")", "/", "conj", "(", "U0", ")", "/", "self", ".", "base_mva", "# Initial Steady-state internal EMF.", "Eq_tr0", "=", "U0", "[", "typ1", "]", "+", "j", "*", "x_tr", "*", "Ia0", "delta0", "=", "angle", "(", "Eq_tr0", ")", "Eq_tr0", "=", "abs", "(", "Eq_tr0", ")", "Xgen0", "[", "typ1", ",", ":", "]", "=", "c_", "[", "delta0", ",", "omega0", ",", "Eq_tr0", "]", "# Generator type 2: 4th order model", "xd", "=", "array", "(", "[", "g", ".", "xd", "for", "g", "in", "generators", "]", ")", "xq", "=", "array", "(", "[", "g", ".", "xq", "for", "g", "in", "generators", "]", ")", "xd_tr", "=", "array", "(", "[", "g", ".", "xd_tr", "for", "g", "in", "generators", "]", ")", "xq_tr", "=", "array", "(", "[", "g", ".", "xq_tr", "for", "g", "in", "generators", "]", ")", "omega0", "=", "ones", "(", "len", "(", "typ2", ")", ")", "*", "2", "*", "pi", "*", "self", ".", "freq", "# Initial machine armature currents.", "Ia0", "=", "conj", "(", "Sg", "[", "typ2", "]", ")", "/", "conj", "(", "U0", "[", "typ2", "]", ")", "/", "self", ".", "base_mva", "phi0", "=", "angle", "(", "Ia0", ")", "# Initial Steady-state internal EMF.", "Eq0", "=", "U0", "[", "typ2", "]", "+", "j", "*", "xq", "*", "Ia0", "delta0", "=", "angle", "(", "Eq0", ")", "# Machine currents in dq frame.", "Id0", "=", "-", "abs", "(", "Ia0", ")", "*", "sin", "(", "delta0", "-", "phi0", ")", "Iq0", "=", "abs", "(", "Ia0", ")", "*", "cos", "(", "delta0", "-", "phi0", ")", "# Field voltage.", "Efd0", "[", "typ2", "]", "=", "abs", "(", "Eq0", ")", "-", "(", "xd", "-", "xq", ")", "*", "Id0", "# Initial Transient internal EMF.", "Eq_tr0", "=", "Efd0", "[", "typ2", "]", "+", "(", "xd", "-", "xd_tr", ")", "*", "Id0", "Ed_tr0", "=", "-", "(", "xq", "-", "xq_tr", ")", "*", "Iq0", "Xgen0", "[", "typ2", ",", ":", "]", "=", "c_", "[", "delta0", ",", "omega0", ",", "Eq_tr0", ",", "Ed_tr0", "]", "# Generator type 3:", "# Generator type 4:", "return", "Efd0", ",", "Xgen0" ]
Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.

@rtype: tuple
@return: Initial generator conditions.
[ "Based", "on", "GeneratorInit", ".", "m", "from", "MatDyn", "by", "Stijn", "Cole", "developed", "at", "Katholieke", "Universiteit", "Leuven", ".", "See", "U", "{", "http", ":", "//", "www", ".", "esat", ".", "kuleuven", ".", "be", "/", "electa", "/", "teaching", "/", "matdyn", "/", "}", "for", "more", "information", "." ]
python
train
30.865672
boriel/zxbasic
arch/zx48k/backend/__array.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__array.py#L127-L136
def _aloadstr(ins):
    ''' Loads a string value from a memory address.
    '''
    output = _addr(ins.quad[2])
    output.append('call __ILOADSTR')
    output.append('push hl')
    REQUIRES.add('loadstr.asm')
    return output
[ "def", "_aloadstr", "(", "ins", ")", ":", "output", "=", "_addr", "(", "ins", ".", "quad", "[", "2", "]", ")", "output", ".", "append", "(", "'call __ILOADSTR'", ")", "output", ".", "append", "(", "'push hl'", ")", "REQUIRES", ".", "add", "(", "'loadstr.asm'", ")", "return", "output" ]
Loads a string value from a memory address.
[ "Loads", "a", "string", "value", "from", "a", "memory", "address", "." ]
python
train
22
nerdvegas/rez
src/rez/solver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L1039-L1057
def extract(self):
    """Extract a common dependency.

    Returns:
        A (_PackageScope, Requirement) tuple, containing the new scope copy
        with the extraction, and the extracted package range. If no package
        was extracted, then (self, None) is returned.
    """
    if not self.package_request.conflict:
        new_slice, package_request = self.variant_slice.extract()
        if package_request:
            assert(new_slice is not self.variant_slice)
            scope = copy.copy(self)
            scope.variant_slice = new_slice
            if self.pr:
                self.pr("extracted %s from %s", package_request, self)
            return (scope, package_request)
    return (self, None)
[ "def", "extract", "(", "self", ")", ":", "if", "not", "self", ".", "package_request", ".", "conflict", ":", "new_slice", ",", "package_request", "=", "self", ".", "variant_slice", ".", "extract", "(", ")", "if", "package_request", ":", "assert", "(", "new_slice", "is", "not", "self", ".", "variant_slice", ")", "scope", "=", "copy", ".", "copy", "(", "self", ")", "scope", ".", "variant_slice", "=", "new_slice", "if", "self", ".", "pr", ":", "self", ".", "pr", "(", "\"extracted %s from %s\"", ",", "package_request", ",", "self", ")", "return", "(", "scope", ",", "package_request", ")", "return", "(", "self", ",", "None", ")" ]
Extract a common dependency.

Returns:
    A (_PackageScope, Requirement) tuple, containing the new scope copy
    with the extraction, and the extracted package range. If no package
    was extracted, then (self, None) is returned.
[ "Extract", "a", "common", "dependency", "." ]
python
train
40.157895
CivicSpleen/ambry
ambry/valuetype/geo.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/valuetype/geo.py#L115-L118
def dotted(self):
    """Return just the tract number, excluding the state and county, in the dotted format"""
    v = str(self.geoid.tract).zfill(6)
    return v[0:4] + '.' + v[4:]
[ "def", "dotted", "(", "self", ")", ":", "v", "=", "str", "(", "self", ".", "geoid", ".", "tract", ")", ".", "zfill", "(", "6", ")", "return", "v", "[", "0", ":", "4", "]", "+", "'.'", "+", "v", "[", "4", ":", "]" ]
Return just the tract number, excluding the state and county, in the dotted format
[ "Return", "just", "the", "tract", "number", "excluding", "the", "state", "and", "county", "in", "the", "dotted", "format" ]
python
train
47.5
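A standalone sketch of the formatting rule dotted() applies, under the assumption that a tract such as 305.02 is stored on the geoid as the integer 30502: zero-pad to six digits, then split four-and-two around the dot.

v = str(30502).zfill(6)        # '030502'
assert v[0:4] + '.' + v[4:] == '0305.02'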