Dataset schema (each record below lists these fields, one per line, in this order):

  repo              string, length 7 to 55
  path              string, length 4 to 223
  url               string, length 87 to 315
  code              string, length 75 to 104k
  code_tokens       list
  docstring         string, length 1 to 46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, 7.91 to 980
buildbot/buildbot
master/buildbot/worker_transition.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/worker_transition.py#L131-L150
def setupWorkerTransition(): """Hook Twisted deprecation machinery to use custom warning class for Worker API deprecation warnings.""" default_warn_method = getWarningMethod() def custom_warn_method(message, category, stacklevel): if stacklevel is not None: stacklevel += 1 if _WORKER_WARNING_MARK in message: # Message contains our mark - it's Worker API Renaming warning, # issue it appropriately. message = message.replace(_WORKER_WARNING_MARK, "") warnings.warn( DeprecatedWorkerNameWarning(message), message, stacklevel) else: # Other's warning message default_warn_method(message, category, stacklevel) setWarningMethod(custom_warn_method)
[ "def", "setupWorkerTransition", "(", ")", ":", "default_warn_method", "=", "getWarningMethod", "(", ")", "def", "custom_warn_method", "(", "message", ",", "category", ",", "stacklevel", ")", ":", "if", "stacklevel", "is", "not", "None", ":", "stacklevel", "+=", "1", "if", "_WORKER_WARNING_MARK", "in", "message", ":", "# Message contains our mark - it's Worker API Renaming warning,", "# issue it appropriately.", "message", "=", "message", ".", "replace", "(", "_WORKER_WARNING_MARK", ",", "\"\"", ")", "warnings", ".", "warn", "(", "DeprecatedWorkerNameWarning", "(", "message", ")", ",", "message", ",", "stacklevel", ")", "else", ":", "# Other's warning message", "default_warn_method", "(", "message", ",", "category", ",", "stacklevel", ")", "setWarningMethod", "(", "custom_warn_method", ")" ]
Hook Twisted deprecation machinery to use custom warning class for Worker API deprecation warnings.
[ "Hook", "Twisted", "deprecation", "machinery", "to", "use", "custom", "warning", "class", "for", "Worker", "API", "deprecation", "warnings", "." ]
python
train
38.65
ff0000/scarlet
scarlet/cms/bundles.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/bundles.py#L342-L392
def get_string_from_view(self, request, view_name, url_kwargs, render_type='string'): """ Returns a string that is a rendering of the view given a request, view_name, and the original url_kwargs. Makes the following changes the view before rendering: * Sets can_submit to False. * Adds action_url to the context. This is the url where \ this view actually lives. * Sets the default base_template to be 'cms/partial.html' This will always call GET and never POST as any actions that modify data should take place on the original url and not like this. :param request: The request object. :param view_name: The name of the view that you want. :param url_kwargs: The url keyword arguments that came \ with the request object. The view itself is responsible \ to remove arguments that would not be part of a normal match \ for that view. This is done by calling the `get_url_kwargs` \ method on the view. :param render_type: The render type to use. Defaults to \ 'string'. """ response = "" try: view, name = self.get_initialized_view_and_name(view_name, render_type=render_type, can_submit=False, base_template='cms/partial.html', request=request, kwargs=url_kwargs) if isinstance(view, URLAlias): view_name = view.get_view_name(view_name) bundle = view.get_bundle(self, url_kwargs, {}) if bundle and isinstance(bundle, Bundle): return bundle.get_string_from_view(request, view_name, url_kwargs, render_type=render_type) elif view: if view and name and view.can_view(request.user): response = self._render_view_as_string(view, name, request, url_kwargs) except http.Http404: pass return response
[ "def", "get_string_from_view", "(", "self", ",", "request", ",", "view_name", ",", "url_kwargs", ",", "render_type", "=", "'string'", ")", ":", "response", "=", "\"\"", "try", ":", "view", ",", "name", "=", "self", ".", "get_initialized_view_and_name", "(", "view_name", ",", "render_type", "=", "render_type", ",", "can_submit", "=", "False", ",", "base_template", "=", "'cms/partial.html'", ",", "request", "=", "request", ",", "kwargs", "=", "url_kwargs", ")", "if", "isinstance", "(", "view", ",", "URLAlias", ")", ":", "view_name", "=", "view", ".", "get_view_name", "(", "view_name", ")", "bundle", "=", "view", ".", "get_bundle", "(", "self", ",", "url_kwargs", ",", "{", "}", ")", "if", "bundle", "and", "isinstance", "(", "bundle", ",", "Bundle", ")", ":", "return", "bundle", ".", "get_string_from_view", "(", "request", ",", "view_name", ",", "url_kwargs", ",", "render_type", "=", "render_type", ")", "elif", "view", ":", "if", "view", "and", "name", "and", "view", ".", "can_view", "(", "request", ".", "user", ")", ":", "response", "=", "self", ".", "_render_view_as_string", "(", "view", ",", "name", ",", "request", ",", "url_kwargs", ")", "except", "http", ".", "Http404", ":", "pass", "return", "response" ]
Returns a string that is a rendering of the view given a request, view_name, and the original url_kwargs. Makes the following changes the view before rendering: * Sets can_submit to False. * Adds action_url to the context. This is the url where \ this view actually lives. * Sets the default base_template to be 'cms/partial.html' This will always call GET and never POST as any actions that modify data should take place on the original url and not like this. :param request: The request object. :param view_name: The name of the view that you want. :param url_kwargs: The url keyword arguments that came \ with the request object. The view itself is responsible \ to remove arguments that would not be part of a normal match \ for that view. This is done by calling the `get_url_kwargs` \ method on the view. :param render_type: The render type to use. Defaults to \ 'string'.
[ "Returns", "a", "string", "that", "is", "a", "rendering", "of", "the", "view", "given", "a", "request", "view_name", "and", "the", "original", "url_kwargs", ".", "Makes", "the", "following", "changes", "the", "view", "before", "rendering", ":" ]
python
train
44.254902
lsst-epo/vela
astropixie-widgets/astropixie_widgets/science.py
https://github.com/lsst-epo/vela/blob/8e17ebec509be5c3cc2063f4645dfe9e26b49c18/astropixie-widgets/astropixie_widgets/science.py#L24-L41
def teff(cluster): """ Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use [Fe/H] of the cluster, if available. Returns a list of Teff values. """ b_vs, _ = cluster.stars() teffs = [] for b_v in b_vs: b_v -= cluster.eb_v if b_v > -0.04: x = (14.551 - b_v) / 3.684 else: x = (3.402 - math.sqrt(0.515 + 1.376 * b_v)) / 0.688 teffs.append(math.pow(10, x)) return teffs
[ "def", "teff", "(", "cluster", ")", ":", "b_vs", ",", "_", "=", "cluster", ".", "stars", "(", ")", "teffs", "=", "[", "]", "for", "b_v", "in", "b_vs", ":", "b_v", "-=", "cluster", ".", "eb_v", "if", "b_v", ">", "-", "0.04", ":", "x", "=", "(", "14.551", "-", "b_v", ")", "/", "3.684", "else", ":", "x", "=", "(", "3.402", "-", "math", ".", "sqrt", "(", "0.515", "+", "1.376", "*", "b_v", ")", ")", "/", "0.688", "teffs", ".", "append", "(", "math", ".", "pow", "(", "10", ",", "x", ")", ")", "return", "teffs" ]
Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use [Fe/H] of the cluster, if available. Returns a list of Teff values.
[ "Calculate", "Teff", "for", "main", "sequence", "stars", "ranging", "from", "Teff", "3500K", "-", "8000K", ".", "Use", "[", "Fe", "/", "H", "]", "of", "the", "cluster", "if", "available", "." ]
python
valid
25.888889
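The colour-temperature relation in the record above is easy to check in isolation. A minimal sketch (not part of the vela repository) that applies the same piecewise B-V to Teff formula to a single, already reddening-corrected colour index:

```python
import math

def bv_to_teff(b_v):
    # Same piecewise relation as teff() above, for one star.
    if b_v > -0.04:
        x = (14.551 - b_v) / 3.684
    else:
        x = (3.402 - math.sqrt(0.515 + 1.376 * b_v)) / 0.688
    return math.pow(10, x)

print(round(bv_to_teff(0.65)))  # ~5900 K for a roughly solar B-V colour
```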
3ll3d00d/vibe
backend/src/analyser/common/uploadcontroller.py
https://github.com/3ll3d00d/vibe/blob/124b029f13ac746723e92cb47e9cb56edd2e54b5/backend/src/analyser/common/uploadcontroller.py#L41-L54
def loadSignal(self, name, start=None, end=None): """ Loads the named entry from the upload cache as a signal. :param name: the name. :param start: the time to start from in HH:mm:ss.SSS format :param end: the time to end at in HH:mm:ss.SSS format. :return: the signal if the named upload exists. """ entry = self._getCacheEntry(name) if entry is not None: from analyser.common.signal import loadSignalFromWav return loadSignalFromWav(entry['path'], start=start, end=end) else: return None
[ "def", "loadSignal", "(", "self", ",", "name", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "entry", "=", "self", ".", "_getCacheEntry", "(", "name", ")", "if", "entry", "is", "not", "None", ":", "from", "analyser", ".", "common", ".", "signal", "import", "loadSignalFromWav", "return", "loadSignalFromWav", "(", "entry", "[", "'path'", "]", ",", "start", "=", "start", ",", "end", "=", "end", ")", "else", ":", "return", "None" ]
Loads the named entry from the upload cache as a signal. :param name: the name. :param start: the time to start from in HH:mm:ss.SSS format :param end: the time to end at in HH:mm:ss.SSS format. :return: the signal if the named upload exists.
[ "Loads", "the", "named", "entry", "from", "the", "upload", "cache", "as", "a", "signal", ".", ":", "param", "name", ":", "the", "name", ".", ":", "param", "start", ":", "the", "time", "to", "start", "from", "in", "HH", ":", "mm", ":", "ss", ".", "SSS", "format", ":", "param", "end", ":", "the", "time", "to", "end", "at", "in", "HH", ":", "mm", ":", "ss", ".", "SSS", "format", ".", ":", "return", ":", "the", "signal", "if", "the", "named", "upload", "exists", "." ]
python
train
42.285714
eamigo86/graphene-django-extras
graphene_django_extras/utils.py
https://github.com/eamigo86/graphene-django-extras/blob/b27fd6b5128f6b6a500a8b7a497d76be72d6a232/graphene_django_extras/utils.py#L104-L130
def get_obj(app_label, model_name, object_id): """ Function used to get a object :param app_label: A valid Django Model or a string with format: <app_label>.<model_name> :param model_name: Key into kwargs that contains de data: new_person :param object_id: :return: instance """ try: model = apps.get_model("{}.{}".format(app_label, model_name)) assert is_valid_django_model(model), ("Model {}.{} do not exist.").format( app_label, model_name ) obj = get_Object_or_None(model, pk=object_id) return obj except model.DoesNotExist: return None except LookupError: pass except ValidationError as e: raise ValidationError(e.__str__()) except TypeError as e: raise TypeError(e.__str__()) except Exception as e: raise Exception(e.__str__())
[ "def", "get_obj", "(", "app_label", ",", "model_name", ",", "object_id", ")", ":", "try", ":", "model", "=", "apps", ".", "get_model", "(", "\"{}.{}\"", ".", "format", "(", "app_label", ",", "model_name", ")", ")", "assert", "is_valid_django_model", "(", "model", ")", ",", "(", "\"Model {}.{} do not exist.\"", ")", ".", "format", "(", "app_label", ",", "model_name", ")", "obj", "=", "get_Object_or_None", "(", "model", ",", "pk", "=", "object_id", ")", "return", "obj", "except", "model", ".", "DoesNotExist", ":", "return", "None", "except", "LookupError", ":", "pass", "except", "ValidationError", "as", "e", ":", "raise", "ValidationError", "(", "e", ".", "__str__", "(", ")", ")", "except", "TypeError", "as", "e", ":", "raise", "TypeError", "(", "e", ".", "__str__", "(", ")", ")", "except", "Exception", "as", "e", ":", "raise", "Exception", "(", "e", ".", "__str__", "(", ")", ")" ]
Function used to get a object :param app_label: A valid Django Model or a string with format: <app_label>.<model_name> :param model_name: Key into kwargs that contains de data: new_person :param object_id: :return: instance
[ "Function", "used", "to", "get", "a", "object", ":", "param", "app_label", ":", "A", "valid", "Django", "Model", "or", "a", "string", "with", "format", ":", "<app_label", ">", ".", "<model_name", ">", ":", "param", "model_name", ":", "Key", "into", "kwargs", "that", "contains", "de", "data", ":", "new_person", ":", "param", "object_id", ":", ":", "return", ":", "instance" ]
python
train
31.592593
fastai/fastai
fastai/callback.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L364-L367
def annealing_cos(start:Number, end:Number, pct:float)->Number: "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." cos_out = np.cos(np.pi * pct) + 1 return end + (start-end)/2 * cos_out
[ "def", "annealing_cos", "(", "start", ":", "Number", ",", "end", ":", "Number", ",", "pct", ":", "float", ")", "->", "Number", ":", "cos_out", "=", "np", ".", "cos", "(", "np", ".", "pi", "*", "pct", ")", "+", "1", "return", "end", "+", "(", "start", "-", "end", ")", "/", "2", "*", "cos_out" ]
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
[ "Cosine", "anneal", "from", "start", "to", "end", "as", "pct", "goes", "from", "0", ".", "0", "to", "1", ".", "0", "." ]
python
train
52.5
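A quick worked check of the cosine schedule above (illustrative only; it restates the one-liner rather than importing fastai):

```python
import numpy as np

def annealing_cos(start, end, pct):
    # Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
    return end + (start - end) / 2 * (np.cos(np.pi * pct) + 1)

print(annealing_cos(1.0, 0.0, 0.0))  # 1.0  -> schedule begins at `start`
print(annealing_cos(1.0, 0.0, 0.5))  # 0.5  -> midpoint of the cosine curve
print(annealing_cos(1.0, 0.0, 1.0))  # ~0.0 -> schedule ends at `end`
```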
dnanexus/dx-toolkit
src/python/dxpy/bindings/dxapplet.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/bindings/dxapplet.py#L158-L227
def run(self, executable_input, project=None, folder=None, name=None, tags=None, properties=None, details=None, instance_type=None, stage_instance_types=None, stage_folders=None, rerun_stages=None, cluster_spec=None, depends_on=None, allow_ssh=None, debug=None, delay_workspace_destruction=None, priority=None, ignore_reuse=None, ignore_reuse_stages=None, extra_args=None, **kwargs): ''' :param executable_input: Hash of the executable's input arguments :type executable_input: dict :param project: Project ID of the project context :type project: string :param folder: Folder in which executable's outputs will be placed in *project* :type folder: string :param name: Name for the new job (default is "<name of the executable>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :param allow_ssh: List of hostname or IP masks to allow SSH connections from :type allow_ssh: list :param debug: Configuration options for job debugging :type debug: dict :param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails :type delay_workspace_destruction: boolean :param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high" :type priority: string :param ignore_reuse: Disable job reuse for this execution :type ignore_reuse: boolean :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled :type ignore_reuse_stages: list :param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call :type extra_args: dict :returns: Object handler of the newly created job :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates a new job that executes the function "main" of this executable with the given input *executable_input*. ''' # stage_instance_types, stage_folders, and rerun_stages are # only supported for workflows, but we include them # here. Applet-based executables should detect when they # receive a truthy workflow-specific value and raise an error. run_input = self._get_run_input(executable_input, project=project, folder=folder, name=name, tags=tags, properties=properties, details=details, instance_type=instance_type, stage_instance_types=stage_instance_types, stage_folders=stage_folders, rerun_stages=rerun_stages, cluster_spec=cluster_spec, depends_on=depends_on, allow_ssh=allow_ssh, ignore_reuse=ignore_reuse, ignore_reuse_stages=ignore_reuse_stages, debug=debug, delay_workspace_destruction=delay_workspace_destruction, priority=priority, extra_args=extra_args) return self._run_impl(run_input, **kwargs)
[ "def", "run", "(", "self", ",", "executable_input", ",", "project", "=", "None", ",", "folder", "=", "None", ",", "name", "=", "None", ",", "tags", "=", "None", ",", "properties", "=", "None", ",", "details", "=", "None", ",", "instance_type", "=", "None", ",", "stage_instance_types", "=", "None", ",", "stage_folders", "=", "None", ",", "rerun_stages", "=", "None", ",", "cluster_spec", "=", "None", ",", "depends_on", "=", "None", ",", "allow_ssh", "=", "None", ",", "debug", "=", "None", ",", "delay_workspace_destruction", "=", "None", ",", "priority", "=", "None", ",", "ignore_reuse", "=", "None", ",", "ignore_reuse_stages", "=", "None", ",", "extra_args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# stage_instance_types, stage_folders, and rerun_stages are", "# only supported for workflows, but we include them", "# here. Applet-based executables should detect when they", "# receive a truthy workflow-specific value and raise an error.", "run_input", "=", "self", ".", "_get_run_input", "(", "executable_input", ",", "project", "=", "project", ",", "folder", "=", "folder", ",", "name", "=", "name", ",", "tags", "=", "tags", ",", "properties", "=", "properties", ",", "details", "=", "details", ",", "instance_type", "=", "instance_type", ",", "stage_instance_types", "=", "stage_instance_types", ",", "stage_folders", "=", "stage_folders", ",", "rerun_stages", "=", "rerun_stages", ",", "cluster_spec", "=", "cluster_spec", ",", "depends_on", "=", "depends_on", ",", "allow_ssh", "=", "allow_ssh", ",", "ignore_reuse", "=", "ignore_reuse", ",", "ignore_reuse_stages", "=", "ignore_reuse_stages", ",", "debug", "=", "debug", ",", "delay_workspace_destruction", "=", "delay_workspace_destruction", ",", "priority", "=", "priority", ",", "extra_args", "=", "extra_args", ")", "return", "self", ".", "_run_impl", "(", "run_input", ",", "*", "*", "kwargs", ")" ]
:param executable_input: Hash of the executable's input arguments :type executable_input: dict :param project: Project ID of the project context :type project: string :param folder: Folder in which executable's outputs will be placed in *project* :type folder: string :param name: Name for the new job (default is "<name of the executable>") :type name: string :param tags: Tags to associate with the job :type tags: list of strings :param properties: Properties to associate with the job :type properties: dict with string values :param details: Details to set for the job :type details: dict or list :param instance_type: Instance type on which the jobs will be run, or a dict mapping function names to instance type requests :type instance_type: string or dict :param depends_on: List of data objects or jobs to wait that need to enter the "closed" or "done" states, respectively, before the new job will be run; each element in the list can either be a dxpy handler or a string ID :type depends_on: list :param allow_ssh: List of hostname or IP masks to allow SSH connections from :type allow_ssh: list :param debug: Configuration options for job debugging :type debug: dict :param delay_workspace_destruction: Whether to keep the job's temporary workspace around for debugging purposes for 3 days after it succeeds or fails :type delay_workspace_destruction: boolean :param priority: Priority level to request for all jobs created in the execution tree, either "normal" or "high" :type priority: string :param ignore_reuse: Disable job reuse for this execution :type ignore_reuse: boolean :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled :type ignore_reuse_stages: list :param extra_args: If provided, a hash of options that will be merged into the underlying JSON given for the API call :type extra_args: dict :returns: Object handler of the newly created job :rtype: :class:`~dxpy.bindings.dxjob.DXJob` Creates a new job that executes the function "main" of this executable with the given input *executable_input*.
[ ":", "param", "executable_input", ":", "Hash", "of", "the", "executable", "s", "input", "arguments", ":", "type", "executable_input", ":", "dict", ":", "param", "project", ":", "Project", "ID", "of", "the", "project", "context", ":", "type", "project", ":", "string", ":", "param", "folder", ":", "Folder", "in", "which", "executable", "s", "outputs", "will", "be", "placed", "in", "*", "project", "*", ":", "type", "folder", ":", "string", ":", "param", "name", ":", "Name", "for", "the", "new", "job", "(", "default", "is", "<name", "of", "the", "executable", ">", ")", ":", "type", "name", ":", "string", ":", "param", "tags", ":", "Tags", "to", "associate", "with", "the", "job", ":", "type", "tags", ":", "list", "of", "strings", ":", "param", "properties", ":", "Properties", "to", "associate", "with", "the", "job", ":", "type", "properties", ":", "dict", "with", "string", "values", ":", "param", "details", ":", "Details", "to", "set", "for", "the", "job", ":", "type", "details", ":", "dict", "or", "list", ":", "param", "instance_type", ":", "Instance", "type", "on", "which", "the", "jobs", "will", "be", "run", "or", "a", "dict", "mapping", "function", "names", "to", "instance", "type", "requests", ":", "type", "instance_type", ":", "string", "or", "dict", ":", "param", "depends_on", ":", "List", "of", "data", "objects", "or", "jobs", "to", "wait", "that", "need", "to", "enter", "the", "closed", "or", "done", "states", "respectively", "before", "the", "new", "job", "will", "be", "run", ";", "each", "element", "in", "the", "list", "can", "either", "be", "a", "dxpy", "handler", "or", "a", "string", "ID", ":", "type", "depends_on", ":", "list", ":", "param", "allow_ssh", ":", "List", "of", "hostname", "or", "IP", "masks", "to", "allow", "SSH", "connections", "from", ":", "type", "allow_ssh", ":", "list", ":", "param", "debug", ":", "Configuration", "options", "for", "job", "debugging", ":", "type", "debug", ":", "dict", ":", "param", "delay_workspace_destruction", ":", "Whether", "to", "keep", "the", "job", "s", "temporary", "workspace", "around", "for", "debugging", "purposes", "for", "3", "days", "after", "it", "succeeds", "or", "fails", ":", "type", "delay_workspace_destruction", ":", "boolean", ":", "param", "priority", ":", "Priority", "level", "to", "request", "for", "all", "jobs", "created", "in", "the", "execution", "tree", "either", "normal", "or", "high", ":", "type", "priority", ":", "string", ":", "param", "ignore_reuse", ":", "Disable", "job", "reuse", "for", "this", "execution", ":", "type", "ignore_reuse", ":", "boolean", ":", "param", "ignore_reuse_stages", ":", "Stages", "of", "a", "workflow", "(", "IDs", "names", "or", "indices", ")", "or", "*", "for", "which", "job", "reuse", "should", "be", "disabled", ":", "type", "ignore_reuse_stages", ":", "list", ":", "param", "extra_args", ":", "If", "provided", "a", "hash", "of", "options", "that", "will", "be", "merged", "into", "the", "underlying", "JSON", "given", "for", "the", "API", "call", ":", "type", "extra_args", ":", "dict", ":", "returns", ":", "Object", "handler", "of", "the", "newly", "created", "job", ":", "rtype", ":", ":", "class", ":", "~dxpy", ".", "bindings", ".", "dxjob", ".", "DXJob" ]
python
train
62.342857
openstax/cnx-easybake
cnxeasybake/oven.py
https://github.com/openstax/cnx-easybake/blob/f8edf018fb7499f6f18af0145c326b93a737a782/cnxeasybake/oven.py#L1390-L1404
def append_string(t, string): """Append a string to a node, as text or tail of last child.""" node = t.tree if string: if len(node) == 0: if node.text is not None: node.text += string else: node.text = string else: # Get last child child = list(node)[-1] if child.tail is not None: child.tail += string else: child.tail = string
[ "def", "append_string", "(", "t", ",", "string", ")", ":", "node", "=", "t", ".", "tree", "if", "string", ":", "if", "len", "(", "node", ")", "==", "0", ":", "if", "node", ".", "text", "is", "not", "None", ":", "node", ".", "text", "+=", "string", "else", ":", "node", ".", "text", "=", "string", "else", ":", "# Get last child", "child", "=", "list", "(", "node", ")", "[", "-", "1", "]", "if", "child", ".", "tail", "is", "not", "None", ":", "child", ".", "tail", "+=", "string", "else", ":", "child", ".", "tail", "=", "string" ]
Append a string to a node, as text or tail of last child.
[ "Append", "a", "string", "to", "a", "node", "as", "text", "or", "tail", "of", "last", "child", "." ]
python
train
31.133333
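The text-versus-tail distinction that append_string relies on can be seen with the standard library's ElementTree (an illustrative snippet, not code from cnx-easybake):

```python
import xml.etree.ElementTree as ET

node = ET.fromstring('<p><em>already here</em></p>')
# The node has a child, so new text goes on the last child's .tail,
# exactly as append_string does above.
last_child = list(node)[-1]
last_child.tail = (last_child.tail or '') + ' appended'
print(ET.tostring(node, encoding='unicode'))
# <p><em>already here</em> appended</p>
```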
tanghaibao/jcvi
jcvi/formats/agp.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/agp.py#L269-L275
def getNorthSouthClone(self, i): """ Returns the adjacent clone name from both sides. """ north = self.getAdjacentClone(i, south=False) south = self.getAdjacentClone(i) return north, south
[ "def", "getNorthSouthClone", "(", "self", ",", "i", ")", ":", "north", "=", "self", ".", "getAdjacentClone", "(", "i", ",", "south", "=", "False", ")", "south", "=", "self", ".", "getAdjacentClone", "(", "i", ")", "return", "north", ",", "south" ]
Returns the adjacent clone name from both sides.
[ "Returns", "the", "adjacent", "clone", "name", "from", "both", "sides", "." ]
python
train
32.857143
Erotemic/utool
utool/util_sysreq.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L11-L44
def in_virtual_env(): """ returns True if you are running inside a python virtual environment. (DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV) sys.prefix gives the location of the virtualenv Notes: It seems IPython does not respect virtual environments properly. TODO: find a solution http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages References: http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv CommandLine: python -m utool.util_sysreq in_virtual_env Example: >>> # DISABLE_DOCTEST >>> from utool.util_sysreq import * # NOQA >>> import utool as ut >>> result = in_virtual_env() >>> print(result) """ import sys has_venv = False if hasattr(sys, 'real_prefix'): # For virtualenv module has_venv = True elif hasattr(sys, 'base_prefix'): # For venv module has_venv = sys.base_prefix != sys.prefix return has_venv
[ "def", "in_virtual_env", "(", ")", ":", "import", "sys", "has_venv", "=", "False", "if", "hasattr", "(", "sys", ",", "'real_prefix'", ")", ":", "# For virtualenv module", "has_venv", "=", "True", "elif", "hasattr", "(", "sys", ",", "'base_prefix'", ")", ":", "# For venv module", "has_venv", "=", "sys", ".", "base_prefix", "!=", "sys", ".", "prefix", "return", "has_venv" ]
returns True if you are running inside a python virtual environment. (DOES NOT WORK IF IN IPYTHON AND USING A VIRTUALENV) sys.prefix gives the location of the virtualenv Notes: It seems IPython does not respect virtual environments properly. TODO: find a solution http://stackoverflow.com/questions/7335992/ipython-and-virtualenv-ignoring-site-packages References: http://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv CommandLine: python -m utool.util_sysreq in_virtual_env Example: >>> # DISABLE_DOCTEST >>> from utool.util_sysreq import * # NOQA >>> import utool as ut >>> result = in_virtual_env() >>> print(result)
[ "returns", "True", "if", "you", "are", "running", "inside", "a", "python", "virtual", "environment", ".", "(", "DOES", "NOT", "WORK", "IF", "IN", "IPYTHON", "AND", "USING", "A", "VIRTUALENV", ")" ]
python
train
30.411765
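For a quick standalone check of the venv-module branch used above (illustrative; the older virtualenv sys.real_prefix case is omitted):

```python
import sys

# True inside a `python -m venv` environment, False otherwise.
print(getattr(sys, 'base_prefix', sys.prefix) != sys.prefix)
```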
jalanb/pysyte
pysyte/getch.py
https://github.com/jalanb/pysyte/blob/4e278101943d1ceb1a6bcaf6ddc72052ecf13114/pysyte/getch.py#L282-L292
def get_string(): """A better str(_get_keycodes()) method""" keycodes = _get_keycodes() initial_code, codes = keycodes[0], keycodes[1:] initial_char = chr(initial_code) if initial_code == 27: initial_char = '\\e' elif not ascii.isgraph(initial_char): initial_char = '\\x%x' % initial_code chars = ''.join([chr(c) for c in codes]) return ''.join((initial_char, chars))
[ "def", "get_string", "(", ")", ":", "keycodes", "=", "_get_keycodes", "(", ")", "initial_code", ",", "codes", "=", "keycodes", "[", "0", "]", ",", "keycodes", "[", "1", ":", "]", "initial_char", "=", "chr", "(", "initial_code", ")", "if", "initial_code", "==", "27", ":", "initial_char", "=", "'\\\\e'", "elif", "not", "ascii", ".", "isgraph", "(", "initial_char", ")", ":", "initial_char", "=", "'\\\\x%x'", "%", "initial_code", "chars", "=", "''", ".", "join", "(", "[", "chr", "(", "c", ")", "for", "c", "in", "codes", "]", ")", "return", "''", ".", "join", "(", "(", "initial_char", ",", "chars", ")", ")" ]
A better str(_get_keycodes()) method
[ "A", "better", "str", "(", "_get_keycodes", "()", ")", "method" ]
python
train
36.818182
google/grr
grr/core/grr_response_core/lib/fingerprint.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/fingerprint.py#L65-L98
def ConsumeRange(self, start, end): """Consumes an entire range, or part thereof. If the finger has no ranges left, or the curent range start is higher than the end of the consumed block, nothing happens. Otherwise, the current range is adjusted for the consumed block, or removed, if the entire block is consumed. For things to work, the consumed range and the current finger starts must be equal, and the length of the consumed range may not exceed the length of the current range. Args: start: Beginning of range to be consumed. end: First offset after the consumed range (end + 1). Raises: RuntimeError: if the start position of the consumed range is higher than the start of the current range in the finger, or if the consumed range cuts accross block boundaries. """ old = self.CurrentRange() if old is None: return if old.start > start: if old.start < end: raise RuntimeError('Block end too high.') return if old.start < start: raise RuntimeError('Block start too high.') if old.end == end: del self.ranges[0] elif old.end > end: self.ranges[0] = Range(end, old.end) else: raise RuntimeError('Block length exceeds range.')
[ "def", "ConsumeRange", "(", "self", ",", "start", ",", "end", ")", ":", "old", "=", "self", ".", "CurrentRange", "(", ")", "if", "old", "is", "None", ":", "return", "if", "old", ".", "start", ">", "start", ":", "if", "old", ".", "start", "<", "end", ":", "raise", "RuntimeError", "(", "'Block end too high.'", ")", "return", "if", "old", ".", "start", "<", "start", ":", "raise", "RuntimeError", "(", "'Block start too high.'", ")", "if", "old", ".", "end", "==", "end", ":", "del", "self", ".", "ranges", "[", "0", "]", "elif", "old", ".", "end", ">", "end", ":", "self", ".", "ranges", "[", "0", "]", "=", "Range", "(", "end", ",", "old", ".", "end", ")", "else", ":", "raise", "RuntimeError", "(", "'Block length exceeds range.'", ")" ]
Consumes an entire range, or part thereof. If the finger has no ranges left, or the curent range start is higher than the end of the consumed block, nothing happens. Otherwise, the current range is adjusted for the consumed block, or removed, if the entire block is consumed. For things to work, the consumed range and the current finger starts must be equal, and the length of the consumed range may not exceed the length of the current range. Args: start: Beginning of range to be consumed. end: First offset after the consumed range (end + 1). Raises: RuntimeError: if the start position of the consumed range is higher than the start of the current range in the finger, or if the consumed range cuts accross block boundaries.
[ "Consumes", "an", "entire", "range", "or", "part", "thereof", "." ]
python
train
36.911765
siznax/wptools
wptools/page.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/page.py#L607-L643
def get_query(self, show=True, proxy=None, timeout=0): """ GET MediaWiki:API action=query selected data https://en.wikipedia.org/w/api.php?action=help&modules=query Required {params}: title OR pageid - title: <str> article title - pageid: <int> Wikipedia database ID Optional arguments: - [show]: <bool> echo page data if true - [proxy]: <str> use this HTTP proxy - [timeout]: <int> timeout in seconds (0=wait forever) Data captured: - description: <str> Wikidata description (via pageterms) - extext: <str> plain text (Markdown) extract - extract: <str> HTML extract from Extension:TextExtract - image: <dict> {query-pageimage, query-thumbnail} - label: <str> Wikidata label (via pageterms) - modified (page): <str> ISO8601 date and time - pageid: <int> Wikipedia database ID - random: <str> a random article title with every request! - requests: list of request actions made - url: <str> the canonical wiki URL - url_raw: <str> ostensible raw wikitext URL - watchers: <int> number of people watching this page """ if not self.params.get('title') and not self.params.get('pageid'): raise ValueError("get_query needs title or pageid") self._get('query', show, proxy, timeout) while self.data.get('continue'): self._get('query', show, proxy, timeout) return self
[ "def", "get_query", "(", "self", ",", "show", "=", "True", ",", "proxy", "=", "None", ",", "timeout", "=", "0", ")", ":", "if", "not", "self", ".", "params", ".", "get", "(", "'title'", ")", "and", "not", "self", ".", "params", ".", "get", "(", "'pageid'", ")", ":", "raise", "ValueError", "(", "\"get_query needs title or pageid\"", ")", "self", ".", "_get", "(", "'query'", ",", "show", ",", "proxy", ",", "timeout", ")", "while", "self", ".", "data", ".", "get", "(", "'continue'", ")", ":", "self", ".", "_get", "(", "'query'", ",", "show", ",", "proxy", ",", "timeout", ")", "return", "self" ]
GET MediaWiki:API action=query selected data https://en.wikipedia.org/w/api.php?action=help&modules=query Required {params}: title OR pageid - title: <str> article title - pageid: <int> Wikipedia database ID Optional arguments: - [show]: <bool> echo page data if true - [proxy]: <str> use this HTTP proxy - [timeout]: <int> timeout in seconds (0=wait forever) Data captured: - description: <str> Wikidata description (via pageterms) - extext: <str> plain text (Markdown) extract - extract: <str> HTML extract from Extension:TextExtract - image: <dict> {query-pageimage, query-thumbnail} - label: <str> Wikidata label (via pageterms) - modified (page): <str> ISO8601 date and time - pageid: <int> Wikipedia database ID - random: <str> a random article title with every request! - requests: list of request actions made - url: <str> the canonical wiki URL - url_raw: <str> ostensible raw wikitext URL - watchers: <int> number of people watching this page
[ "GET", "MediaWiki", ":", "API", "action", "=", "query", "selected", "data", "https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "w", "/", "api", ".", "php?action", "=", "help&modules", "=", "query" ]
python
train
39.972973
incuna/django-orderable
orderable/models.py
https://github.com/incuna/django-orderable/blob/88da9c762ef0500725f95988c8f18d9b304e6951/orderable/models.py#L73-L84
def _update(qs): """ Increment the sort_order in a queryset. Handle IntegrityErrors caused by unique constraints. """ try: with transaction.atomic(): qs.update(sort_order=models.F('sort_order') + 1) except IntegrityError: for obj in qs.order_by('-sort_order'): qs.filter(pk=obj.pk).update(sort_order=models.F('sort_order') + 1)
[ "def", "_update", "(", "qs", ")", ":", "try", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "qs", ".", "update", "(", "sort_order", "=", "models", ".", "F", "(", "'sort_order'", ")", "+", "1", ")", "except", "IntegrityError", ":", "for", "obj", "in", "qs", ".", "order_by", "(", "'-sort_order'", ")", ":", "qs", ".", "filter", "(", "pk", "=", "obj", ".", "pk", ")", ".", "update", "(", "sort_order", "=", "models", ".", "F", "(", "'sort_order'", ")", "+", "1", ")" ]
Increment the sort_order in a queryset. Handle IntegrityErrors caused by unique constraints.
[ "Increment", "the", "sort_order", "in", "a", "queryset", "." ]
python
train
35.083333
DreamLab/VmShepherd
src/vmshepherd/iaas/openstack_driver.py
https://github.com/DreamLab/VmShepherd/blob/709a412c372b897d53808039c5c64a8b69c12c8d/src/vmshepherd/iaas/openstack_driver.py#L30-L52
def initialize_openstack(func): ''' Initialize and refresh openstack connection ''' async def wrap(self, *args, **kwargs): if not hasattr(self, 'auth') or not self.auth.is_token_valid(): self.auth = AuthPassword(auth_url=self.config['auth_url'], username=self.config['username'], password=self.config['password'], project_name=self.config['project_name'], user_domain_name=self.config['user_domain_name'], project_domain_name=self.config['project_domain_name']) self.nova = NovaClient(session=self.auth) self.glance = GlanceClient(session=self.auth) await self.nova.init_api(timeout=self.config.get('http_timeout', 10)) await self.glance.init_api(timeout=self.config.get('http_timeout', 10)) if not hasattr(self, 'last_init') or self.last_init < (time.time() - 60): await self.initialize() self.last_init = time.time() return await func(self, *args, **kwargs) return wrap
[ "def", "initialize_openstack", "(", "func", ")", ":", "async", "def", "wrap", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'auth'", ")", "or", "not", "self", ".", "auth", ".", "is_token_valid", "(", ")", ":", "self", ".", "auth", "=", "AuthPassword", "(", "auth_url", "=", "self", ".", "config", "[", "'auth_url'", "]", ",", "username", "=", "self", ".", "config", "[", "'username'", "]", ",", "password", "=", "self", ".", "config", "[", "'password'", "]", ",", "project_name", "=", "self", ".", "config", "[", "'project_name'", "]", ",", "user_domain_name", "=", "self", ".", "config", "[", "'user_domain_name'", "]", ",", "project_domain_name", "=", "self", ".", "config", "[", "'project_domain_name'", "]", ")", "self", ".", "nova", "=", "NovaClient", "(", "session", "=", "self", ".", "auth", ")", "self", ".", "glance", "=", "GlanceClient", "(", "session", "=", "self", ".", "auth", ")", "await", "self", ".", "nova", ".", "init_api", "(", "timeout", "=", "self", ".", "config", ".", "get", "(", "'http_timeout'", ",", "10", ")", ")", "await", "self", ".", "glance", ".", "init_api", "(", "timeout", "=", "self", ".", "config", ".", "get", "(", "'http_timeout'", ",", "10", ")", ")", "if", "not", "hasattr", "(", "self", ",", "'last_init'", ")", "or", "self", ".", "last_init", "<", "(", "time", ".", "time", "(", ")", "-", "60", ")", ":", "await", "self", ".", "initialize", "(", ")", "self", ".", "last_init", "=", "time", ".", "time", "(", ")", "return", "await", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrap" ]
Initialize and refresh openstack connection
[ "Initialize", "and", "refresh", "openstack", "connection" ]
python
train
54.26087
ranaroussi/qtpylib
qtpylib/asynctools.py
https://github.com/ranaroussi/qtpylib/blob/0dbbc465fafd9cb9b0f4d10e1e07fae4e15032dd/qtpylib/asynctools.py#L179-L189
def run(self): """Start the recurring task.""" if self.init_sec: sleep(self.init_sec) self._functime = time() while self._running: start = time() self._func() self._functime += self.interval_sec if self._functime - start > 0: sleep(self._functime - start)
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "init_sec", ":", "sleep", "(", "self", ".", "init_sec", ")", "self", ".", "_functime", "=", "time", "(", ")", "while", "self", ".", "_running", ":", "start", "=", "time", "(", ")", "self", ".", "_func", "(", ")", "self", ".", "_functime", "+=", "self", ".", "interval_sec", "if", "self", ".", "_functime", "-", "start", ">", "0", ":", "sleep", "(", "self", ".", "_functime", "-", "start", ")" ]
Start the recurring task.
[ "Start", "the", "recurring", "task", "." ]
python
train
32.090909
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1367-L1377
def uniqueId(self, prefix=""): """ Generate a unique integer id (unique within the entire client session). Useful for temporary DOM ids. """ _IdCounter.count += 1 id = _IdCounter.count if prefix: return self._wrap(prefix + str(id)) else: return self._wrap(id)
[ "def", "uniqueId", "(", "self", ",", "prefix", "=", "\"\"", ")", ":", "_IdCounter", ".", "count", "+=", "1", "id", "=", "_IdCounter", ".", "count", "if", "prefix", ":", "return", "self", ".", "_wrap", "(", "prefix", "+", "str", "(", "id", ")", ")", "else", ":", "return", "self", ".", "_wrap", "(", "id", ")" ]
Generate a unique integer id (unique within the entire client session). Useful for temporary DOM ids.
[ "Generate", "a", "unique", "integer", "id", "(", "unique", "within", "the", "entire", "client", "session", ")", ".", "Useful", "for", "temporary", "DOM", "ids", "." ]
python
train
30.636364
LonamiWebs/Telethon
telethon_generator/generators/tlobject.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_generator/generators/tlobject.py#L410-L527
def _write_arg_to_bytes(builder, arg, args, name=None): """ Writes the .__bytes__() code for the given argument :param builder: The source code builder :param arg: The argument to write :param args: All the other arguments in TLObject same __bytes__. This is required to determine the flags value :param name: The name of the argument. Defaults to "self.argname" This argument is an option because it's required when writing Vectors<> """ if arg.generic_definition: return # Do nothing, this only specifies a later type if name is None: name = 'self.{}'.format(arg.name) # The argument may be a flag, only write if it's not None AND # if it's not a True type. # True types are not actually sent, but instead only used to # determine the flags. if arg.is_flag: if arg.type == 'true': return # Exit, since True type is never written elif arg.is_vector: # Vector flags are special since they consist of 3 values, # so we need an extra join here. Note that empty vector flags # should NOT be sent either! builder.write("b'' if {0} is None or {0} is False " "else b''.join((", name) else: builder.write("b'' if {0} is None or {0} is False " "else (", name) if arg.is_vector: if arg.use_vector_id: # vector code, unsigned 0x1cb5c415 as little endian builder.write(r"b'\x15\xc4\xb5\x1c',") builder.write("struct.pack('<i', len({})),", name) # Cannot unpack the values for the outer tuple through *[( # since that's a Python >3.5 feature, so add another join. builder.write("b''.join(") # Temporary disable .is_vector, not to enter this if again # Also disable .is_flag since it's not needed per element old_flag = arg.is_flag arg.is_vector = arg.is_flag = False _write_arg_to_bytes(builder, arg, args, name='x') arg.is_vector = True arg.is_flag = old_flag builder.write(' for x in {})', name) elif arg.flag_indicator: # Calculate the flags with those items which are not None if not any(f.is_flag for f in args): # There's a flag indicator, but no flag arguments so it's 0 builder.write(r"b'\0\0\0\0'") else: builder.write("struct.pack('<I', ") builder.write( ' | '.join('(0 if {0} is None or {0} is False else {1})' .format('self.{}'.format(flag.name), 1 << flag.flag_index) for flag in args if flag.is_flag) ) builder.write(')') elif 'int' == arg.type: # struct.pack is around 4 times faster than int.to_bytes builder.write("struct.pack('<i', {})", name) elif 'long' == arg.type: builder.write("struct.pack('<q', {})", name) elif 'int128' == arg.type: builder.write("{}.to_bytes(16, 'little', signed=True)", name) elif 'int256' == arg.type: builder.write("{}.to_bytes(32, 'little', signed=True)", name) elif 'double' == arg.type: builder.write("struct.pack('<d', {})", name) elif 'string' == arg.type: builder.write('self.serialize_bytes({})', name) elif 'Bool' == arg.type: # 0x997275b5 if boolean else 0xbc799737 builder.write(r"b'\xb5ur\x99' if {} else b'7\x97y\xbc'", name) elif 'true' == arg.type: pass # These are actually NOT written! Only used for flags elif 'bytes' == arg.type: builder.write('self.serialize_bytes({})', name) elif 'date' == arg.type: # Custom format builder.write('self.serialize_datetime({})', name) else: # Else it may be a custom type builder.write('bytes({})', name) # If the type is not boxed (i.e. starts with lowercase) we should # not serialize the constructor ID (so remove its first 4 bytes). boxed = arg.type[arg.type.find('.') + 1].isupper() if not boxed: builder.write('[4:]') if arg.is_flag: builder.write(')') if arg.is_vector: builder.write(')') # We were using a tuple return True
[ "def", "_write_arg_to_bytes", "(", "builder", ",", "arg", ",", "args", ",", "name", "=", "None", ")", ":", "if", "arg", ".", "generic_definition", ":", "return", "# Do nothing, this only specifies a later type", "if", "name", "is", "None", ":", "name", "=", "'self.{}'", ".", "format", "(", "arg", ".", "name", ")", "# The argument may be a flag, only write if it's not None AND", "# if it's not a True type.", "# True types are not actually sent, but instead only used to", "# determine the flags.", "if", "arg", ".", "is_flag", ":", "if", "arg", ".", "type", "==", "'true'", ":", "return", "# Exit, since True type is never written", "elif", "arg", ".", "is_vector", ":", "# Vector flags are special since they consist of 3 values,", "# so we need an extra join here. Note that empty vector flags", "# should NOT be sent either!", "builder", ".", "write", "(", "\"b'' if {0} is None or {0} is False \"", "\"else b''.join((\"", ",", "name", ")", "else", ":", "builder", ".", "write", "(", "\"b'' if {0} is None or {0} is False \"", "\"else (\"", ",", "name", ")", "if", "arg", ".", "is_vector", ":", "if", "arg", ".", "use_vector_id", ":", "# vector code, unsigned 0x1cb5c415 as little endian", "builder", ".", "write", "(", "r\"b'\\x15\\xc4\\xb5\\x1c',\"", ")", "builder", ".", "write", "(", "\"struct.pack('<i', len({})),\"", ",", "name", ")", "# Cannot unpack the values for the outer tuple through *[(", "# since that's a Python >3.5 feature, so add another join.", "builder", ".", "write", "(", "\"b''.join(\"", ")", "# Temporary disable .is_vector, not to enter this if again", "# Also disable .is_flag since it's not needed per element", "old_flag", "=", "arg", ".", "is_flag", "arg", ".", "is_vector", "=", "arg", ".", "is_flag", "=", "False", "_write_arg_to_bytes", "(", "builder", ",", "arg", ",", "args", ",", "name", "=", "'x'", ")", "arg", ".", "is_vector", "=", "True", "arg", ".", "is_flag", "=", "old_flag", "builder", ".", "write", "(", "' for x in {})'", ",", "name", ")", "elif", "arg", ".", "flag_indicator", ":", "# Calculate the flags with those items which are not None", "if", "not", "any", "(", "f", ".", "is_flag", "for", "f", "in", "args", ")", ":", "# There's a flag indicator, but no flag arguments so it's 0", "builder", ".", "write", "(", "r\"b'\\0\\0\\0\\0'\"", ")", "else", ":", "builder", ".", "write", "(", "\"struct.pack('<I', \"", ")", "builder", ".", "write", "(", "' | '", ".", "join", "(", "'(0 if {0} is None or {0} is False else {1})'", ".", "format", "(", "'self.{}'", ".", "format", "(", "flag", ".", "name", ")", ",", "1", "<<", "flag", ".", "flag_index", ")", "for", "flag", "in", "args", "if", "flag", ".", "is_flag", ")", ")", "builder", ".", "write", "(", "')'", ")", "elif", "'int'", "==", "arg", ".", "type", ":", "# struct.pack is around 4 times faster than int.to_bytes", "builder", ".", "write", "(", "\"struct.pack('<i', {})\"", ",", "name", ")", "elif", "'long'", "==", "arg", ".", "type", ":", "builder", ".", "write", "(", "\"struct.pack('<q', {})\"", ",", "name", ")", "elif", "'int128'", "==", "arg", ".", "type", ":", "builder", ".", "write", "(", "\"{}.to_bytes(16, 'little', signed=True)\"", ",", "name", ")", "elif", "'int256'", "==", "arg", ".", "type", ":", "builder", ".", "write", "(", "\"{}.to_bytes(32, 'little', signed=True)\"", ",", "name", ")", "elif", "'double'", "==", "arg", ".", "type", ":", "builder", ".", "write", "(", "\"struct.pack('<d', {})\"", ",", "name", ")", "elif", "'string'", "==", "arg", ".", "type", ":", "builder", ".", "write", "(", 
"'self.serialize_bytes({})'", ",", "name", ")", "elif", "'Bool'", "==", "arg", ".", "type", ":", "# 0x997275b5 if boolean else 0xbc799737", "builder", ".", "write", "(", "r\"b'\\xb5ur\\x99' if {} else b'7\\x97y\\xbc'\"", ",", "name", ")", "elif", "'true'", "==", "arg", ".", "type", ":", "pass", "# These are actually NOT written! Only used for flags", "elif", "'bytes'", "==", "arg", ".", "type", ":", "builder", ".", "write", "(", "'self.serialize_bytes({})'", ",", "name", ")", "elif", "'date'", "==", "arg", ".", "type", ":", "# Custom format", "builder", ".", "write", "(", "'self.serialize_datetime({})'", ",", "name", ")", "else", ":", "# Else it may be a custom type", "builder", ".", "write", "(", "'bytes({})'", ",", "name", ")", "# If the type is not boxed (i.e. starts with lowercase) we should", "# not serialize the constructor ID (so remove its first 4 bytes).", "boxed", "=", "arg", ".", "type", "[", "arg", ".", "type", ".", "find", "(", "'.'", ")", "+", "1", "]", ".", "isupper", "(", ")", "if", "not", "boxed", ":", "builder", ".", "write", "(", "'[4:]'", ")", "if", "arg", ".", "is_flag", ":", "builder", ".", "write", "(", "')'", ")", "if", "arg", ".", "is_vector", ":", "builder", ".", "write", "(", "')'", ")", "# We were using a tuple", "return", "True" ]
Writes the .__bytes__() code for the given argument :param builder: The source code builder :param arg: The argument to write :param args: All the other arguments in TLObject same __bytes__. This is required to determine the flags value :param name: The name of the argument. Defaults to "self.argname" This argument is an option because it's required when writing Vectors<>
[ "Writes", "the", ".", "__bytes__", "()", "code", "for", "the", "given", "argument", ":", "param", "builder", ":", "The", "source", "code", "builder", ":", "param", "arg", ":", "The", "argument", "to", "write", ":", "param", "args", ":", "All", "the", "other", "arguments", "in", "TLObject", "same", "__bytes__", ".", "This", "is", "required", "to", "determine", "the", "flags", "value", ":", "param", "name", ":", "The", "name", "of", "the", "argument", ".", "Defaults", "to", "self", ".", "argname", "This", "argument", "is", "an", "option", "because", "it", "s", "required", "when", "writing", "Vectors<", ">" ]
python
train
36.211864
anchore/anchore
anchore/cli/common.py
https://github.com/anchore/anchore/blob/8a4d5b9708e27856312d303aae3f04f3c72039d6/anchore/cli/common.py#L14-L49
def extended_help_option(extended_help=None, *param_decls, **attrs): """ Based on the click.help_option code. Adds a ``--extended-help`` option which immediately ends the program printing out the extended extended-help page. Defaults to using the callback's doc string, but can be given an explicit value as well. This is intended for use as a decorator on a command to provide a 3rd level of help verbosity suitable for use as a manpage (though not formatted as such explicitly). Like :func:`version_option`, this is implemented as eager option that prints in the callback and exits. All arguments are forwarded to :func:`option`. """ def decorator(f): def callback(ctx, param, value): if value and not ctx.resilient_parsing: if not extended_help: ctx.command.help = ctx.command.callback.__doc__ click.echo(ctx.get_help(), color=ctx.color) else: ctx.command.help = extended_help click.echo(ctx.get_help(), color=ctx.color) ctx.exit() attrs.setdefault('is_flag', True) attrs.setdefault('expose_value', False) attrs.setdefault('help', 'Show extended help content, similar to manpage, and exit.') attrs.setdefault('is_eager', True) attrs['callback'] = callback return click.option(*(param_decls or ('--extended-help',)), **attrs)(f) return decorator
[ "def", "extended_help_option", "(", "extended_help", "=", "None", ",", "*", "param_decls", ",", "*", "*", "attrs", ")", ":", "def", "decorator", "(", "f", ")", ":", "def", "callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "value", "and", "not", "ctx", ".", "resilient_parsing", ":", "if", "not", "extended_help", ":", "ctx", ".", "command", ".", "help", "=", "ctx", ".", "command", ".", "callback", ".", "__doc__", "click", ".", "echo", "(", "ctx", ".", "get_help", "(", ")", ",", "color", "=", "ctx", ".", "color", ")", "else", ":", "ctx", ".", "command", ".", "help", "=", "extended_help", "click", ".", "echo", "(", "ctx", ".", "get_help", "(", ")", ",", "color", "=", "ctx", ".", "color", ")", "ctx", ".", "exit", "(", ")", "attrs", ".", "setdefault", "(", "'is_flag'", ",", "True", ")", "attrs", ".", "setdefault", "(", "'expose_value'", ",", "False", ")", "attrs", ".", "setdefault", "(", "'help'", ",", "'Show extended help content, similar to manpage, and exit.'", ")", "attrs", ".", "setdefault", "(", "'is_eager'", ",", "True", ")", "attrs", "[", "'callback'", "]", "=", "callback", "return", "click", ".", "option", "(", "*", "(", "param_decls", "or", "(", "'--extended-help'", ",", ")", ")", ",", "*", "*", "attrs", ")", "(", "f", ")", "return", "decorator" ]
Based on the click.help_option code. Adds a ``--extended-help`` option which immediately ends the program printing out the extended extended-help page. Defaults to using the callback's doc string, but can be given an explicit value as well. This is intended for use as a decorator on a command to provide a 3rd level of help verbosity suitable for use as a manpage (though not formatted as such explicitly). Like :func:`version_option`, this is implemented as eager option that prints in the callback and exits. All arguments are forwarded to :func:`option`.
[ "Based", "on", "the", "click", ".", "help_option", "code", "." ]
python
train
40.75
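A hypothetical usage sketch for the decorator above; the command name and help strings are invented, and it assumes extended_help_option is importable alongside click:

```python
import click
# from anchore.cli.common import extended_help_option  # assumed import path

@click.command()
@extended_help_option(extended_help="Much longer, manpage-style help text.")
def status():
    """Show a short status summary."""
    click.echo("ok")

if __name__ == '__main__':
    status()  # `status --extended-help` prints the long help and exits
```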
nerdvegas/rez
src/rez/vendor/pygraph/algorithms/cycles.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/vendor/pygraph/algorithms/cycles.py#L38-L108
def find_cycle(graph): """ Find a cycle in the given graph. This function will return a list of nodes which form a cycle in the graph or an empty list if no cycle exists. @type graph: graph, digraph @param graph: Graph. @rtype: list @return: List of nodes. """ if (isinstance(graph, graph_class)): directed = False elif (isinstance(graph, digraph_class)): directed = True else: raise InvalidGraphType def find_cycle_to_ancestor(node, ancestor): """ Find a cycle containing both node and ancestor. """ path = [] while (node != ancestor): if (node is None): return [] path.append(node) node = spanning_tree[node] path.append(node) path.reverse() return path def dfs(node): """ Depth-first search subfunction. """ visited[node] = 1 # Explore recursively the connected component for each in graph[node]: if (cycle): return if (each not in visited): spanning_tree[each] = node dfs(each) else: if (directed or spanning_tree[node] != each): cycle.extend(find_cycle_to_ancestor(node, each)) recursionlimit = getrecursionlimit() setrecursionlimit(max(len(graph.nodes())*2,recursionlimit)) visited = {} # List for marking visited and non-visited nodes spanning_tree = {} # Spanning tree cycle = [] # Algorithm outer-loop for each in graph: # Select a non-visited node if (each not in visited): spanning_tree[each] = None # Explore node's connected component dfs(each) if (cycle): setrecursionlimit(recursionlimit) return cycle setrecursionlimit(recursionlimit) return []
[ "def", "find_cycle", "(", "graph", ")", ":", "if", "(", "isinstance", "(", "graph", ",", "graph_class", ")", ")", ":", "directed", "=", "False", "elif", "(", "isinstance", "(", "graph", ",", "digraph_class", ")", ")", ":", "directed", "=", "True", "else", ":", "raise", "InvalidGraphType", "def", "find_cycle_to_ancestor", "(", "node", ",", "ancestor", ")", ":", "\"\"\"\n Find a cycle containing both node and ancestor.\n \"\"\"", "path", "=", "[", "]", "while", "(", "node", "!=", "ancestor", ")", ":", "if", "(", "node", "is", "None", ")", ":", "return", "[", "]", "path", ".", "append", "(", "node", ")", "node", "=", "spanning_tree", "[", "node", "]", "path", ".", "append", "(", "node", ")", "path", ".", "reverse", "(", ")", "return", "path", "def", "dfs", "(", "node", ")", ":", "\"\"\"\n Depth-first search subfunction.\n \"\"\"", "visited", "[", "node", "]", "=", "1", "# Explore recursively the connected component", "for", "each", "in", "graph", "[", "node", "]", ":", "if", "(", "cycle", ")", ":", "return", "if", "(", "each", "not", "in", "visited", ")", ":", "spanning_tree", "[", "each", "]", "=", "node", "dfs", "(", "each", ")", "else", ":", "if", "(", "directed", "or", "spanning_tree", "[", "node", "]", "!=", "each", ")", ":", "cycle", ".", "extend", "(", "find_cycle_to_ancestor", "(", "node", ",", "each", ")", ")", "recursionlimit", "=", "getrecursionlimit", "(", ")", "setrecursionlimit", "(", "max", "(", "len", "(", "graph", ".", "nodes", "(", ")", ")", "*", "2", ",", "recursionlimit", ")", ")", "visited", "=", "{", "}", "# List for marking visited and non-visited nodes", "spanning_tree", "=", "{", "}", "# Spanning tree", "cycle", "=", "[", "]", "# Algorithm outer-loop", "for", "each", "in", "graph", ":", "# Select a non-visited node", "if", "(", "each", "not", "in", "visited", ")", ":", "spanning_tree", "[", "each", "]", "=", "None", "# Explore node's connected component", "dfs", "(", "each", ")", "if", "(", "cycle", ")", ":", "setrecursionlimit", "(", "recursionlimit", ")", "return", "cycle", "setrecursionlimit", "(", "recursionlimit", ")", "return", "[", "]" ]
Find a cycle in the given graph. This function will return a list of nodes which form a cycle in the graph or an empty list if no cycle exists. @type graph: graph, digraph @param graph: Graph. @rtype: list @return: List of nodes.
[ "Find", "a", "cycle", "in", "the", "given", "graph", ".", "This", "function", "will", "return", "a", "list", "of", "nodes", "which", "form", "a", "cycle", "in", "the", "graph", "or", "an", "empty", "list", "if", "no", "cycle", "exists", "." ]
python
train
27.28169
markuskiller/textblob-de
textblob_de/ext/_pattern/text/tree.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/tree.py#L1440-L1487
def parse_string(xml): """ Returns a slash-formatted string from the given XML representation. The return value is a TokenString (for MBSP) or TaggedString (for Pattern). """ string = "" # Traverse all the <sentence> elements in the XML. dom = XML(xml) for sentence in dom(XML_SENTENCE): _anchors.clear() # Populated by calling _parse_tokens(). _attachments.clear() # Populated by calling _parse_tokens(). # Parse the language from <sentence language="">. language = sentence.get(XML_LANGUAGE, "en") # Parse the token tag format from <sentence token="">. # This information is returned in TokenString.tags, # so the format and order of the token tags is retained when exporting/importing as XML. format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA]) format = not isinstance(format, basestring) and format or format.replace(" ","").split(",") # Traverse all <chunk> and <chink> elements in the sentence. # Find the <word> elements inside and create tokens. tokens = [] for chunk in sentence: tokens.extend(_parse_tokens(chunk, format)) # Attach PNP's to their anchors. # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens). # The keys correspond to the keys in _attachments, which have linked PNP chunks. if ANCHOR in format: A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR) for id in sorted(A.keys()): for token in A[id]: token[i] += "-"+"-".join(["A"+str(a+p) for p in range(len(P[id]))]) token[i] = token[i].strip("O-") for p, pnp in enumerate(P[id]): for token in pnp: token[i] += "-"+"P"+str(a+p) token[i] = token[i].strip("O-") a += len(P[id]) # Collapse the tokens to string. # Separate multiple sentences with a new line. tokens = ["/".join([tag for tag in token]) for token in tokens] tokens = " ".join(tokens) string += tokens + "\n" # Return a TokenString, which is a unicode string that transforms easily # into a plain str, a list of tokens, or a Sentence. try: if MBSP: from mbsp import TokenString return TokenString(string.strip(), tags=format, language=language) except: return TaggedString(string.strip(), tags=format, language=language)
[ "def", "parse_string", "(", "xml", ")", ":", "string", "=", "\"\"", "# Traverse all the <sentence> elements in the XML.", "dom", "=", "XML", "(", "xml", ")", "for", "sentence", "in", "dom", "(", "XML_SENTENCE", ")", ":", "_anchors", ".", "clear", "(", ")", "# Populated by calling _parse_tokens().", "_attachments", ".", "clear", "(", ")", "# Populated by calling _parse_tokens().", "# Parse the language from <sentence language=\"\">.", "language", "=", "sentence", ".", "get", "(", "XML_LANGUAGE", ",", "\"en\"", ")", "# Parse the token tag format from <sentence token=\"\">.", "# This information is returned in TokenString.tags,", "# so the format and order of the token tags is retained when exporting/importing as XML.", "format", "=", "sentence", ".", "get", "(", "XML_TOKEN", ",", "[", "WORD", ",", "POS", ",", "CHUNK", ",", "PNP", ",", "REL", ",", "ANCHOR", ",", "LEMMA", "]", ")", "format", "=", "not", "isinstance", "(", "format", ",", "basestring", ")", "and", "format", "or", "format", ".", "replace", "(", "\" \"", ",", "\"\"", ")", ".", "split", "(", "\",\"", ")", "# Traverse all <chunk> and <chink> elements in the sentence.", "# Find the <word> elements inside and create tokens.", "tokens", "=", "[", "]", "for", "chunk", "in", "sentence", ":", "tokens", ".", "extend", "(", "_parse_tokens", "(", "chunk", ",", "format", ")", ")", "# Attach PNP's to their anchors.", "# Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).", "# The keys correspond to the keys in _attachments, which have linked PNP chunks.", "if", "ANCHOR", "in", "format", ":", "A", ",", "P", ",", "a", ",", "i", "=", "_anchors", ",", "_attachments", ",", "1", ",", "format", ".", "index", "(", "ANCHOR", ")", "for", "id", "in", "sorted", "(", "A", ".", "keys", "(", ")", ")", ":", "for", "token", "in", "A", "[", "id", "]", ":", "token", "[", "i", "]", "+=", "\"-\"", "+", "\"-\"", ".", "join", "(", "[", "\"A\"", "+", "str", "(", "a", "+", "p", ")", "for", "p", "in", "range", "(", "len", "(", "P", "[", "id", "]", ")", ")", "]", ")", "token", "[", "i", "]", "=", "token", "[", "i", "]", ".", "strip", "(", "\"O-\"", ")", "for", "p", ",", "pnp", "in", "enumerate", "(", "P", "[", "id", "]", ")", ":", "for", "token", "in", "pnp", ":", "token", "[", "i", "]", "+=", "\"-\"", "+", "\"P\"", "+", "str", "(", "a", "+", "p", ")", "token", "[", "i", "]", "=", "token", "[", "i", "]", ".", "strip", "(", "\"O-\"", ")", "a", "+=", "len", "(", "P", "[", "id", "]", ")", "# Collapse the tokens to string.", "# Separate multiple sentences with a new line.", "tokens", "=", "[", "\"/\"", ".", "join", "(", "[", "tag", "for", "tag", "in", "token", "]", ")", "for", "token", "in", "tokens", "]", "tokens", "=", "\" \"", ".", "join", "(", "tokens", ")", "string", "+=", "tokens", "+", "\"\\n\"", "# Return a TokenString, which is a unicode string that transforms easily", "# into a plain str, a list of tokens, or a Sentence.", "try", ":", "if", "MBSP", ":", "from", "mbsp", "import", "TokenString", "return", "TokenString", "(", "string", ".", "strip", "(", ")", ",", "tags", "=", "format", ",", "language", "=", "language", ")", "except", ":", "return", "TaggedString", "(", "string", ".", "strip", "(", ")", ",", "tags", "=", "format", ",", "language", "=", "language", ")" ]
Returns a slash-formatted string from the given XML representation. The return value is a TokenString (for MBSP) or TaggedString (for Pattern).
[ "Returns", "a", "slash", "-", "formatted", "string", "from", "the", "given", "XML", "representation", ".", "The", "return", "value", "is", "a", "TokenString", "(", "for", "MBSP", ")", "or", "TaggedString", "(", "for", "Pattern", ")", "." ]
python
train
52.375
tensorlayer/tensorlayer
tensorlayer/logging/tl_logging.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/logging/tl_logging.py#L148-L160
def _GetNextLogCountPerToken(token): """Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0) """ global _log_counter_per_token # pylint: disable=global-variable-not-assigned _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1) return _log_counter_per_token[token]
[ "def", "_GetNextLogCountPerToken", "(", "token", ")", ":", "global", "_log_counter_per_token", "# pylint: disable=global-variable-not-assigned", "_log_counter_per_token", "[", "token", "]", "=", "1", "+", "_log_counter_per_token", ".", "get", "(", "token", ",", "-", "1", ")", "return", "_log_counter_per_token", "[", "token", "]" ]
Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0)
[ "Wrapper", "for", "_log_counter_per_token", "." ]
python
valid
35.076923
pavlin-policar/openTSNE
openTSNE/affinity.py
https://github.com/pavlin-policar/openTSNE/blob/28513a0d669f2f20e7b971c0c6373dc375f72771/openTSNE/affinity.py#L139-L178
def set_perplexity(self, new_perplexity): """Change the perplexity of the affinity matrix. Note that we only allow lowering the perplexity or restoring it to its original value. This restriction exists because setting a higher perplexity value requires recomputing all the nearest neighbors, which can take a long time. To avoid potential confusion as to why execution time is slow, this is not allowed. If you would like to increase the perplexity above the initial value, simply create a new instance. Parameters ---------- new_perplexity: float The new perplexity. """ # If the value hasn't changed, there's nothing to do if new_perplexity == self.perplexity: return # Verify that the perplexity isn't too large new_perplexity = self.check_perplexity(new_perplexity) # Recompute the affinity matrix k_neighbors = min(self.n_samples - 1, int(3 * new_perplexity)) if k_neighbors > self.__neighbors.shape[1]: raise RuntimeError( "The desired perplexity `%.2f` is larger than the initial one " "used. This would need to recompute the nearest neighbors, " "which is not efficient. Please create a new `%s` instance " "with the increased perplexity." % (new_perplexity, self.__class__.__name__) ) self.perplexity = new_perplexity self.P = joint_probabilities_nn( self.__neighbors[:, :k_neighbors], self.__distances[:, :k_neighbors], [self.perplexity], symmetrize=True, n_jobs=self.n_jobs, )
[ "def", "set_perplexity", "(", "self", ",", "new_perplexity", ")", ":", "# If the value hasn't changed, there's nothing to do", "if", "new_perplexity", "==", "self", ".", "perplexity", ":", "return", "# Verify that the perplexity isn't too large", "new_perplexity", "=", "self", ".", "check_perplexity", "(", "new_perplexity", ")", "# Recompute the affinity matrix", "k_neighbors", "=", "min", "(", "self", ".", "n_samples", "-", "1", ",", "int", "(", "3", "*", "new_perplexity", ")", ")", "if", "k_neighbors", ">", "self", ".", "__neighbors", ".", "shape", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"The desired perplexity `%.2f` is larger than the initial one \"", "\"used. This would need to recompute the nearest neighbors, \"", "\"which is not efficient. Please create a new `%s` instance \"", "\"with the increased perplexity.\"", "%", "(", "new_perplexity", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "perplexity", "=", "new_perplexity", "self", ".", "P", "=", "joint_probabilities_nn", "(", "self", ".", "__neighbors", "[", ":", ",", ":", "k_neighbors", "]", ",", "self", ".", "__distances", "[", ":", ",", ":", "k_neighbors", "]", ",", "[", "self", ".", "perplexity", "]", ",", "symmetrize", "=", "True", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", ")" ]
Change the perplexity of the affinity matrix. Note that we only allow lowering the perplexity or restoring it to its original value. This restriction exists because setting a higher perplexity value requires recomputing all the nearest neighbors, which can take a long time. To avoid potential confusion as to why execution time is slow, this is not allowed. If you would like to increase the perplexity above the initial value, simply create a new instance. Parameters ---------- new_perplexity: float The new perplexity.
[ "Change", "the", "perplexity", "of", "the", "affinity", "matrix", "." ]
python
train
42.7
danielperna84/pyhomematic
pyhomematic/devicetypes/generic.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/devicetypes/generic.py#L199-L211
def getValue(self, key): """ Some devices allow to directly get values for specific parameters. """ LOG.debug("HMGeneric.getValue: address = '%s', key = '%s'" % (self._ADDRESS, key)) try: returnvalue = self._proxy.getValue(self._ADDRESS, key) self._VALUES[key] = returnvalue return returnvalue except Exception as err: LOG.warning("HMGeneric.getValue: %s on %s Exception: %s", key, self._ADDRESS, err) return False
[ "def", "getValue", "(", "self", ",", "key", ")", ":", "LOG", ".", "debug", "(", "\"HMGeneric.getValue: address = '%s', key = '%s'\"", "%", "(", "self", ".", "_ADDRESS", ",", "key", ")", ")", "try", ":", "returnvalue", "=", "self", ".", "_proxy", ".", "getValue", "(", "self", ".", "_ADDRESS", ",", "key", ")", "self", ".", "_VALUES", "[", "key", "]", "=", "returnvalue", "return", "returnvalue", "except", "Exception", "as", "err", ":", "LOG", ".", "warning", "(", "\"HMGeneric.getValue: %s on %s Exception: %s\"", ",", "key", ",", "self", ".", "_ADDRESS", ",", "err", ")", "return", "False" ]
Some devices allow to directly get values for specific parameters.
[ "Some", "devices", "allow", "to", "directly", "get", "values", "for", "specific", "parameters", "." ]
python
train
41.076923
liminspace/dju-common
dju_common/validators.py
https://github.com/liminspace/dju-common/blob/c68860bb84d454a35e66275841c20f38375c2135/dju_common/validators.py#L6-L14
def validate_email_domain(email): """ Validates email domain by blacklist. """ try: domain = email.split('@', 1)[1].lower().strip() except IndexError: return if domain in dju_settings.DJU_EMAIL_DOMAIN_BLACK_LIST: raise ValidationError(_(u'Email with domain "%(domain)s" is disallowed.'), code='banned_domain', params={'domain': domain})
[ "def", "validate_email_domain", "(", "email", ")", ":", "try", ":", "domain", "=", "email", ".", "split", "(", "'@'", ",", "1", ")", "[", "1", "]", ".", "lower", "(", ")", ".", "strip", "(", ")", "except", "IndexError", ":", "return", "if", "domain", "in", "dju_settings", ".", "DJU_EMAIL_DOMAIN_BLACK_LIST", ":", "raise", "ValidationError", "(", "_", "(", "u'Email with domain \"%(domain)s\" is disallowed.'", ")", ",", "code", "=", "'banned_domain'", ",", "params", "=", "{", "'domain'", ":", "domain", "}", ")" ]
Validates email domain by blacklist.
[ "Validates", "email", "domain", "by", "blacklist", "." ]
python
train
44.222222
orb-framework/orb
orb/core/schema.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/schema.py#L478-L487
def setCollectors(self, collectors): """ Sets the collector methods that will be used for this schema. :param collectors | [<orb.Collectors>, ..] """ self.__collectors = {} for name, collector in collectors.items(): self.__collectors[name] = collector collector.setSchema(self)
[ "def", "setCollectors", "(", "self", ",", "collectors", ")", ":", "self", ".", "__collectors", "=", "{", "}", "for", "name", ",", "collector", "in", "collectors", ".", "items", "(", ")", ":", "self", ".", "__collectors", "[", "name", "]", "=", "collector", "collector", ".", "setSchema", "(", "self", ")" ]
Sets the collector methods that will be used for this schema. :param collectors | [<orb.Collectors>, ..]
[ "Sets", "the", "collector", "methods", "that", "will", "be", "used", "for", "this", "schema", ".", ":", "param", "collectors", "|", "[", "<orb", ".", "Collectors", ">", "..", "]" ]
python
train
35.4
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L804-L816
def get_domain(self): """ :returns: opposite vertices of the bounding prism for this object in the form of ndarray([min], [max]) .. note:: This method automatically stores the solution in order to do not repeat calculations if the user needs to call it more than once. """ points = ([poly.points for poly in self]+ [holes.points for holes in self.holes]) points = np.concatenate(points, axis=0) return np.array([points.min(axis=0), points.max(axis=0)])
[ "def", "get_domain", "(", "self", ")", ":", "points", "=", "(", "[", "poly", ".", "points", "for", "poly", "in", "self", "]", "+", "[", "holes", ".", "points", "for", "holes", "in", "self", ".", "holes", "]", ")", "points", "=", "np", ".", "concatenate", "(", "points", ",", "axis", "=", "0", ")", "return", "np", ".", "array", "(", "[", "points", ".", "min", "(", "axis", "=", "0", ")", ",", "points", ".", "max", "(", "axis", "=", "0", ")", "]", ")" ]
:returns: opposite vertices of the bounding prism for this object in the form of ndarray([min], [max]) .. note:: This method automatically stores the solution in order to do not repeat calculations if the user needs to call it more than once.
[ ":", "returns", ":", "opposite", "vertices", "of", "the", "bounding", "prism", "for", "this", "object", "in", "the", "form", "of", "ndarray", "(", "[", "min", "]", "[", "max", "]", ")", "..", "note", "::", "This", "method", "automatically", "stores", "the", "solution", "in", "order", "to", "do", "not", "repeat", "calculations", "if", "the", "user", "needs", "to", "call", "it", "more", "than", "once", "." ]
python
train
44.076923
tensorflow/cleverhans
cleverhans/attacks/bapp.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/bapp.py#L358-L364
def compute_distance(x_ori, x_pert, constraint='l2'): """ Compute the distance between two images. """ if constraint == 'l2': dist = np.linalg.norm(x_ori - x_pert) elif constraint == 'linf': dist = np.max(abs(x_ori - x_pert)) return dist
[ "def", "compute_distance", "(", "x_ori", ",", "x_pert", ",", "constraint", "=", "'l2'", ")", ":", "if", "constraint", "==", "'l2'", ":", "dist", "=", "np", ".", "linalg", ".", "norm", "(", "x_ori", "-", "x_pert", ")", "elif", "constraint", "==", "'linf'", ":", "dist", "=", "np", ".", "max", "(", "abs", "(", "x_ori", "-", "x_pert", ")", ")", "return", "dist" ]
Compute the distance between two images.
[ "Compute", "the", "distance", "between", "two", "images", "." ]
python
train
35.285714
brutasse/graphite-api
graphite_api/functions.py
https://github.com/brutasse/graphite-api/blob/0886b7adcf985a1e8bcb084f6dd1dc166a3f3dff/graphite_api/functions.py#L2544-L2551
def sortByTotal(requestContext, seriesList): """ Takes one metric or a wildcard seriesList. Sorts the list of metrics by the sum of values across the time period specified. """ return list(sorted(seriesList, key=safeSum, reverse=True))
[ "def", "sortByTotal", "(", "requestContext", ",", "seriesList", ")", ":", "return", "list", "(", "sorted", "(", "seriesList", ",", "key", "=", "safeSum", ",", "reverse", "=", "True", ")", ")" ]
Takes one metric or a wildcard seriesList. Sorts the list of metrics by the sum of values across the time period specified.
[ "Takes", "one", "metric", "or", "a", "wildcard", "seriesList", "." ]
python
train
31.625
quodlibet/mutagen
mutagen/_util.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_util.py#L661-L683
def seek_end(fileobj, offset): """Like fileobj.seek(-offset, 2), but will not try to go beyond the start Needed since file objects from BytesIO will not raise IOError and file objects from open() will raise IOError if going to a negative offset. To make things easier for custom implementations, instead of allowing both behaviors, we just don't do it. Args: fileobj (fileobj) offset (int): how many bytes away from the end backwards to seek to Raises: IOError """ if offset < 0: raise ValueError if get_size(fileobj) < offset: fileobj.seek(0, 0) else: fileobj.seek(-offset, 2)
[ "def", "seek_end", "(", "fileobj", ",", "offset", ")", ":", "if", "offset", "<", "0", ":", "raise", "ValueError", "if", "get_size", "(", "fileobj", ")", "<", "offset", ":", "fileobj", ".", "seek", "(", "0", ",", "0", ")", "else", ":", "fileobj", ".", "seek", "(", "-", "offset", ",", "2", ")" ]
Like fileobj.seek(-offset, 2), but will not try to go beyond the start Needed since file objects from BytesIO will not raise IOError and file objects from open() will raise IOError if going to a negative offset. To make things easier for custom implementations, instead of allowing both behaviors, we just don't do it. Args: fileobj (fileobj) offset (int): how many bytes away from the end backwards to seek to Raises: IOError
[ "Like", "fileobj", ".", "seek", "(", "-", "offset", "2", ")", "but", "will", "not", "try", "to", "go", "beyond", "the", "start" ]
python
train
28.347826
lpantano/seqcluster
seqcluster/function/coral.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/function/coral.py#L18-L29
def prepare_bam(bam_in, precursors): """ Clean BAM file to keep only position inside the bigger cluster """ # use pybedtools to keep valid positions # intersect option with -b bigger_cluster_loci a = pybedtools.BedTool(bam_in) b = pybedtools.BedTool(precursors) c = a.intersect(b, u=True) out_file = utils.splitext_plus(op.basename(bam_in))[0] + "_clean.bam" c.saveas(out_file) return op.abspath(out_file)
[ "def", "prepare_bam", "(", "bam_in", ",", "precursors", ")", ":", "# use pybedtools to keep valid positions", "# intersect option with -b bigger_cluster_loci", "a", "=", "pybedtools", ".", "BedTool", "(", "bam_in", ")", "b", "=", "pybedtools", ".", "BedTool", "(", "precursors", ")", "c", "=", "a", ".", "intersect", "(", "b", ",", "u", "=", "True", ")", "out_file", "=", "utils", ".", "splitext_plus", "(", "op", ".", "basename", "(", "bam_in", ")", ")", "[", "0", "]", "+", "\"_clean.bam\"", "c", ".", "saveas", "(", "out_file", ")", "return", "op", ".", "abspath", "(", "out_file", ")" ]
Clean BAM file to keep only position inside the bigger cluster
[ "Clean", "BAM", "file", "to", "keep", "only", "position", "inside", "the", "bigger", "cluster" ]
python
train
36.5
hubo1016/vlcp
vlcp/utils/flowupdater.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L167-L248
async def main(self): """ Main coroutine """ try: lastkeys = set() dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED) startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK) self.subroutine(self._flowupdater(), False, '_flowupdateroutine') # Cache updated objects presave_update = set() while True: self._restartwalk = False presave_update.update(self._updatedset) self._updatedset.clear() _initialkeys = set(self._initialkeys) try: walk_result = await call_api(self, 'objectdb', 'walk', {'keys': self._initialkeys, 'walkerdict': self._walkerdict, 'requestid': (self._requstid, self._requestindex)}) except Exception: self._logger.warning("Flow updater %r walk step failed, conn = %r", self, self._connection, exc_info=True) # Cleanup await call_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}) await self.wait_with_timeout(2) self._requestindex += 1 if self._restartwalk: continue if self._updatedset: if any(v.getkey() in _initialkeys for v in self._updatedset): # During walk, there are other initial keys that are updated # To make sure we get the latest result, restart the walk continue lastkeys = set(self._savedkeys) _savedkeys, _savedresult = walk_result removekeys = tuple(lastkeys.difference(_savedkeys)) self.reset_initialkeys(_savedkeys, _savedresult) _initialkeys = set(self._initialkeys) if self._dataupdateroutine: self.terminate(self._dataupdateroutine) # Start detecting updates self.subroutine(self._dataobject_update_detect(_initialkeys, _savedresult), False, "_dataupdateroutine") # Set the updates back (potentially merged with newly updated objects) self._updatedset.update(v for v in presave_update) presave_update.clear() await self.walkcomplete(_savedkeys, _savedresult) if removekeys: await call_api(self, 'objectdb', 'munwatch', {'keys': removekeys, 'requestid': (self._requstid, self._requestindex)}) # Transfer updated objects to updatedset2 before a flow update notification # This helps to make `walkcomplete` executes before `updateflow` # # But notice that since there is only a single data object copy in all the program, # it is impossible to hide the change completely during `updateflow` self._updatedset2.update(self._updatedset) self._updatedset.clear() self._savedkeys = _savedkeys self._savedresult = _savedresult await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE)) while not self._restartwalk: if self._updatedset: if any(v.getkey() in _initialkeys for v in self._updatedset): break else: self._updatedset2.update(self._updatedset) self._updatedset.clear() self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE)) await M_(dataupdate, startwalk) except Exception: self._logger.exception("Flow updater %r stops update by an exception, conn = %r", self, self._connection) raise finally: self.subroutine(send_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}), False) if self._flowupdateroutine: self.terminate(self._flowupdateroutine) self._flowupdateroutine = None if self._dataupdateroutine: self.terminate(self._dataupdateroutine) self._dataupdateroutine = None
[ "async", "def", "main", "(", "self", ")", ":", "try", ":", "lastkeys", "=", "set", "(", ")", "dataupdate", "=", "FlowUpdaterNotification", ".", "createMatcher", "(", "self", ",", "FlowUpdaterNotification", ".", "DATAUPDATED", ")", "startwalk", "=", "FlowUpdaterNotification", ".", "createMatcher", "(", "self", ",", "FlowUpdaterNotification", ".", "STARTWALK", ")", "self", ".", "subroutine", "(", "self", ".", "_flowupdater", "(", ")", ",", "False", ",", "'_flowupdateroutine'", ")", "# Cache updated objects", "presave_update", "=", "set", "(", ")", "while", "True", ":", "self", ".", "_restartwalk", "=", "False", "presave_update", ".", "update", "(", "self", ".", "_updatedset", ")", "self", ".", "_updatedset", ".", "clear", "(", ")", "_initialkeys", "=", "set", "(", "self", ".", "_initialkeys", ")", "try", ":", "walk_result", "=", "await", "call_api", "(", "self", ",", "'objectdb'", ",", "'walk'", ",", "{", "'keys'", ":", "self", ".", "_initialkeys", ",", "'walkerdict'", ":", "self", ".", "_walkerdict", ",", "'requestid'", ":", "(", "self", ".", "_requstid", ",", "self", ".", "_requestindex", ")", "}", ")", "except", "Exception", ":", "self", ".", "_logger", ".", "warning", "(", "\"Flow updater %r walk step failed, conn = %r\"", ",", "self", ",", "self", ".", "_connection", ",", "exc_info", "=", "True", ")", "# Cleanup", "await", "call_api", "(", "self", ",", "'objectdb'", ",", "'unwatchall'", ",", "{", "'requestid'", ":", "(", "self", ".", "_requstid", ",", "self", ".", "_requestindex", ")", "}", ")", "await", "self", ".", "wait_with_timeout", "(", "2", ")", "self", ".", "_requestindex", "+=", "1", "if", "self", ".", "_restartwalk", ":", "continue", "if", "self", ".", "_updatedset", ":", "if", "any", "(", "v", ".", "getkey", "(", ")", "in", "_initialkeys", "for", "v", "in", "self", ".", "_updatedset", ")", ":", "# During walk, there are other initial keys that are updated", "# To make sure we get the latest result, restart the walk", "continue", "lastkeys", "=", "set", "(", "self", ".", "_savedkeys", ")", "_savedkeys", ",", "_savedresult", "=", "walk_result", "removekeys", "=", "tuple", "(", "lastkeys", ".", "difference", "(", "_savedkeys", ")", ")", "self", ".", "reset_initialkeys", "(", "_savedkeys", ",", "_savedresult", ")", "_initialkeys", "=", "set", "(", "self", ".", "_initialkeys", ")", "if", "self", ".", "_dataupdateroutine", ":", "self", ".", "terminate", "(", "self", ".", "_dataupdateroutine", ")", "# Start detecting updates", "self", ".", "subroutine", "(", "self", ".", "_dataobject_update_detect", "(", "_initialkeys", ",", "_savedresult", ")", ",", "False", ",", "\"_dataupdateroutine\"", ")", "# Set the updates back (potentially merged with newly updated objects)", "self", ".", "_updatedset", ".", "update", "(", "v", "for", "v", "in", "presave_update", ")", "presave_update", ".", "clear", "(", ")", "await", "self", ".", "walkcomplete", "(", "_savedkeys", ",", "_savedresult", ")", "if", "removekeys", ":", "await", "call_api", "(", "self", ",", "'objectdb'", ",", "'munwatch'", ",", "{", "'keys'", ":", "removekeys", ",", "'requestid'", ":", "(", "self", ".", "_requstid", ",", "self", ".", "_requestindex", ")", "}", ")", "# Transfer updated objects to updatedset2 before a flow update notification", "# This helps to make `walkcomplete` executes before `updateflow`", "#", "# But notice that since there is only a single data object copy in all the program,", "# it is impossible to hide the change completely during `updateflow`", "self", ".", "_updatedset2", ".", "update", "(", "self", 
".", "_updatedset", ")", "self", ".", "_updatedset", ".", "clear", "(", ")", "self", ".", "_savedkeys", "=", "_savedkeys", "self", ".", "_savedresult", "=", "_savedresult", "await", "self", ".", "wait_for_send", "(", "FlowUpdaterNotification", "(", "self", ",", "FlowUpdaterNotification", ".", "FLOWUPDATE", ")", ")", "while", "not", "self", ".", "_restartwalk", ":", "if", "self", ".", "_updatedset", ":", "if", "any", "(", "v", ".", "getkey", "(", ")", "in", "_initialkeys", "for", "v", "in", "self", ".", "_updatedset", ")", ":", "break", "else", ":", "self", ".", "_updatedset2", ".", "update", "(", "self", ".", "_updatedset", ")", "self", ".", "_updatedset", ".", "clear", "(", ")", "self", ".", "scheduler", ".", "emergesend", "(", "FlowUpdaterNotification", "(", "self", ",", "FlowUpdaterNotification", ".", "FLOWUPDATE", ")", ")", "await", "M_", "(", "dataupdate", ",", "startwalk", ")", "except", "Exception", ":", "self", ".", "_logger", ".", "exception", "(", "\"Flow updater %r stops update by an exception, conn = %r\"", ",", "self", ",", "self", ".", "_connection", ")", "raise", "finally", ":", "self", ".", "subroutine", "(", "send_api", "(", "self", ",", "'objectdb'", ",", "'unwatchall'", ",", "{", "'requestid'", ":", "(", "self", ".", "_requstid", ",", "self", ".", "_requestindex", ")", "}", ")", ",", "False", ")", "if", "self", ".", "_flowupdateroutine", ":", "self", ".", "terminate", "(", "self", ".", "_flowupdateroutine", ")", "self", ".", "_flowupdateroutine", "=", "None", "if", "self", ".", "_dataupdateroutine", ":", "self", ".", "terminate", "(", "self", ".", "_dataupdateroutine", ")", "self", ".", "_dataupdateroutine", "=", "None" ]
Main coroutine
[ "Main", "coroutine" ]
python
train
57.134146
constverum/ProxyBroker
proxybroker/providers.py
https://github.com/constverum/ProxyBroker/blob/d21aae8575fc3a95493233ecfd2c7cf47b36b069/proxybroker/providers.py#L70-L86
async def get_proxies(self): """Receive proxies from the provider and return them. :return: :attr:`.proxies` """ log.debug('Try to get proxies from %s' % self.domain) async with aiohttp.ClientSession( headers=get_headers(), cookies=self._cookies, loop=self._loop ) as self._session: await self._pipe() log.debug( '%d proxies received from %s: %s' % (len(self.proxies), self.domain, self.proxies) ) return self.proxies
[ "async", "def", "get_proxies", "(", "self", ")", ":", "log", ".", "debug", "(", "'Try to get proxies from %s'", "%", "self", ".", "domain", ")", "async", "with", "aiohttp", ".", "ClientSession", "(", "headers", "=", "get_headers", "(", ")", ",", "cookies", "=", "self", ".", "_cookies", ",", "loop", "=", "self", ".", "_loop", ")", "as", "self", ".", "_session", ":", "await", "self", ".", "_pipe", "(", ")", "log", ".", "debug", "(", "'%d proxies received from %s: %s'", "%", "(", "len", "(", "self", ".", "proxies", ")", ",", "self", ".", "domain", ",", "self", ".", "proxies", ")", ")", "return", "self", ".", "proxies" ]
Receive proxies from the provider and return them. :return: :attr:`.proxies`
[ "Receive", "proxies", "from", "the", "provider", "and", "return", "them", "." ]
python
train
30.823529
GochoMugo/firecall
firecall/sync.py
https://github.com/GochoMugo/firecall/blob/6b99ff72b3c056f51a5901f2be32030c7e68961a/firecall/sync.py#L119-L130
def catch_error(response): ''' Checks for Errors in a Response. 401 or 403 - Security Rules Violation. 404 or 417 - Firebase NOT Found. response - (Request.Response) - response from a request. ''' status = response.status_code if status == 401 or status == 403: raise EnvironmentError("Forbidden") elif status == 417 or status == 404: raise EnvironmentError("NotFound")
[ "def", "catch_error", "(", "response", ")", ":", "status", "=", "response", ".", "status_code", "if", "status", "==", "401", "or", "status", "==", "403", ":", "raise", "EnvironmentError", "(", "\"Forbidden\"", ")", "elif", "status", "==", "417", "or", "status", "==", "404", ":", "raise", "EnvironmentError", "(", "\"NotFound\"", ")" ]
Checks for Errors in a Response. 401 or 403 - Security Rules Violation. 404 or 417 - Firebase NOT Found. response - (Request.Response) - response from a request.
[ "Checks", "for", "Errors", "in", "a", "Response", ".", "401", "or", "403", "-", "Security", "Rules", "Violation", ".", "404", "or", "417", "-", "Firebase", "NOT", "Found", ".", "response", "-", "(", "Request", ".", "Response", ")", "-", "response", "from", "a", "request", "." ]
python
valid
37.833333
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L1283-L1290
def extendedMeasurementOrder(): """EXTENDED MEASUREMENT ORDER Section 9.1.51""" a = L2PseudoLength(l2pLength=0x12) b = TpPd(pd=0x6) c = MessageType(mesType=0x37) # 00110111 d = ExtendedMeasurementFrequencyList() packet = a / b / c / d return packet
[ "def", "extendedMeasurementOrder", "(", ")", ":", "a", "=", "L2PseudoLength", "(", "l2pLength", "=", "0x12", ")", "b", "=", "TpPd", "(", "pd", "=", "0x6", ")", "c", "=", "MessageType", "(", "mesType", "=", "0x37", ")", "# 00110111", "d", "=", "ExtendedMeasurementFrequencyList", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "/", "d", "return", "packet" ]
EXTENDED MEASUREMENT ORDER Section 9.1.51
[ "EXTENDED", "MEASUREMENT", "ORDER", "Section", "9", ".", "1", ".", "51" ]
python
train
33.75
invisibleroads/socketIO-client
socketIO_client/namespaces.py
https://github.com/invisibleroads/socketIO-client/blob/1e58adda9397500d89b4521c90aa06e6a511cef6/socketIO_client/namespaces.py#L27-L33
def off(self, event): 'Remove an event handler' try: self._once_events.remove(event) except KeyError: pass self._callback_by_event.pop(event, None)
[ "def", "off", "(", "self", ",", "event", ")", ":", "try", ":", "self", ".", "_once_events", ".", "remove", "(", "event", ")", "except", "KeyError", ":", "pass", "self", ".", "_callback_by_event", ".", "pop", "(", "event", ",", "None", ")" ]
Remove an event handler
[ "Remove", "an", "event", "handler" ]
python
train
28.142857
kkroening/ffmpeg-python
ffmpeg/_run.py
https://github.com/kkroening/ffmpeg-python/blob/ac111dc3a976ddbb872bc7d6d4fe24a267c1a956/ffmpeg/_run.py#L158-L173
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False): """Build command-line for invoking ffmpeg. The :meth:`run` function uses this to build the commnad line arguments and should work in most cases, but calling this function directly is useful for debugging or if you need to invoke ffmpeg manually for whatever reason. This is the same as calling :meth:`get_args` except that it also includes the ``ffmpeg`` command as the first argument. """ if isinstance(cmd, basestring): cmd = [cmd] elif type(cmd) != list: cmd = list(cmd) return cmd + get_args(stream_spec, overwrite_output=overwrite_output)
[ "def", "compile", "(", "stream_spec", ",", "cmd", "=", "'ffmpeg'", ",", "overwrite_output", "=", "False", ")", ":", "if", "isinstance", "(", "cmd", ",", "basestring", ")", ":", "cmd", "=", "[", "cmd", "]", "elif", "type", "(", "cmd", ")", "!=", "list", ":", "cmd", "=", "list", "(", "cmd", ")", "return", "cmd", "+", "get_args", "(", "stream_spec", ",", "overwrite_output", "=", "overwrite_output", ")" ]
Build command-line for invoking ffmpeg. The :meth:`run` function uses this to build the commnad line arguments and should work in most cases, but calling this function directly is useful for debugging or if you need to invoke ffmpeg manually for whatever reason. This is the same as calling :meth:`get_args` except that it also includes the ``ffmpeg`` command as the first argument.
[ "Build", "command", "-", "line", "for", "invoking", "ffmpeg", "." ]
python
train
40.875
Fizzadar/pyinfra
pyinfra/modules/server.py
https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L99-L121
def modprobe(state, host, name, present=True, force=False): ''' Load/unload kernel modules. + name: name of the module to manage + present: whether the module should be loaded or not + force: whether to force any add/remove modules ''' modules = host.fact.kernel_modules is_present = name in modules args = '' if force: args = ' -f' # Module is loaded and we don't want it? if not present and is_present: yield 'modprobe{0} -r {1}'.format(args, name) # Module isn't loaded and we want it? elif present and not is_present: yield 'modprobe{0} {1}'.format(args, name)
[ "def", "modprobe", "(", "state", ",", "host", ",", "name", ",", "present", "=", "True", ",", "force", "=", "False", ")", ":", "modules", "=", "host", ".", "fact", ".", "kernel_modules", "is_present", "=", "name", "in", "modules", "args", "=", "''", "if", "force", ":", "args", "=", "' -f'", "# Module is loaded and we don't want it?", "if", "not", "present", "and", "is_present", ":", "yield", "'modprobe{0} -r {1}'", ".", "format", "(", "args", ",", "name", ")", "# Module isn't loaded and we want it?", "elif", "present", "and", "not", "is_present", ":", "yield", "'modprobe{0} {1}'", ".", "format", "(", "args", ",", "name", ")" ]
Load/unload kernel modules. + name: name of the module to manage + present: whether the module should be loaded or not + force: whether to force any add/remove modules
[ "Load", "/", "unload", "kernel", "modules", "." ]
python
train
27.26087
hotdoc/hotdoc
hotdoc/parsers/sitemap.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/parsers/sitemap.py#L77-L87
def walk(self, action, user_data=None): """ Walk the hierarchy, applying action to each filename. Args: action: callable, the callable to invoke for each filename, will be invoked with the filename, the subfiles, and the level in the sitemap. """ action(self.index_file, self.__root, 0, user_data) self.__do_walk(self.__root, 1, action, user_data)
[ "def", "walk", "(", "self", ",", "action", ",", "user_data", "=", "None", ")", ":", "action", "(", "self", ".", "index_file", ",", "self", ".", "__root", ",", "0", ",", "user_data", ")", "self", ".", "__do_walk", "(", "self", ".", "__root", ",", "1", ",", "action", ",", "user_data", ")" ]
Walk the hierarchy, applying action to each filename. Args: action: callable, the callable to invoke for each filename, will be invoked with the filename, the subfiles, and the level in the sitemap.
[ "Walk", "the", "hierarchy", "applying", "action", "to", "each", "filename", "." ]
python
train
39.090909
romanz/trezor-agent
libagent/gpg/keyring.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L191-L197
def get_gnupg_components(sp=subprocess): """Parse GnuPG components' paths.""" args = [util.which('gpgconf'), '--list-components'] output = check_output(args=args, sp=sp) components = dict(re.findall('(.*):.*:(.*)', output.decode('utf-8'))) log.debug('gpgconf --list-components: %s', components) return components
[ "def", "get_gnupg_components", "(", "sp", "=", "subprocess", ")", ":", "args", "=", "[", "util", ".", "which", "(", "'gpgconf'", ")", ",", "'--list-components'", "]", "output", "=", "check_output", "(", "args", "=", "args", ",", "sp", "=", "sp", ")", "components", "=", "dict", "(", "re", ".", "findall", "(", "'(.*):.*:(.*)'", ",", "output", ".", "decode", "(", "'utf-8'", ")", ")", ")", "log", ".", "debug", "(", "'gpgconf --list-components: %s'", ",", "components", ")", "return", "components" ]
Parse GnuPG components' paths.
[ "Parse", "GnuPG", "components", "paths", "." ]
python
train
47.142857
erget/StereoVision
stereovision/blockmatchers.py
https://github.com/erget/StereoVision/blob/1adff45e291362f52188e0fd0211265845a4461a/stereovision/blockmatchers.py#L82-L87
def load_settings(self, settings): """Load settings from file""" with open(settings) as settings_file: settings_dict = simplejson.load(settings_file) for key, value in settings_dict.items(): self.__setattr__(key, value)
[ "def", "load_settings", "(", "self", ",", "settings", ")", ":", "with", "open", "(", "settings", ")", "as", "settings_file", ":", "settings_dict", "=", "simplejson", ".", "load", "(", "settings_file", ")", "for", "key", ",", "value", "in", "settings_dict", ".", "items", "(", ")", ":", "self", ".", "__setattr__", "(", "key", ",", "value", ")" ]
Load settings from file
[ "Load", "settings", "from", "file" ]
python
train
43.666667
Netflix-Skunkworks/historical
historical/common/dynamodb.py
https://github.com/Netflix-Skunkworks/historical/blob/c3ebaa8388a3fe67e23a6c9c6b04c3e618497c4a/historical/common/dynamodb.py#L146-L165
def get_full_durable_object(arn, event_time, durable_model): """ Utility method to fetch items from the Durable table if they are too big for SNS/SQS. :param record: :param durable_model: :return: """ LOG.debug(f'[-->] Item with ARN: {arn} was too big for SNS -- fetching it from the Durable table...') item = list(durable_model.query(arn, durable_model.eventTime == event_time)) # It is not clear if this would ever be the case... We will consider this an error condition for now. if not item: LOG.error(f'[?] Item with ARN/Event Time: {arn}/{event_time} was NOT found in the Durable table...' f' This is odd.') raise DurableItemIsMissingException({"item_arn": arn, "event_time": event_time}) # We need to place the real configuration data into the record so it can be deserialized into # the durable model correctly: return item[0]
[ "def", "get_full_durable_object", "(", "arn", ",", "event_time", ",", "durable_model", ")", ":", "LOG", ".", "debug", "(", "f'[-->] Item with ARN: {arn} was too big for SNS -- fetching it from the Durable table...'", ")", "item", "=", "list", "(", "durable_model", ".", "query", "(", "arn", ",", "durable_model", ".", "eventTime", "==", "event_time", ")", ")", "# It is not clear if this would ever be the case... We will consider this an error condition for now.", "if", "not", "item", ":", "LOG", ".", "error", "(", "f'[?] Item with ARN/Event Time: {arn}/{event_time} was NOT found in the Durable table...'", "f' This is odd.'", ")", "raise", "DurableItemIsMissingException", "(", "{", "\"item_arn\"", ":", "arn", ",", "\"event_time\"", ":", "event_time", "}", ")", "# We need to place the real configuration data into the record so it can be deserialized into", "# the durable model correctly:", "return", "item", "[", "0", "]" ]
Utility method to fetch items from the Durable table if they are too big for SNS/SQS. :param record: :param durable_model: :return:
[ "Utility", "method", "to", "fetch", "items", "from", "the", "Durable", "table", "if", "they", "are", "too", "big", "for", "SNS", "/", "SQS", "." ]
python
train
45.15
proycon/pynlpl
pynlpl/textprocessors.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/textprocessors.py#L444-L455
def find_keyword_in_context(tokens, keyword, contextsize=1): """Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list""" if isinstance(keyword,tuple) and isinstance(keyword,list): l = len(keyword) else: keyword = (keyword,) l = 1 n = l + contextsize*2 focuspos = contextsize + 1 for ngram in Windower(tokens,n,None,None): if ngram[focuspos:focuspos+l] == keyword: yield ngram[:focuspos], ngram[focuspos:focuspos+l],ngram[focuspos+l+1:]
[ "def", "find_keyword_in_context", "(", "tokens", ",", "keyword", ",", "contextsize", "=", "1", ")", ":", "if", "isinstance", "(", "keyword", ",", "tuple", ")", "and", "isinstance", "(", "keyword", ",", "list", ")", ":", "l", "=", "len", "(", "keyword", ")", "else", ":", "keyword", "=", "(", "keyword", ",", ")", "l", "=", "1", "n", "=", "l", "+", "contextsize", "*", "2", "focuspos", "=", "contextsize", "+", "1", "for", "ngram", "in", "Windower", "(", "tokens", ",", "n", ",", "None", ",", "None", ")", ":", "if", "ngram", "[", "focuspos", ":", "focuspos", "+", "l", "]", "==", "keyword", ":", "yield", "ngram", "[", ":", "focuspos", "]", ",", "ngram", "[", "focuspos", ":", "focuspos", "+", "l", "]", ",", "ngram", "[", "focuspos", "+", "l", "+", "1", ":", "]" ]
Find a keyword in a particular sequence of tokens, and return the local context. Contextsize is the number of words to the left and right. The keyword may have multiple word, in which case it should to passed as a tuple or list
[ "Find", "a", "keyword", "in", "a", "particular", "sequence", "of", "tokens", "and", "return", "the", "local", "context", ".", "Contextsize", "is", "the", "number", "of", "words", "to", "the", "left", "and", "right", ".", "The", "keyword", "may", "have", "multiple", "word", "in", "which", "case", "it", "should", "to", "passed", "as", "a", "tuple", "or", "list" ]
python
train
55.5
HPCC-Cloud-Computing/CAL
calplus/v1/compute/drivers/openstack.py
https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/compute/drivers/openstack.py#L119-L122
def delete_nic(self, instance_id, port_id): """Delete a Network Interface Controller""" self.client.servers.interface_detach(instance_id, port_id) return True
[ "def", "delete_nic", "(", "self", ",", "instance_id", ",", "port_id", ")", ":", "self", ".", "client", ".", "servers", ".", "interface_detach", "(", "instance_id", ",", "port_id", ")", "return", "True" ]
Delete a Network Interface Controller
[ "Delete", "a", "Network", "Interface", "Controller" ]
python
train
44.75
ndrlslz/ternya
ternya/ternya.py
https://github.com/ndrlslz/ternya/blob/c05aec10029e645d63ff04313dbcf2644743481f/ternya/ternya.py#L165-L183
def init_glance_consumer(self, mq): """ Init openstack glance mq 1. Check if enable listening glance notification 2. Create consumer :param mq: class ternya.mq.MQ """ if not self.enable_component_notification(Openstack.Glance): log.debug("disable listening glance notification") return for i in range(self.config.glance_mq_consumer_count): mq.create_consumer(self.config.glance_mq_exchange, self.config.glance_mq_queue, ProcessFactory.process(Openstack.Glance)) log.debug("enable listening openstack glance notification.")
[ "def", "init_glance_consumer", "(", "self", ",", "mq", ")", ":", "if", "not", "self", ".", "enable_component_notification", "(", "Openstack", ".", "Glance", ")", ":", "log", ".", "debug", "(", "\"disable listening glance notification\"", ")", "return", "for", "i", "in", "range", "(", "self", ".", "config", ".", "glance_mq_consumer_count", ")", ":", "mq", ".", "create_consumer", "(", "self", ".", "config", ".", "glance_mq_exchange", ",", "self", ".", "config", ".", "glance_mq_queue", ",", "ProcessFactory", ".", "process", "(", "Openstack", ".", "Glance", ")", ")", "log", ".", "debug", "(", "\"enable listening openstack glance notification.\"", ")" ]
Init openstack glance mq 1. Check if enable listening glance notification 2. Create consumer :param mq: class ternya.mq.MQ
[ "Init", "openstack", "glance", "mq" ]
python
test
35.684211
pandas-dev/pandas
pandas/core/indexes/datetimes.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimes.py#L35-L51
def _new_DatetimeIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__ """ if "data" in d and not isinstance(d["data"], DatetimeIndex): # Avoid need to verify integrity by calling simple_new directly data = d.pop("data") result = cls._simple_new(data, **d) else: with warnings.catch_warnings(): # we ignore warnings from passing verify_integrity=False # TODO: If we knew what was going in to **d, we might be able to # go through _simple_new instead warnings.simplefilter("ignore") result = cls.__new__(cls, verify_integrity=False, **d) return result
[ "def", "_new_DatetimeIndex", "(", "cls", ",", "d", ")", ":", "if", "\"data\"", "in", "d", "and", "not", "isinstance", "(", "d", "[", "\"data\"", "]", ",", "DatetimeIndex", ")", ":", "# Avoid need to verify integrity by calling simple_new directly", "data", "=", "d", ".", "pop", "(", "\"data\"", ")", "result", "=", "cls", ".", "_simple_new", "(", "data", ",", "*", "*", "d", ")", "else", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "# we ignore warnings from passing verify_integrity=False", "# TODO: If we knew what was going in to **d, we might be able to", "# go through _simple_new instead", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "result", "=", "cls", ".", "__new__", "(", "cls", ",", "verify_integrity", "=", "False", ",", "*", "*", "d", ")", "return", "result" ]
This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__
[ "This", "is", "called", "upon", "unpickling", "rather", "than", "the", "default", "which", "doesn", "t", "have", "arguments", "and", "breaks", "__new__" ]
python
train
42.235294
crdoconnor/pathquery
hitch/key.py
https://github.com/crdoconnor/pathquery/blob/4905fef27fc666ea4511eb0eee5098f754bb52ed/hitch/key.py#L291-L302
def regression(): """ Run regression testing - lint and then run all tests. """ # HACK: Start using hitchbuildpy to get around this. Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run() storybook = _storybook({}).only_uninherited() #storybook.with_params(**{"python version": "2.7.10"})\ #.ordered_by_name().play() Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run() storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play() lint()
[ "def", "regression", "(", ")", ":", "# HACK: Start using hitchbuildpy to get around this.", "Command", "(", "\"touch\"", ",", "DIR", ".", "project", ".", "joinpath", "(", "\"pathquery\"", ",", "\"__init__.py\"", ")", ".", "abspath", "(", ")", ")", ".", "run", "(", ")", "storybook", "=", "_storybook", "(", "{", "}", ")", ".", "only_uninherited", "(", ")", "#storybook.with_params(**{\"python version\": \"2.7.10\"})\\", "#.ordered_by_name().play()", "Command", "(", "\"touch\"", ",", "DIR", ".", "project", ".", "joinpath", "(", "\"pathquery\"", ",", "\"__init__.py\"", ")", ".", "abspath", "(", ")", ")", ".", "run", "(", ")", "storybook", ".", "with_params", "(", "*", "*", "{", "\"python version\"", ":", "\"3.5.0\"", "}", ")", ".", "ordered_by_name", "(", ")", ".", "play", "(", ")", "lint", "(", ")" ]
Run regression testing - lint and then run all tests.
[ "Run", "regression", "testing", "-", "lint", "and", "then", "run", "all", "tests", "." ]
python
train
46.166667
moonso/vcf_parser
vcf_parser/header_parser.py
https://github.com/moonso/vcf_parser/blob/8e2b6724e31995e0d43af501f25974310c6b843b/vcf_parser/header_parser.py#L178-L183
def parse_header_line(self, line): """docstring for parse_header_line""" self.header = line[1:].rstrip().split('\t') if len(self.header) < 9: self.header = line[1:].rstrip().split() self.individuals = self.header[9:]
[ "def", "parse_header_line", "(", "self", ",", "line", ")", ":", "self", ".", "header", "=", "line", "[", "1", ":", "]", ".", "rstrip", "(", ")", ".", "split", "(", "'\\t'", ")", "if", "len", "(", "self", ".", "header", ")", "<", "9", ":", "self", ".", "header", "=", "line", "[", "1", ":", "]", ".", "rstrip", "(", ")", ".", "split", "(", ")", "self", ".", "individuals", "=", "self", ".", "header", "[", "9", ":", "]" ]
docstring for parse_header_line
[ "docstring", "for", "parse_header_line" ]
python
train
42.5
jmbhughes/suvi-trainer
suvitrainer/fileio.py
https://github.com/jmbhughes/suvi-trainer/blob/3d89894a4a037286221974c7eb5634d229b4f5d4/suvitrainer/fileio.py#L249-L313
def fetch_suvi_l1b(self, product, correct=True, median_kernel=5): """ Given a product keyword, downloads the SUVI l1b image into the current directory. NOTE: the suvi_l1b_url must be properly set for the Fetcher object :param product: the keyword for the product, e.g. suvi-l1b-fe094 :param correct: remove nans and negatives :return: tuple of product name, fits header, and data object the header and data object will be None if the request failed """ if self.date < datetime(2018, 5, 23) and not (self.date >= datetime(2017, 9, 6) \ and self.date <= datetime(2017, 9, 10, 23, 59)): print("SUVI data is only available after 2018-5-23") return product, None, None url = self.suvi_base_url + product + "/{}/{:02d}/{:02d}".format(self.date.year, self.date.month, self.date.day) if self.verbose: print("Requesting from {}".format(url)) try: req = urllib.request.Request(url) with urllib.request.urlopen(req) as response: page = response.read() except (URLError, HTTPError): msg = "The SUVI URL you requested, {}, appears to be unavailable. Check it through a web browser." raise RuntimeError(msg.format(url)) soup = BeautifulSoup(page, 'html.parser') links = [link['href'] for link in soup.find_all('a', href=True)] links = [link for link in links if "SUVI" in link] meta = [self.parse_filename_meta(fn) for fn in links if ".fits" in fn] links = sorted(meta, key=lambda m: np.abs((m[2] - self.date).total_seconds()))[:10] links = [fn for fn, _, _, _, _ in links] i = 0 def download_and_check(i): try: urllib.request.urlretrieve(url + "/" + links[i], "{}.fits".format(product)) except (URLError, HTTPError): msg = "THE SUVI file you requested, {}, appears to be unvailable. Check if the website is correct." raise RuntimeError(msg.format(url + "/" + links[i])) with fits.open("{}.fits".format(product)) as hdu: head = hdu[0].header return head['exptime'] > 0.5 while not download_and_check(i): i += 1 with fits.open("{}.fits".format(product)) as hdu: head = hdu[0].header data = hdu[0].data os.remove("{}.fits".format(product)) if correct: data[np.isnan(data)] = 0 data[data < 0] = 0 if median_kernel: data = medfilt(data, median_kernel) data, head = self.align_solar_fov(head, data, 2.5, 2.0, rotate=True, scale=False) if self.verbose: print(product, " is using ", head['date-obs']) return product, head, data
[ "def", "fetch_suvi_l1b", "(", "self", ",", "product", ",", "correct", "=", "True", ",", "median_kernel", "=", "5", ")", ":", "if", "self", ".", "date", "<", "datetime", "(", "2018", ",", "5", ",", "23", ")", "and", "not", "(", "self", ".", "date", ">=", "datetime", "(", "2017", ",", "9", ",", "6", ")", "and", "self", ".", "date", "<=", "datetime", "(", "2017", ",", "9", ",", "10", ",", "23", ",", "59", ")", ")", ":", "print", "(", "\"SUVI data is only available after 2018-5-23\"", ")", "return", "product", ",", "None", ",", "None", "url", "=", "self", ".", "suvi_base_url", "+", "product", "+", "\"/{}/{:02d}/{:02d}\"", ".", "format", "(", "self", ".", "date", ".", "year", ",", "self", ".", "date", ".", "month", ",", "self", ".", "date", ".", "day", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"Requesting from {}\"", ".", "format", "(", "url", ")", ")", "try", ":", "req", "=", "urllib", ".", "request", ".", "Request", "(", "url", ")", "with", "urllib", ".", "request", ".", "urlopen", "(", "req", ")", "as", "response", ":", "page", "=", "response", ".", "read", "(", ")", "except", "(", "URLError", ",", "HTTPError", ")", ":", "msg", "=", "\"The SUVI URL you requested, {}, appears to be unavailable. Check it through a web browser.\"", "raise", "RuntimeError", "(", "msg", ".", "format", "(", "url", ")", ")", "soup", "=", "BeautifulSoup", "(", "page", ",", "'html.parser'", ")", "links", "=", "[", "link", "[", "'href'", "]", "for", "link", "in", "soup", ".", "find_all", "(", "'a'", ",", "href", "=", "True", ")", "]", "links", "=", "[", "link", "for", "link", "in", "links", "if", "\"SUVI\"", "in", "link", "]", "meta", "=", "[", "self", ".", "parse_filename_meta", "(", "fn", ")", "for", "fn", "in", "links", "if", "\".fits\"", "in", "fn", "]", "links", "=", "sorted", "(", "meta", ",", "key", "=", "lambda", "m", ":", "np", ".", "abs", "(", "(", "m", "[", "2", "]", "-", "self", ".", "date", ")", ".", "total_seconds", "(", ")", ")", ")", "[", ":", "10", "]", "links", "=", "[", "fn", "for", "fn", ",", "_", ",", "_", ",", "_", ",", "_", "in", "links", "]", "i", "=", "0", "def", "download_and_check", "(", "i", ")", ":", "try", ":", "urllib", ".", "request", ".", "urlretrieve", "(", "url", "+", "\"/\"", "+", "links", "[", "i", "]", ",", "\"{}.fits\"", ".", "format", "(", "product", ")", ")", "except", "(", "URLError", ",", "HTTPError", ")", ":", "msg", "=", "\"THE SUVI file you requested, {}, appears to be unvailable. 
Check if the website is correct.\"", "raise", "RuntimeError", "(", "msg", ".", "format", "(", "url", "+", "\"/\"", "+", "links", "[", "i", "]", ")", ")", "with", "fits", ".", "open", "(", "\"{}.fits\"", ".", "format", "(", "product", ")", ")", "as", "hdu", ":", "head", "=", "hdu", "[", "0", "]", ".", "header", "return", "head", "[", "'exptime'", "]", ">", "0.5", "while", "not", "download_and_check", "(", "i", ")", ":", "i", "+=", "1", "with", "fits", ".", "open", "(", "\"{}.fits\"", ".", "format", "(", "product", ")", ")", "as", "hdu", ":", "head", "=", "hdu", "[", "0", "]", ".", "header", "data", "=", "hdu", "[", "0", "]", ".", "data", "os", ".", "remove", "(", "\"{}.fits\"", ".", "format", "(", "product", ")", ")", "if", "correct", ":", "data", "[", "np", ".", "isnan", "(", "data", ")", "]", "=", "0", "data", "[", "data", "<", "0", "]", "=", "0", "if", "median_kernel", ":", "data", "=", "medfilt", "(", "data", ",", "median_kernel", ")", "data", ",", "head", "=", "self", ".", "align_solar_fov", "(", "head", ",", "data", ",", "2.5", ",", "2.0", ",", "rotate", "=", "True", ",", "scale", "=", "False", ")", "if", "self", ".", "verbose", ":", "print", "(", "product", ",", "\" is using \"", ",", "head", "[", "'date-obs'", "]", ")", "return", "product", ",", "head", ",", "data" ]
Given a product keyword, downloads the SUVI l1b image into the current directory. NOTE: the suvi_l1b_url must be properly set for the Fetcher object :param product: the keyword for the product, e.g. suvi-l1b-fe094 :param correct: remove nans and negatives :return: tuple of product name, fits header, and data object the header and data object will be None if the request failed
[ "Given", "a", "product", "keyword", "downloads", "the", "SUVI", "l1b", "image", "into", "the", "current", "directory", ".", "NOTE", ":", "the", "suvi_l1b_url", "must", "be", "properly", "set", "for", "the", "Fetcher", "object", ":", "param", "product", ":", "the", "keyword", "for", "the", "product", "e", ".", "g", ".", "suvi", "-", "l1b", "-", "fe094", ":", "param", "correct", ":", "remove", "nans", "and", "negatives", ":", "return", ":", "tuple", "of", "product", "name", "fits", "header", "and", "data", "object", "the", "header", "and", "data", "object", "will", "be", "None", "if", "the", "request", "failed" ]
python
train
43.092308
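The record above picks whichever remote SUVI file is closest in time to the requested date and keeps downloading candidates until one has a long enough exposure. The nearest-in-time selection on its own can be sketched in a few lines; the filename pattern and the parse helper below are simplified assumptions, not the repo's actual parse_filename_meta.

```python
from datetime import datetime

def parse_time(fname):
    # Hypothetical, simplified filename format: <product>_<YYYYmmddTHHMMSS>.fits
    stamp = fname.rsplit("_", 1)[1].replace(".fits", "")
    return datetime.strptime(stamp, "%Y%m%dT%H%M%S")

target = datetime(2018, 6, 1, 12, 5)
candidates = [
    "suvi-l1b-fe094_20180601T110000.fits",
    "suvi-l1b-fe094_20180601T120400.fits",
    "suvi-l1b-fe094_20180601T130000.fits",
]

# Sort by absolute distance to the requested observation time, as the fetcher does.
nearest = sorted(candidates, key=lambda f: abs((parse_time(f) - target).total_seconds()))
print(nearest[0])  # -> suvi-l1b-fe094_20180601T120400.fits
```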
saltstack/salt
salt/modules/win_useradd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_useradd.py#L1025-L1063
def current(sam=False): ''' Get the username that salt-minion is running under. If salt-minion is running as a service it should return the Local System account. If salt is running from a command prompt it should return the username that started the command prompt. .. versionadded:: 2015.5.6 Args: sam (bool, optional): False returns just the username without any domain notation. True returns the domain with the username in the SAM format. Ie: ``domain\\username`` Returns: str: Returns username CLI Example: .. code-block:: bash salt '*' user.current ''' try: if sam: user_name = win32api.GetUserNameEx(win32con.NameSamCompatible) else: user_name = win32api.GetUserName() except pywintypes.error as exc: log.error('Failed to get current user') log.error('nbr: %s', exc.winerror) log.error('ctx: %s', exc.funcname) log.error('msg: %s', exc.strerror) raise CommandExecutionError('Failed to get current user', info=exc) if not user_name: raise CommandExecutionError('Failed to get current user') return user_name
[ "def", "current", "(", "sam", "=", "False", ")", ":", "try", ":", "if", "sam", ":", "user_name", "=", "win32api", ".", "GetUserNameEx", "(", "win32con", ".", "NameSamCompatible", ")", "else", ":", "user_name", "=", "win32api", ".", "GetUserName", "(", ")", "except", "pywintypes", ".", "error", "as", "exc", ":", "log", ".", "error", "(", "'Failed to get current user'", ")", "log", ".", "error", "(", "'nbr: %s'", ",", "exc", ".", "winerror", ")", "log", ".", "error", "(", "'ctx: %s'", ",", "exc", ".", "funcname", ")", "log", ".", "error", "(", "'msg: %s'", ",", "exc", ".", "strerror", ")", "raise", "CommandExecutionError", "(", "'Failed to get current user'", ",", "info", "=", "exc", ")", "if", "not", "user_name", ":", "raise", "CommandExecutionError", "(", "'Failed to get current user'", ")", "return", "user_name" ]
Get the username that salt-minion is running under. If salt-minion is running as a service it should return the Local System account. If salt is running from a command prompt it should return the username that started the command prompt. .. versionadded:: 2015.5.6 Args: sam (bool, optional): False returns just the username without any domain notation. True returns the domain with the username in the SAM format. Ie: ``domain\\username`` Returns: str: Returns username CLI Example: .. code-block:: bash salt '*' user.current
[ "Get", "the", "username", "that", "salt", "-", "minion", "is", "running", "under", ".", "If", "salt", "-", "minion", "is", "running", "as", "a", "service", "it", "should", "return", "the", "Local", "System", "account", ".", "If", "salt", "is", "running", "from", "a", "command", "prompt", "it", "should", "return", "the", "username", "that", "started", "the", "command", "prompt", "." ]
python
train
30.230769
bprinty/gems
gems/datatypes.py
https://github.com/bprinty/gems/blob/3ff76407af0e71621dada744cd964611e998699c/gems/datatypes.py#L648-L658
def json(self): """ Return JSON representation of object. """ data = {} for item in self._data: if isinstance(self._data[item], filetree): data[item] = self._data[item].json() else: data[item] = self._data[item] return data
[ "def", "json", "(", "self", ")", ":", "data", "=", "{", "}", "for", "item", "in", "self", ".", "_data", ":", "if", "isinstance", "(", "self", ".", "_data", "[", "item", "]", ",", "filetree", ")", ":", "data", "[", "item", "]", "=", "self", ".", "_data", "[", "item", "]", ".", "json", "(", ")", "else", ":", "data", "[", "item", "]", "=", "self", ".", "_data", "[", "item", "]", "return", "data" ]
Return JSON representation of object.
[ "Return", "JSON", "representation", "of", "object", "." ]
python
valid
28.818182
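The json() method in the preceding record is a straightforward recursive copy: descend into children that are themselves filetrees, pass plain values through unchanged. The same pattern detached from the gems class (Node is an illustrative stand-in, not the library type):

```python
class Node:
    """Stand-in for a filetree-like container with a private _data mapping."""
    def __init__(self, data):
        self._data = data

    def json(self):
        out = {}
        for key, value in self._data.items():
            # Recurse for nested nodes, copy leaves as-is.
            out[key] = value.json() if isinstance(value, Node) else value
        return out

tree = Node({"src": Node({"main.py": "/repo/src/main.py"}), "README.md": "/repo/README.md"})
print(tree.json())
# {'src': {'main.py': '/repo/src/main.py'}, 'README.md': '/repo/README.md'}
```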
sbg/sevenbridges-python
sevenbridges/transfer/upload.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/transfer/upload.py#L142-L170
def submit(self): """ Partitions the file into chunks and submits them into group of 4 for upload on the api upload pool. :return: Futures """ futures = [] while self.submitted < 4 and not self.done(): part = self.parts.pop(0) part_number = part['part'] part_read_offset = part['offset'] part_read_limit = part['limit'] self.fp.seek(part_read_offset) part_data = self.fp.read(part_read_limit - part_read_offset) futures.append( self.pool.submit( _upload_part, self.api, self.session, self._URL['upload_part'], self.upload_id, part_number, part_data, self.retry, self.timeout ) ) self.submitted += 1 self.total_submitted += 1 return futures
[ "def", "submit", "(", "self", ")", ":", "futures", "=", "[", "]", "while", "self", ".", "submitted", "<", "4", "and", "not", "self", ".", "done", "(", ")", ":", "part", "=", "self", ".", "parts", ".", "pop", "(", "0", ")", "part_number", "=", "part", "[", "'part'", "]", "part_read_offset", "=", "part", "[", "'offset'", "]", "part_read_limit", "=", "part", "[", "'limit'", "]", "self", ".", "fp", ".", "seek", "(", "part_read_offset", ")", "part_data", "=", "self", ".", "fp", ".", "read", "(", "part_read_limit", "-", "part_read_offset", ")", "futures", ".", "append", "(", "self", ".", "pool", ".", "submit", "(", "_upload_part", ",", "self", ".", "api", ",", "self", ".", "session", ",", "self", ".", "_URL", "[", "'upload_part'", "]", ",", "self", ".", "upload_id", ",", "part_number", ",", "part_data", ",", "self", ".", "retry", ",", "self", ".", "timeout", ")", ")", "self", ".", "submitted", "+=", "1", "self", ".", "total_submitted", "+=", "1", "return", "futures" ]
Partitions the file into chunks and submits them into group of 4 for upload on the api upload pool. :return: Futures
[ "Partitions", "the", "file", "into", "chunks", "and", "submits", "them", "into", "group", "of", "4", "for", "upload", "on", "the", "api", "upload", "pool", ".", ":", "return", ":", "Futures" ]
python
train
30.758621
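Each part in the preceding record is just a byte range: seek to the offset, read limit - offset bytes, and hand the chunk to an upload worker. A minimal file-object illustration of that bookkeeping (the part table is made up; no Seven Bridges API calls are involved):

```python
import io

source = io.BytesIO(b"abcdefghij" * 3)  # 30 bytes standing in for the upload source
parts = [
    {"part": 1, "offset": 0, "limit": 10},
    {"part": 2, "offset": 10, "limit": 20},
    {"part": 3, "offset": 20, "limit": 30},
]

for part in parts:
    source.seek(part["offset"])
    chunk = source.read(part["limit"] - part["offset"])
    print(part["part"], chunk)  # each chunk would be submitted to the upload pool
```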
idlesign/steampak
steampak/libsteam/resources/apps.py
https://github.com/idlesign/steampak/blob/cb3f2c737e272b0360802d947e388df7e34f50f3/steampak/libsteam/resources/apps.py#L73-L90
def install_dir(self): """Returns application installation path. .. note:: If fails this falls back to a restricted interface, which can only be used by approved apps. :rtype: str """ max_len = 500 directory = self._get_str(self._iface.get_install_dir, [self.app_id], max_len=max_len) if not directory: # Fallback to restricted interface (can only be used by approved apps). directory = self._get_str(self._iface_list.get_install_dir, [self.app_id], max_len=max_len) return directory
[ "def", "install_dir", "(", "self", ")", ":", "max_len", "=", "500", "directory", "=", "self", ".", "_get_str", "(", "self", ".", "_iface", ".", "get_install_dir", ",", "[", "self", ".", "app_id", "]", ",", "max_len", "=", "max_len", ")", "if", "not", "directory", ":", "# Fallback to restricted interface (can only be used by approved apps).", "directory", "=", "self", ".", "_get_str", "(", "self", ".", "_iface_list", ".", "get_install_dir", ",", "[", "self", ".", "app_id", "]", ",", "max_len", "=", "max_len", ")", "return", "directory" ]
Returns application installation path. .. note:: If fails this falls back to a restricted interface, which can only be used by approved apps. :rtype: str
[ "Returns", "application", "installation", "path", "." ]
python
train
31.777778
ejeschke/ginga
ginga/misc/Task.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L999-L1048
def startall(self, wait=False, **kwdargs): """Start all of the threads in the thread pool. If _wait_ is True then don't return until all threads are up and running. Any extra keyword arguments are passed to the worker thread constructor. """ self.logger.debug("startall called") with self.regcond: while self.status != 'down': if self.status in ('start', 'up') or self.ev_quit.is_set(): # For now, abandon additional request to start self.logger.error("ignoring duplicate request to start thread pool") return self.logger.debug("waiting for threads: count=%d" % self.runningcount) self.regcond.wait() #assert(self.status == 'down') if self.ev_quit.is_set(): return self.runningcount = 0 self.status = 'start' self.workers = [] if wait: tpool = self else: tpool = None # Start all worker threads self.logger.debug("starting threads in thread pool") for i in range(self.numthreads): t = self.workerClass(self.queue, logger=self.logger, ev_quit=self.ev_quit, tpool=tpool, **kwdargs) self.workers.append(t) t.start() # if started with wait=True, then expect that threads will register # themselves and last one up will set status to "up" if wait: # Threads are on the way up. Wait until last one starts. while self.status != 'up' and not self.ev_quit.is_set(): self.logger.debug("waiting for threads: count=%d" % self.runningcount) self.regcond.wait() else: # otherwise, we just assume the pool is up self.status = 'up' self.logger.debug("startall done")
[ "def", "startall", "(", "self", ",", "wait", "=", "False", ",", "*", "*", "kwdargs", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"startall called\"", ")", "with", "self", ".", "regcond", ":", "while", "self", ".", "status", "!=", "'down'", ":", "if", "self", ".", "status", "in", "(", "'start'", ",", "'up'", ")", "or", "self", ".", "ev_quit", ".", "is_set", "(", ")", ":", "# For now, abandon additional request to start", "self", ".", "logger", ".", "error", "(", "\"ignoring duplicate request to start thread pool\"", ")", "return", "self", ".", "logger", ".", "debug", "(", "\"waiting for threads: count=%d\"", "%", "self", ".", "runningcount", ")", "self", ".", "regcond", ".", "wait", "(", ")", "#assert(self.status == 'down')", "if", "self", ".", "ev_quit", ".", "is_set", "(", ")", ":", "return", "self", ".", "runningcount", "=", "0", "self", ".", "status", "=", "'start'", "self", ".", "workers", "=", "[", "]", "if", "wait", ":", "tpool", "=", "self", "else", ":", "tpool", "=", "None", "# Start all worker threads", "self", ".", "logger", ".", "debug", "(", "\"starting threads in thread pool\"", ")", "for", "i", "in", "range", "(", "self", ".", "numthreads", ")", ":", "t", "=", "self", ".", "workerClass", "(", "self", ".", "queue", ",", "logger", "=", "self", ".", "logger", ",", "ev_quit", "=", "self", ".", "ev_quit", ",", "tpool", "=", "tpool", ",", "*", "*", "kwdargs", ")", "self", ".", "workers", ".", "append", "(", "t", ")", "t", ".", "start", "(", ")", "# if started with wait=True, then expect that threads will register", "# themselves and last one up will set status to \"up\"", "if", "wait", ":", "# Threads are on the way up. Wait until last one starts.", "while", "self", ".", "status", "!=", "'up'", "and", "not", "self", ".", "ev_quit", ".", "is_set", "(", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"waiting for threads: count=%d\"", "%", "self", ".", "runningcount", ")", "self", ".", "regcond", ".", "wait", "(", ")", "else", ":", "# otherwise, we just assume the pool is up", "self", ".", "status", "=", "'up'", "self", ".", "logger", ".", "debug", "(", "\"startall done\"", ")" ]
Start all of the threads in the thread pool. If _wait_ is True then don't return until all threads are up and running. Any extra keyword arguments are passed to the worker thread constructor.
[ "Start", "all", "of", "the", "threads", "in", "the", "thread", "pool", ".", "If", "_wait_", "is", "True", "then", "don", "t", "return", "until", "all", "threads", "are", "up", "and", "running", ".", "Any", "extra", "keyword", "arguments", "are", "passed", "to", "the", "worker", "thread", "constructor", "." ]
python
train
42.06
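The wait=True branch of startall above blocks on a condition variable until the last worker has registered. The rendezvous itself, reduced to bare threading primitives with no Ginga classes, looks like this:

```python
import threading

NUM_WORKERS = 4
cond = threading.Condition()
running = 0

def worker():
    global running
    with cond:
        running += 1       # each thread "registers" itself
        cond.notify_all()

threads = [threading.Thread(target=worker) for _ in range(NUM_WORKERS)]
for t in threads:
    t.start()

with cond:
    # Mirror startall(wait=True): don't proceed until every worker has checked in.
    while running < NUM_WORKERS:
        cond.wait()
print("pool is up")

for t in threads:
    t.join()
```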
Yubico/python-pyhsm
pyhsm/util.py
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/util.py#L103-L110
def input_validate_aead(aead, name='aead', expected_len=None, max_aead_len = pyhsm.defines.YSM_AEAD_MAX_SIZE): """ Input validation for YHSM_GeneratedAEAD or string. """ if isinstance(aead, pyhsm.aead_cmd.YHSM_GeneratedAEAD): aead = aead.data if expected_len != None: return input_validate_str(aead, name, exact_len = expected_len) else: return input_validate_str(aead, name, max_len=max_aead_len)
[ "def", "input_validate_aead", "(", "aead", ",", "name", "=", "'aead'", ",", "expected_len", "=", "None", ",", "max_aead_len", "=", "pyhsm", ".", "defines", ".", "YSM_AEAD_MAX_SIZE", ")", ":", "if", "isinstance", "(", "aead", ",", "pyhsm", ".", "aead_cmd", ".", "YHSM_GeneratedAEAD", ")", ":", "aead", "=", "aead", ".", "data", "if", "expected_len", "!=", "None", ":", "return", "input_validate_str", "(", "aead", ",", "name", ",", "exact_len", "=", "expected_len", ")", "else", ":", "return", "input_validate_str", "(", "aead", ",", "name", ",", "max_len", "=", "max_aead_len", ")" ]
Input validation for YHSM_GeneratedAEAD or string.
[ "Input", "validation", "for", "YHSM_GeneratedAEAD", "or", "string", "." ]
python
train
53.75
zengbin93/zb
zb/algorithms/nn_one_hidden.py
https://github.com/zengbin93/zb/blob/ccdb384a0b5801b459933220efcb71972c2b89a7/zb/algorithms/nn_one_hidden.py#L107-L131
def compute_cost(A2, Y): """ Computes the cross-entropy cost given in equation (13) Arguments: A2 -- The sigmoid output of the second activation, of shape (1, number of examples) Y -- "true" labels vector of shape (1, number of examples) parameters -- python dictionary containing your parameters W1, b1, W2 and b2 Returns: cost -- cross-entropy cost given equation (13) """ m = Y.shape[1] # number of example # Compute the cross-entropy cost logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y)) cost = -np.sum(logprobs) / m cost = np.squeeze(cost) # makes sure cost is the dimension we expect. # E.g., turns [[17]] into 17 assert (isinstance(cost, float)) return cost
[ "def", "compute_cost", "(", "A2", ",", "Y", ")", ":", "m", "=", "Y", ".", "shape", "[", "1", "]", "# number of example", "# Compute the cross-entropy cost", "logprobs", "=", "np", ".", "multiply", "(", "np", ".", "log", "(", "A2", ")", ",", "Y", ")", "+", "np", ".", "multiply", "(", "np", ".", "log", "(", "1", "-", "A2", ")", ",", "(", "1", "-", "Y", ")", ")", "cost", "=", "-", "np", ".", "sum", "(", "logprobs", ")", "/", "m", "cost", "=", "np", ".", "squeeze", "(", "cost", ")", "# makes sure cost is the dimension we expect.", "# E.g., turns [[17]] into 17", "assert", "(", "isinstance", "(", "cost", ",", "float", ")", ")", "return", "cost" ]
Computes the cross-entropy cost given in equation (13) Arguments: A2 -- The sigmoid output of the second activation, of shape (1, number of examples) Y -- "true" labels vector of shape (1, number of examples) parameters -- python dictionary containing your parameters W1, b1, W2 and b2 Returns: cost -- cross-entropy cost given equation (13)
[ "Computes", "the", "cross", "-", "entropy", "cost", "given", "in", "equation", "(", "13", ")" ]
python
train
29.8
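The cost in this record is the standard cross-entropy J = -(1/m) Σ [ y log a + (1 - y) log(1 - a) ] over the second-layer activations. A quick numpy check on a toy batch, using the (1, m) shapes the docstring describes:

```python
import numpy as np

A2 = np.array([[0.9, 0.2, 0.7]])  # predicted probabilities, shape (1, m)
Y = np.array([[1, 0, 1]])         # true labels, shape (1, m)

m = Y.shape[1]
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
cost = float(np.squeeze(-np.sum(logprobs) / m))
print(round(cost, 4))  # 0.2284
```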
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L215-L220
def jsonarrlen(self, name, path=Path.rootPath()): """ Returns the length of the array JSON value under ``path`` at key ``name`` """ return self.execute_command('JSON.ARRLEN', name, str_path(path))
[ "def", "jsonarrlen", "(", "self", ",", "name", ",", "path", "=", "Path", ".", "rootPath", "(", ")", ")", ":", "return", "self", ".", "execute_command", "(", "'JSON.ARRLEN'", ",", "name", ",", "str_path", "(", "path", ")", ")" ]
Returns the length of the array JSON value under ``path`` at key ``name``
[ "Returns", "the", "length", "of", "the", "array", "JSON", "value", "under", "path", "at", "key", "name" ]
python
train
38.5
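Calling the method above requires a Redis server with the ReJSON module loaded; the host, port, and key name below are placeholders. jsonset and Path.rootPath() come from the same rejson client shown in the record.

```python
from rejson import Client, Path

rj = Client(host="localhost", port=6379, decode_responses=True)
rj.jsonset("colors", Path.rootPath(), ["red", "green", "blue"])

print(rj.jsonarrlen("colors"))                   # 3, root path is the default
print(rj.jsonarrlen("colors", Path.rootPath()))  # 3, with an explicit path
```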
opencobra/cobrapy
cobra/io/sbml.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/io/sbml.py#L843-L1103
def _model_to_sbml(cobra_model, f_replace=None, units=True): """Convert Cobra model to SBMLDocument. Parameters ---------- cobra_model : cobra.core.Model Cobra model instance f_replace : dict of replacement functions Replacement to apply on identifiers. units : boolean Should the FLUX_UNITS be written in the SBMLDocument. Returns ------- libsbml.SBMLDocument """ if f_replace is None: f_replace = {} sbml_ns = libsbml.SBMLNamespaces(3, 1) # SBML L3V1 sbml_ns.addPackageNamespace("fbc", 2) # fbc-v2 doc = libsbml.SBMLDocument(sbml_ns) # noqa: E501 type: libsbml.SBMLDocument doc.setPackageRequired("fbc", False) doc.setSBOTerm(SBO_FBA_FRAMEWORK) model = doc.createModel() # type: libsbml.Model model_fbc = model.getPlugin("fbc") # type: libsbml.FbcModelPlugin model_fbc.setStrict(True) if cobra_model.id is not None: model.setId(cobra_model.id) model.setMetaId("meta_" + cobra_model.id) else: model.setMetaId("meta_model") if cobra_model.name is not None: model.setName(cobra_model.name) _sbase_annotations(model, cobra_model.annotation) # Meta information (ModelHistory) if hasattr(cobra_model, "_sbml"): meta = cobra_model._sbml if "annotation" in meta: _sbase_annotations(doc, meta["annotation"]) if "notes" in meta: _sbase_notes_dict(doc, meta["notes"]) history = libsbml.ModelHistory() # type: libsbml.ModelHistory if "created" in meta and meta["created"]: history.setCreatedDate(meta["created"]) else: time = datetime.datetime.now() timestr = time.strftime('%Y-%m-%dT%H:%M:%S') date = libsbml.Date(timestr) _check(history.setCreatedDate(date), 'set creation date') _check(history.setModifiedDate(date), 'set modified date') if "creators" in meta: for cobra_creator in meta["creators"]: creator = libsbml.ModelCreator() # noqa: E501 type: libsbml.ModelCreator if cobra_creator.get("familyName", None): creator.setFamilyName(cobra_creator["familyName"]) if cobra_creator.get("givenName", None): creator.setGivenName(cobra_creator["givenName"]) if cobra_creator.get("organisation", None): creator.setOrganisation(cobra_creator["organisation"]) if cobra_creator.get("email", None): creator.setEmail(cobra_creator["email"]) _check(history.addCreator(creator), "adding creator to ModelHistory.") _check(model.setModelHistory(history), 'set model history') # Units if units: flux_udef = model.createUnitDefinition() # noqa: E501 type: libsbml.UnitDefinition flux_udef.setId(UNITS_FLUX[0]) for u in UNITS_FLUX[1]: unit = flux_udef.createUnit() # type: libsbml.Unit unit.setKind(u.kind) unit.setExponent(u.exponent) unit.setScale(u.scale) unit.setMultiplier(u.multiplier) # minimum and maximum value from model if len(cobra_model.reactions) > 0: min_value = min(cobra_model.reactions.list_attr("lower_bound")) max_value = max(cobra_model.reactions.list_attr("upper_bound")) else: min_value = config.lower_bound max_value = config.upper_bound _create_parameter(model, pid=LOWER_BOUND_ID, value=min_value, sbo=SBO_DEFAULT_FLUX_BOUND) _create_parameter(model, pid=UPPER_BOUND_ID, value=max_value, sbo=SBO_DEFAULT_FLUX_BOUND) _create_parameter(model, pid=ZERO_BOUND_ID, value=0, sbo=SBO_DEFAULT_FLUX_BOUND) _create_parameter(model, pid=BOUND_MINUS_INF, value=-float("Inf"), sbo=SBO_FLUX_BOUND) _create_parameter(model, pid=BOUND_PLUS_INF, value=float("Inf"), sbo=SBO_FLUX_BOUND) # Compartments # FIXME: use first class compartment model (and write notes & annotations) # (https://github.com/opencobra/cobrapy/issues/811) for cid, name in iteritems(cobra_model.compartments): compartment = model.createCompartment() # type: libsbml.Compartment 
compartment.setId(cid) compartment.setName(name) compartment.setConstant(True) # FIXME: write annotations and notes # _sbase_notes(c, com.notes) # _sbase_annotations(c, com.annotation) # Species for metabolite in cobra_model.metabolites: specie = model.createSpecies() # type: libsbml.Species mid = metabolite.id if f_replace and F_SPECIE_REV in f_replace: mid = f_replace[F_SPECIE_REV](mid) specie.setId(mid) specie.setConstant(False) specie.setBoundaryCondition(False) specie.setHasOnlySubstanceUnits(False) specie.setName(metabolite.name) specie.setCompartment(metabolite.compartment) s_fbc = specie.getPlugin("fbc") # type: libsbml.FbcSpeciesPlugin if metabolite.charge is not None: s_fbc.setCharge(metabolite.charge) if metabolite.formula is not None: s_fbc.setChemicalFormula(metabolite.formula) _sbase_annotations(specie, metabolite.annotation) _sbase_notes_dict(specie, metabolite.notes) # Genes for cobra_gene in cobra_model.genes: gp = model_fbc.createGeneProduct() # type: libsbml.GeneProduct gid = cobra_gene.id if f_replace and F_GENE_REV in f_replace: gid = f_replace[F_GENE_REV](gid) gp.setId(gid) gname = cobra_gene.name if gname is None or len(gname) == 0: gname = gid gp.setName(gname) gp.setLabel(gid) _sbase_annotations(gp, cobra_gene.annotation) _sbase_notes_dict(gp, cobra_gene.notes) # Objective objective = model_fbc.createObjective() # type: libsbml.Objective objective.setId("obj") objective.setType(SHORT_LONG_DIRECTION[cobra_model.objective.direction]) model_fbc.setActiveObjectiveId("obj") # Reactions reaction_coefficients = linear_reaction_coefficients(cobra_model) for cobra_reaction in cobra_model.reactions: rid = cobra_reaction.id if f_replace and F_REACTION_REV in f_replace: rid = f_replace[F_REACTION_REV](rid) reaction = model.createReaction() # type: libsbml.Reaction reaction.setId(rid) reaction.setName(cobra_reaction.name) reaction.setFast(False) reaction.setReversible((cobra_reaction.lower_bound < 0)) _sbase_annotations(reaction, cobra_reaction.annotation) _sbase_notes_dict(reaction, cobra_reaction.notes) # stoichiometry for metabolite, stoichiometry in iteritems(cobra_reaction._metabolites): # noqa: E501 sid = metabolite.id if f_replace and F_SPECIE_REV in f_replace: sid = f_replace[F_SPECIE_REV](sid) if stoichiometry < 0: sref = reaction.createReactant() # noqa: E501 type: libsbml.SpeciesReference sref.setSpecies(sid) sref.setStoichiometry(-stoichiometry) sref.setConstant(True) else: sref = reaction.createProduct() # noqa: E501 type: libsbml.SpeciesReference sref.setSpecies(sid) sref.setStoichiometry(stoichiometry) sref.setConstant(True) # bounds r_fbc = reaction.getPlugin("fbc") # type: libsbml.FbcReactionPlugin r_fbc.setLowerFluxBound(_create_bound(model, cobra_reaction, "lower_bound", f_replace=f_replace, units=units, flux_udef=flux_udef)) r_fbc.setUpperFluxBound(_create_bound(model, cobra_reaction, "upper_bound", f_replace=f_replace, units=units, flux_udef=flux_udef)) # GPR gpr = cobra_reaction.gene_reaction_rule if gpr is not None and len(gpr) > 0: # replace ids in string if f_replace and F_GENE_REV in f_replace: gpr = gpr.replace('(', '( ') gpr = gpr.replace(')', ' )') tokens = gpr.split(' ') for k in range(len(tokens)): if tokens[k] not in [' ', 'and', 'or', '(', ')']: tokens[k] = f_replace[F_GENE_REV](tokens[k]) gpr_new = " ".join(tokens) gpa = r_fbc.createGeneProductAssociation() # noqa: E501 type: libsbml.GeneProductAssociation gpa.setAssociation(gpr_new) # objective coefficients if reaction_coefficients.get(cobra_reaction, 0) != 0: flux_obj = 
objective.createFluxObjective() # noqa: E501 type: libsbml.FluxObjective flux_obj.setReaction(rid) flux_obj.setCoefficient(cobra_reaction.objective_coefficient) # write groups if len(cobra_model.groups) > 0: doc.enablePackage( "http://www.sbml.org/sbml/level3/version1/groups/version1", "groups", True) doc.setPackageRequired("groups", False) model_group = model.getPlugin("groups") # noqa: E501 type: libsbml.GroupsModelPlugin for cobra_group in cobra_model.groups: group = model_group.createGroup() # type: libsbml.Group group.setId(cobra_group.id) group.setName(cobra_group.name) group.setKind(cobra_group.kind) _sbase_notes_dict(group, cobra_group.notes) _sbase_annotations(group, cobra_group.annotation) for cobra_member in cobra_group.members: member = group.createMember() # type: libsbml.Member mid = cobra_member.id m_type = str(type(cobra_member)) # id replacements if "Reaction" in m_type: if f_replace and F_REACTION_REV in f_replace: mid = f_replace[F_REACTION_REV](mid) if "Metabolite" in m_type: if f_replace and F_SPECIE_REV in f_replace: mid = f_replace[F_SPECIE_REV](mid) if "Gene" in m_type: if f_replace and F_GENE_REV in f_replace: mid = f_replace[F_GENE_REV](mid) member.setIdRef(mid) if cobra_member.name and len(cobra_member.name) > 0: member.setName(cobra_member.name) return doc
[ "def", "_model_to_sbml", "(", "cobra_model", ",", "f_replace", "=", "None", ",", "units", "=", "True", ")", ":", "if", "f_replace", "is", "None", ":", "f_replace", "=", "{", "}", "sbml_ns", "=", "libsbml", ".", "SBMLNamespaces", "(", "3", ",", "1", ")", "# SBML L3V1", "sbml_ns", ".", "addPackageNamespace", "(", "\"fbc\"", ",", "2", ")", "# fbc-v2", "doc", "=", "libsbml", ".", "SBMLDocument", "(", "sbml_ns", ")", "# noqa: E501 type: libsbml.SBMLDocument", "doc", ".", "setPackageRequired", "(", "\"fbc\"", ",", "False", ")", "doc", ".", "setSBOTerm", "(", "SBO_FBA_FRAMEWORK", ")", "model", "=", "doc", ".", "createModel", "(", ")", "# type: libsbml.Model", "model_fbc", "=", "model", ".", "getPlugin", "(", "\"fbc\"", ")", "# type: libsbml.FbcModelPlugin", "model_fbc", ".", "setStrict", "(", "True", ")", "if", "cobra_model", ".", "id", "is", "not", "None", ":", "model", ".", "setId", "(", "cobra_model", ".", "id", ")", "model", ".", "setMetaId", "(", "\"meta_\"", "+", "cobra_model", ".", "id", ")", "else", ":", "model", ".", "setMetaId", "(", "\"meta_model\"", ")", "if", "cobra_model", ".", "name", "is", "not", "None", ":", "model", ".", "setName", "(", "cobra_model", ".", "name", ")", "_sbase_annotations", "(", "model", ",", "cobra_model", ".", "annotation", ")", "# Meta information (ModelHistory)", "if", "hasattr", "(", "cobra_model", ",", "\"_sbml\"", ")", ":", "meta", "=", "cobra_model", ".", "_sbml", "if", "\"annotation\"", "in", "meta", ":", "_sbase_annotations", "(", "doc", ",", "meta", "[", "\"annotation\"", "]", ")", "if", "\"notes\"", "in", "meta", ":", "_sbase_notes_dict", "(", "doc", ",", "meta", "[", "\"notes\"", "]", ")", "history", "=", "libsbml", ".", "ModelHistory", "(", ")", "# type: libsbml.ModelHistory", "if", "\"created\"", "in", "meta", "and", "meta", "[", "\"created\"", "]", ":", "history", ".", "setCreatedDate", "(", "meta", "[", "\"created\"", "]", ")", "else", ":", "time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "timestr", "=", "time", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", "date", "=", "libsbml", ".", "Date", "(", "timestr", ")", "_check", "(", "history", ".", "setCreatedDate", "(", "date", ")", ",", "'set creation date'", ")", "_check", "(", "history", ".", "setModifiedDate", "(", "date", ")", ",", "'set modified date'", ")", "if", "\"creators\"", "in", "meta", ":", "for", "cobra_creator", "in", "meta", "[", "\"creators\"", "]", ":", "creator", "=", "libsbml", ".", "ModelCreator", "(", ")", "# noqa: E501 type: libsbml.ModelCreator", "if", "cobra_creator", ".", "get", "(", "\"familyName\"", ",", "None", ")", ":", "creator", ".", "setFamilyName", "(", "cobra_creator", "[", "\"familyName\"", "]", ")", "if", "cobra_creator", ".", "get", "(", "\"givenName\"", ",", "None", ")", ":", "creator", ".", "setGivenName", "(", "cobra_creator", "[", "\"givenName\"", "]", ")", "if", "cobra_creator", ".", "get", "(", "\"organisation\"", ",", "None", ")", ":", "creator", ".", "setOrganisation", "(", "cobra_creator", "[", "\"organisation\"", "]", ")", "if", "cobra_creator", ".", "get", "(", "\"email\"", ",", "None", ")", ":", "creator", ".", "setEmail", "(", "cobra_creator", "[", "\"email\"", "]", ")", "_check", "(", "history", ".", "addCreator", "(", "creator", ")", ",", "\"adding creator to ModelHistory.\"", ")", "_check", "(", "model", ".", "setModelHistory", "(", "history", ")", ",", "'set model history'", ")", "# Units", "if", "units", ":", "flux_udef", "=", "model", ".", "createUnitDefinition", "(", ")", "# noqa: E501 type: libsbml.UnitDefinition", 
"flux_udef", ".", "setId", "(", "UNITS_FLUX", "[", "0", "]", ")", "for", "u", "in", "UNITS_FLUX", "[", "1", "]", ":", "unit", "=", "flux_udef", ".", "createUnit", "(", ")", "# type: libsbml.Unit", "unit", ".", "setKind", "(", "u", ".", "kind", ")", "unit", ".", "setExponent", "(", "u", ".", "exponent", ")", "unit", ".", "setScale", "(", "u", ".", "scale", ")", "unit", ".", "setMultiplier", "(", "u", ".", "multiplier", ")", "# minimum and maximum value from model", "if", "len", "(", "cobra_model", ".", "reactions", ")", ">", "0", ":", "min_value", "=", "min", "(", "cobra_model", ".", "reactions", ".", "list_attr", "(", "\"lower_bound\"", ")", ")", "max_value", "=", "max", "(", "cobra_model", ".", "reactions", ".", "list_attr", "(", "\"upper_bound\"", ")", ")", "else", ":", "min_value", "=", "config", ".", "lower_bound", "max_value", "=", "config", ".", "upper_bound", "_create_parameter", "(", "model", ",", "pid", "=", "LOWER_BOUND_ID", ",", "value", "=", "min_value", ",", "sbo", "=", "SBO_DEFAULT_FLUX_BOUND", ")", "_create_parameter", "(", "model", ",", "pid", "=", "UPPER_BOUND_ID", ",", "value", "=", "max_value", ",", "sbo", "=", "SBO_DEFAULT_FLUX_BOUND", ")", "_create_parameter", "(", "model", ",", "pid", "=", "ZERO_BOUND_ID", ",", "value", "=", "0", ",", "sbo", "=", "SBO_DEFAULT_FLUX_BOUND", ")", "_create_parameter", "(", "model", ",", "pid", "=", "BOUND_MINUS_INF", ",", "value", "=", "-", "float", "(", "\"Inf\"", ")", ",", "sbo", "=", "SBO_FLUX_BOUND", ")", "_create_parameter", "(", "model", ",", "pid", "=", "BOUND_PLUS_INF", ",", "value", "=", "float", "(", "\"Inf\"", ")", ",", "sbo", "=", "SBO_FLUX_BOUND", ")", "# Compartments", "# FIXME: use first class compartment model (and write notes & annotations)", "# (https://github.com/opencobra/cobrapy/issues/811)", "for", "cid", ",", "name", "in", "iteritems", "(", "cobra_model", ".", "compartments", ")", ":", "compartment", "=", "model", ".", "createCompartment", "(", ")", "# type: libsbml.Compartment", "compartment", ".", "setId", "(", "cid", ")", "compartment", ".", "setName", "(", "name", ")", "compartment", ".", "setConstant", "(", "True", ")", "# FIXME: write annotations and notes", "# _sbase_notes(c, com.notes)", "# _sbase_annotations(c, com.annotation)", "# Species", "for", "metabolite", "in", "cobra_model", ".", "metabolites", ":", "specie", "=", "model", ".", "createSpecies", "(", ")", "# type: libsbml.Species", "mid", "=", "metabolite", ".", "id", "if", "f_replace", "and", "F_SPECIE_REV", "in", "f_replace", ":", "mid", "=", "f_replace", "[", "F_SPECIE_REV", "]", "(", "mid", ")", "specie", ".", "setId", "(", "mid", ")", "specie", ".", "setConstant", "(", "False", ")", "specie", ".", "setBoundaryCondition", "(", "False", ")", "specie", ".", "setHasOnlySubstanceUnits", "(", "False", ")", "specie", ".", "setName", "(", "metabolite", ".", "name", ")", "specie", ".", "setCompartment", "(", "metabolite", ".", "compartment", ")", "s_fbc", "=", "specie", ".", "getPlugin", "(", "\"fbc\"", ")", "# type: libsbml.FbcSpeciesPlugin", "if", "metabolite", ".", "charge", "is", "not", "None", ":", "s_fbc", ".", "setCharge", "(", "metabolite", ".", "charge", ")", "if", "metabolite", ".", "formula", "is", "not", "None", ":", "s_fbc", ".", "setChemicalFormula", "(", "metabolite", ".", "formula", ")", "_sbase_annotations", "(", "specie", ",", "metabolite", ".", "annotation", ")", "_sbase_notes_dict", "(", "specie", ",", "metabolite", ".", "notes", ")", "# Genes", "for", "cobra_gene", "in", "cobra_model", ".", "genes", ":", "gp", "=", "model_fbc", ".", 
"createGeneProduct", "(", ")", "# type: libsbml.GeneProduct", "gid", "=", "cobra_gene", ".", "id", "if", "f_replace", "and", "F_GENE_REV", "in", "f_replace", ":", "gid", "=", "f_replace", "[", "F_GENE_REV", "]", "(", "gid", ")", "gp", ".", "setId", "(", "gid", ")", "gname", "=", "cobra_gene", ".", "name", "if", "gname", "is", "None", "or", "len", "(", "gname", ")", "==", "0", ":", "gname", "=", "gid", "gp", ".", "setName", "(", "gname", ")", "gp", ".", "setLabel", "(", "gid", ")", "_sbase_annotations", "(", "gp", ",", "cobra_gene", ".", "annotation", ")", "_sbase_notes_dict", "(", "gp", ",", "cobra_gene", ".", "notes", ")", "# Objective", "objective", "=", "model_fbc", ".", "createObjective", "(", ")", "# type: libsbml.Objective", "objective", ".", "setId", "(", "\"obj\"", ")", "objective", ".", "setType", "(", "SHORT_LONG_DIRECTION", "[", "cobra_model", ".", "objective", ".", "direction", "]", ")", "model_fbc", ".", "setActiveObjectiveId", "(", "\"obj\"", ")", "# Reactions", "reaction_coefficients", "=", "linear_reaction_coefficients", "(", "cobra_model", ")", "for", "cobra_reaction", "in", "cobra_model", ".", "reactions", ":", "rid", "=", "cobra_reaction", ".", "id", "if", "f_replace", "and", "F_REACTION_REV", "in", "f_replace", ":", "rid", "=", "f_replace", "[", "F_REACTION_REV", "]", "(", "rid", ")", "reaction", "=", "model", ".", "createReaction", "(", ")", "# type: libsbml.Reaction", "reaction", ".", "setId", "(", "rid", ")", "reaction", ".", "setName", "(", "cobra_reaction", ".", "name", ")", "reaction", ".", "setFast", "(", "False", ")", "reaction", ".", "setReversible", "(", "(", "cobra_reaction", ".", "lower_bound", "<", "0", ")", ")", "_sbase_annotations", "(", "reaction", ",", "cobra_reaction", ".", "annotation", ")", "_sbase_notes_dict", "(", "reaction", ",", "cobra_reaction", ".", "notes", ")", "# stoichiometry", "for", "metabolite", ",", "stoichiometry", "in", "iteritems", "(", "cobra_reaction", ".", "_metabolites", ")", ":", "# noqa: E501", "sid", "=", "metabolite", ".", "id", "if", "f_replace", "and", "F_SPECIE_REV", "in", "f_replace", ":", "sid", "=", "f_replace", "[", "F_SPECIE_REV", "]", "(", "sid", ")", "if", "stoichiometry", "<", "0", ":", "sref", "=", "reaction", ".", "createReactant", "(", ")", "# noqa: E501 type: libsbml.SpeciesReference", "sref", ".", "setSpecies", "(", "sid", ")", "sref", ".", "setStoichiometry", "(", "-", "stoichiometry", ")", "sref", ".", "setConstant", "(", "True", ")", "else", ":", "sref", "=", "reaction", ".", "createProduct", "(", ")", "# noqa: E501 type: libsbml.SpeciesReference", "sref", ".", "setSpecies", "(", "sid", ")", "sref", ".", "setStoichiometry", "(", "stoichiometry", ")", "sref", ".", "setConstant", "(", "True", ")", "# bounds", "r_fbc", "=", "reaction", ".", "getPlugin", "(", "\"fbc\"", ")", "# type: libsbml.FbcReactionPlugin", "r_fbc", ".", "setLowerFluxBound", "(", "_create_bound", "(", "model", ",", "cobra_reaction", ",", "\"lower_bound\"", ",", "f_replace", "=", "f_replace", ",", "units", "=", "units", ",", "flux_udef", "=", "flux_udef", ")", ")", "r_fbc", ".", "setUpperFluxBound", "(", "_create_bound", "(", "model", ",", "cobra_reaction", ",", "\"upper_bound\"", ",", "f_replace", "=", "f_replace", ",", "units", "=", "units", ",", "flux_udef", "=", "flux_udef", ")", ")", "# GPR", "gpr", "=", "cobra_reaction", ".", "gene_reaction_rule", "if", "gpr", "is", "not", "None", "and", "len", "(", "gpr", ")", ">", "0", ":", "# replace ids in string", "if", "f_replace", "and", "F_GENE_REV", "in", "f_replace", ":", "gpr", "=", "gpr", 
".", "replace", "(", "'('", ",", "'( '", ")", "gpr", "=", "gpr", ".", "replace", "(", "')'", ",", "' )'", ")", "tokens", "=", "gpr", ".", "split", "(", "' '", ")", "for", "k", "in", "range", "(", "len", "(", "tokens", ")", ")", ":", "if", "tokens", "[", "k", "]", "not", "in", "[", "' '", ",", "'and'", ",", "'or'", ",", "'('", ",", "')'", "]", ":", "tokens", "[", "k", "]", "=", "f_replace", "[", "F_GENE_REV", "]", "(", "tokens", "[", "k", "]", ")", "gpr_new", "=", "\" \"", ".", "join", "(", "tokens", ")", "gpa", "=", "r_fbc", ".", "createGeneProductAssociation", "(", ")", "# noqa: E501 type: libsbml.GeneProductAssociation", "gpa", ".", "setAssociation", "(", "gpr_new", ")", "# objective coefficients", "if", "reaction_coefficients", ".", "get", "(", "cobra_reaction", ",", "0", ")", "!=", "0", ":", "flux_obj", "=", "objective", ".", "createFluxObjective", "(", ")", "# noqa: E501 type: libsbml.FluxObjective", "flux_obj", ".", "setReaction", "(", "rid", ")", "flux_obj", ".", "setCoefficient", "(", "cobra_reaction", ".", "objective_coefficient", ")", "# write groups", "if", "len", "(", "cobra_model", ".", "groups", ")", ">", "0", ":", "doc", ".", "enablePackage", "(", "\"http://www.sbml.org/sbml/level3/version1/groups/version1\"", ",", "\"groups\"", ",", "True", ")", "doc", ".", "setPackageRequired", "(", "\"groups\"", ",", "False", ")", "model_group", "=", "model", ".", "getPlugin", "(", "\"groups\"", ")", "# noqa: E501 type: libsbml.GroupsModelPlugin", "for", "cobra_group", "in", "cobra_model", ".", "groups", ":", "group", "=", "model_group", ".", "createGroup", "(", ")", "# type: libsbml.Group", "group", ".", "setId", "(", "cobra_group", ".", "id", ")", "group", ".", "setName", "(", "cobra_group", ".", "name", ")", "group", ".", "setKind", "(", "cobra_group", ".", "kind", ")", "_sbase_notes_dict", "(", "group", ",", "cobra_group", ".", "notes", ")", "_sbase_annotations", "(", "group", ",", "cobra_group", ".", "annotation", ")", "for", "cobra_member", "in", "cobra_group", ".", "members", ":", "member", "=", "group", ".", "createMember", "(", ")", "# type: libsbml.Member", "mid", "=", "cobra_member", ".", "id", "m_type", "=", "str", "(", "type", "(", "cobra_member", ")", ")", "# id replacements", "if", "\"Reaction\"", "in", "m_type", ":", "if", "f_replace", "and", "F_REACTION_REV", "in", "f_replace", ":", "mid", "=", "f_replace", "[", "F_REACTION_REV", "]", "(", "mid", ")", "if", "\"Metabolite\"", "in", "m_type", ":", "if", "f_replace", "and", "F_SPECIE_REV", "in", "f_replace", ":", "mid", "=", "f_replace", "[", "F_SPECIE_REV", "]", "(", "mid", ")", "if", "\"Gene\"", "in", "m_type", ":", "if", "f_replace", "and", "F_GENE_REV", "in", "f_replace", ":", "mid", "=", "f_replace", "[", "F_GENE_REV", "]", "(", "mid", ")", "member", ".", "setIdRef", "(", "mid", ")", "if", "cobra_member", ".", "name", "and", "len", "(", "cobra_member", ".", "name", ")", ">", "0", ":", "member", ".", "setName", "(", "cobra_member", ".", "name", ")", "return", "doc" ]
Convert Cobra model to SBMLDocument. Parameters ---------- cobra_model : cobra.core.Model Cobra model instance f_replace : dict of replacement functions Replacement to apply on identifiers. units : boolean Should the FLUX_UNITS be written in the SBMLDocument. Returns ------- libsbml.SBMLDocument
[ "Convert", "Cobra", "model", "to", "SBMLDocument", "." ]
python
valid
40.781609
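Most of this record is libsbml plumbing, but the gene-reaction-rule rewrite near the end is a self-contained string transform: pad parentheses with spaces, split on spaces, and push every token that is not an operator or bracket through the id-replacement function. A toy version (the G_ prefix is an illustrative replacement, not cobrapy's actual id-clipping scheme):

```python
def rewrite_gpr(gpr, replace_id):
    gpr = gpr.replace("(", "( ").replace(")", " )")
    tokens = gpr.split(" ")
    for k, tok in enumerate(tokens):
        if tok not in [" ", "and", "or", "(", ")"]:
            tokens[k] = replace_id(tok)
    return " ".join(tokens)

print(rewrite_gpr("(b0001 and b0002) or b0003", lambda g: "G_" + g))
# -> ( G_b0001 and G_b0002 ) or G_b0003
```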
spdx/tools-python
spdx/parsers/rdf.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/rdf.py#L606-L612
def p_file_comments_on_lics(self, f_term, predicate): """Sets file license comment.""" try: for _, _, comment in self.graph.triples((f_term, predicate, None)): self.builder.set_file_license_comment(self.doc, six.text_type(comment)) except CardinalityError: self.more_than_one_error('file comments on license')
[ "def", "p_file_comments_on_lics", "(", "self", ",", "f_term", ",", "predicate", ")", ":", "try", ":", "for", "_", ",", "_", ",", "comment", "in", "self", ".", "graph", ".", "triples", "(", "(", "f_term", ",", "predicate", ",", "None", ")", ")", ":", "self", ".", "builder", ".", "set_file_license_comment", "(", "self", ".", "doc", ",", "six", ".", "text_type", "(", "comment", ")", ")", "except", "CardinalityError", ":", "self", ".", "more_than_one_error", "(", "'file comments on license'", ")" ]
Sets file license comment.
[ "Sets", "file", "license", "comment", "." ]
python
valid
52.428571
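The triple-pattern lookup in this record is plain rdflib usage: pass a (subject, predicate, None) pattern and iterate over the matches. A minimal standalone example with placeholder URIs:

```python
from rdflib import Graph, Literal, URIRef

g = Graph()
f_term = URIRef("http://example.org/spdx/file-1")            # placeholder subject
predicate = URIRef("http://example.org/ns#licenseComments")  # placeholder predicate
g.add((f_term, predicate, Literal("Cleared by legal review")))

# Same pattern as the record: fix subject and predicate, leave the object open.
for _, _, comment in g.triples((f_term, predicate, None)):
    print(str(comment))  # -> Cleared by legal review
```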
yjzhang/uncurl_python
uncurl/lineage.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/lineage.py#L195-L213
def pseudotime(starting_node, edges, fitted_vals): """ Args: starting_node (int): index of the starting node edges (list): list of tuples (node1, node2) fitted_vals (array): output of lineage (2 x cells) Returns: A 1d array containing the pseudotime value of each cell. """ # TODO # 1. calculate a distance matrix... distances = np.array([[sum((x - y)**2) for x in fitted_vals.T] for y in fitted_vals.T]) # 2. start from the root node/cell, calculate distance along graph distance_dict = graph_distances(starting_node, edges, distances) output = [] for i in range(fitted_vals.shape[1]): output.append(distance_dict[i]) return np.array(output)
[ "def", "pseudotime", "(", "starting_node", ",", "edges", ",", "fitted_vals", ")", ":", "# TODO", "# 1. calculate a distance matrix...", "distances", "=", "np", ".", "array", "(", "[", "[", "sum", "(", "(", "x", "-", "y", ")", "**", "2", ")", "for", "x", "in", "fitted_vals", ".", "T", "]", "for", "y", "in", "fitted_vals", ".", "T", "]", ")", "# 2. start from the root node/cell, calculate distance along graph", "distance_dict", "=", "graph_distances", "(", "starting_node", ",", "edges", ",", "distances", ")", "output", "=", "[", "]", "for", "i", "in", "range", "(", "fitted_vals", ".", "shape", "[", "1", "]", ")", ":", "output", ".", "append", "(", "distance_dict", "[", "i", "]", ")", "return", "np", ".", "array", "(", "output", ")" ]
Args: starting_node (int): index of the starting node edges (list): list of tuples (node1, node2) fitted_vals (array): output of lineage (2 x cells) Returns: A 1d array containing the pseudotime value of each cell.
[ "Args", ":", "starting_node", "(", "int", ")", ":", "index", "of", "the", "starting", "node", "edges", "(", "list", ")", ":", "list", "of", "tuples", "(", "node1", "node2", ")", "fitted_vals", "(", "array", ")", ":", "output", "of", "lineage", "(", "2", "x", "cells", ")" ]
python
train
37.526316
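The first half of pseudotime() above builds an all-pairs squared-distance matrix over the 2 x cells array of fitted coordinates; graph_distances is the repo's own helper and is not reproduced here. The distance-matrix step alone, with numpy:

```python
import numpy as np

fitted_vals = np.array([[0.0, 1.0, 2.0],
                        [0.0, 0.0, 1.0]])  # 2 x cells; each column is a cell

distances = np.array([[np.sum((x - y) ** 2) for x in fitted_vals.T]
                      for y in fitted_vals.T])
print(distances)
# [[0. 1. 5.]
#  [1. 0. 2.]
#  [5. 2. 0.]]
```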
googledatalab/pydatalab
google/datalab/contrib/mlworkbench/_local_predict.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/contrib/mlworkbench/_local_predict.py#L242-L297
def get_probs_for_labels(labels, prediction_results): """ Given ML Workbench prediction results, get probs of each label for each instance. The prediction results are like: [ {'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1}, {'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01}, ... ] Each instance is ordered by prob. But in some cases probs are needed for fixed order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the results of above is expected to be: [ [0.8, 0.1, 0.0], [0.01, 0.0, 0.9], ... ] Note that the sum of each instance may not be always 1. If model's top_n is set to none-zero, and is less than number of labels, then prediction results may not contain probs for all labels. Args: labels: a list of labels specifying the order of the labels. prediction_results: a pandas DataFrame containing prediction results, usually returned by get_prediction_results() call. Returns: A list of list of probs for each class. """ probs = [] if 'probability' in prediction_results: # 'probability' exists so top-n is set to none zero, and results are like # "predicted, predicted_2,...,probability,probability_2,... for i, r in prediction_results.iterrows(): probs_one = [0.0] * len(labels) for k, v in six.iteritems(r): if v in labels and k.startswith('predicted'): if k == 'predict': prob_name = 'probability' else: prob_name = 'probability' + k[9:] probs_one[labels.index(v)] = r[prob_name] probs.append(probs_one) return probs else: # 'probability' does not exist, so top-n is set to zero. Results are like # "predicted, class_name1, class_name2,... for i, r in prediction_results.iterrows(): probs_one = [0.0] * len(labels) for k, v in six.iteritems(r): if k in labels: probs_one[labels.index(k)] = v probs.append(probs_one) return probs
[ "def", "get_probs_for_labels", "(", "labels", ",", "prediction_results", ")", ":", "probs", "=", "[", "]", "if", "'probability'", "in", "prediction_results", ":", "# 'probability' exists so top-n is set to none zero, and results are like", "# \"predicted, predicted_2,...,probability,probability_2,...", "for", "i", ",", "r", "in", "prediction_results", ".", "iterrows", "(", ")", ":", "probs_one", "=", "[", "0.0", "]", "*", "len", "(", "labels", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "r", ")", ":", "if", "v", "in", "labels", "and", "k", ".", "startswith", "(", "'predicted'", ")", ":", "if", "k", "==", "'predict'", ":", "prob_name", "=", "'probability'", "else", ":", "prob_name", "=", "'probability'", "+", "k", "[", "9", ":", "]", "probs_one", "[", "labels", ".", "index", "(", "v", ")", "]", "=", "r", "[", "prob_name", "]", "probs", ".", "append", "(", "probs_one", ")", "return", "probs", "else", ":", "# 'probability' does not exist, so top-n is set to zero. Results are like", "# \"predicted, class_name1, class_name2,...", "for", "i", ",", "r", "in", "prediction_results", ".", "iterrows", "(", ")", ":", "probs_one", "=", "[", "0.0", "]", "*", "len", "(", "labels", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "r", ")", ":", "if", "k", "in", "labels", ":", "probs_one", "[", "labels", ".", "index", "(", "k", ")", "]", "=", "v", "probs", ".", "append", "(", "probs_one", ")", "return", "probs" ]
Given ML Workbench prediction results, get probs of each label for each instance. The prediction results are like: [ {'predicted': 'daisy', 'probability': 0.8, 'predicted_2': 'rose', 'probability_2': 0.1}, {'predicted': 'sunflower', 'probability': 0.9, 'predicted_2': 'daisy', 'probability_2': 0.01}, ... ] Each instance is ordered by prob. But in some cases probs are needed for fixed order of labels. For example, given labels = ['daisy', 'rose', 'sunflower'], the results of above is expected to be: [ [0.8, 0.1, 0.0], [0.01, 0.0, 0.9], ... ] Note that the sum of each instance may not be always 1. If model's top_n is set to none-zero, and is less than number of labels, then prediction results may not contain probs for all labels. Args: labels: a list of labels specifying the order of the labels. prediction_results: a pandas DataFrame containing prediction results, usually returned by get_prediction_results() call. Returns: A list of list of probs for each class.
[ "Given", "ML", "Workbench", "prediction", "results", "get", "probs", "of", "each", "label", "for", "each", "instance", "." ]
python
train
36.160714
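The reshaping performed by this record — from top-n predicted/probability columns into one fixed-order row of probabilities per instance — can be reproduced on a toy frame. Only pandas is used; the column names follow the docstring's example, and the suffix handling is a simplified equivalent of the record's k[9:] slicing.

```python
import pandas as pd

labels = ["daisy", "rose", "sunflower"]
results = pd.DataFrame([
    {"predicted": "daisy", "probability": 0.8,
     "predicted_2": "rose", "probability_2": 0.1},
    {"predicted": "sunflower", "probability": 0.9,
     "predicted_2": "daisy", "probability_2": 0.01},
])

probs = []
for _, row in results.iterrows():
    one = [0.0] * len(labels)
    for col, val in row.items():
        if col.startswith("predicted") and val in labels:
            prob_col = "probability" + col[len("predicted"):]  # "" or "_2", "_3", ...
            one[labels.index(val)] = float(row[prob_col])
    probs.append(one)

print(probs)  # [[0.8, 0.1, 0.0], [0.01, 0.0, 0.9]]
```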
cyrus-/cypy
cypy/cg.py
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/cg.py#L198-L205
def last_string(self): """The last entry in code_builder, or ``None`` if none so far.""" cb = self.code_builder len_cb = len(cb) if len_cb > 0: return cb[len_cb - 1] else: return None
[ "def", "last_string", "(", "self", ")", ":", "cb", "=", "self", ".", "code_builder", "len_cb", "=", "len", "(", "cb", ")", "if", "len_cb", ">", "0", ":", "return", "cb", "[", "len_cb", "-", "1", "]", "else", ":", "return", "None" ]
The last entry in code_builder, or ``None`` if none so far.
[ "The", "last", "entry", "in", "code_builder", "or", "None", "if", "none", "so", "far", "." ]
python
train
30
raff/dynash
dynash2/dynash2.py
https://github.com/raff/dynash/blob/a2b4fab67dd85ceaa9c1bb7604ebc1768a7fc28e/dynash2/dynash2.py#L574-L617
def _todo_do_update(self, line): "update [:tablename] {hashkey[,rangekey]} [!fieldname:expectedvalue] [-add|-delete] [+ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW] {attributes}" table, line = self.get_table_params(line) hkey, line = line.split(" ", 1) expected, attr = self.get_expected(line) if attr[0] == '-': op, attr = attr.split(" ", 1) op = op[1] else: op = "u" if attr[0] == '+': ret, attr = attr.split(" ", 1) ret = ret[1:] else: ret = "ALL_NEW" if ',' in hkey: hkey, rkey = hkey.split(",", 1) else: rkey = None item = table.new_item(hash_key=self.get_typed_key_value(table, hkey), range_key=self.get_typed_key_value(table, rkey, False)) attr = json.loads(attr.strip()) for name in attr.keys(): value = attr[name] if isinstance(value, list): value = set(value) if op == 'a': item.add_attribute(name, value) elif op == 'd': item.delete_attribute(name, value) else: item.put_attribute(name, value) self.pprint(item) updated = item.save(expected_value=expected or None, return_values=ret) self.pprint(updated) if self.consumed: print "consumed units:", item.consumed_units
[ "def", "_todo_do_update", "(", "self", ",", "line", ")", ":", "table", ",", "line", "=", "self", ".", "get_table_params", "(", "line", ")", "hkey", ",", "line", "=", "line", ".", "split", "(", "\" \"", ",", "1", ")", "expected", ",", "attr", "=", "self", ".", "get_expected", "(", "line", ")", "if", "attr", "[", "0", "]", "==", "'-'", ":", "op", ",", "attr", "=", "attr", ".", "split", "(", "\" \"", ",", "1", ")", "op", "=", "op", "[", "1", "]", "else", ":", "op", "=", "\"u\"", "if", "attr", "[", "0", "]", "==", "'+'", ":", "ret", ",", "attr", "=", "attr", ".", "split", "(", "\" \"", ",", "1", ")", "ret", "=", "ret", "[", "1", ":", "]", "else", ":", "ret", "=", "\"ALL_NEW\"", "if", "','", "in", "hkey", ":", "hkey", ",", "rkey", "=", "hkey", ".", "split", "(", "\",\"", ",", "1", ")", "else", ":", "rkey", "=", "None", "item", "=", "table", ".", "new_item", "(", "hash_key", "=", "self", ".", "get_typed_key_value", "(", "table", ",", "hkey", ")", ",", "range_key", "=", "self", ".", "get_typed_key_value", "(", "table", ",", "rkey", ",", "False", ")", ")", "attr", "=", "json", ".", "loads", "(", "attr", ".", "strip", "(", ")", ")", "for", "name", "in", "attr", ".", "keys", "(", ")", ":", "value", "=", "attr", "[", "name", "]", "if", "isinstance", "(", "value", ",", "list", ")", ":", "value", "=", "set", "(", "value", ")", "if", "op", "==", "'a'", ":", "item", ".", "add_attribute", "(", "name", ",", "value", ")", "elif", "op", "==", "'d'", ":", "item", ".", "delete_attribute", "(", "name", ",", "value", ")", "else", ":", "item", ".", "put_attribute", "(", "name", ",", "value", ")", "self", ".", "pprint", "(", "item", ")", "updated", "=", "item", ".", "save", "(", "expected_value", "=", "expected", "or", "None", ",", "return_values", "=", "ret", ")", "self", ".", "pprint", "(", "updated", ")", "if", "self", ".", "consumed", ":", "print", "\"consumed units:\"", ",", "item", ".", "consumed_units" ]
update [:tablename] {hashkey[,rangekey]} [!fieldname:expectedvalue] [-add|-delete] [+ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW] {attributes}
[ "update", "[", ":", "tablename", "]", "{", "hashkey", "[", "rangekey", "]", "}", "[", "!fieldname", ":", "expectedvalue", "]", "[", "-", "add|", "-", "delete", "]", "[", "+", "ALL_OLD|ALL_NEW|UPDATED_OLD|UPDATED_NEW", "]", "{", "attributes", "}" ]
python
train
31.954545
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L22763-L22787
def replace_namespaced_service_account(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_service_account # noqa: E501 replace the specified ServiceAccount # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceAccount (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ServiceAccount body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ServiceAccount If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_service_account_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
[ "def", "replace_namespaced_service_account", "(", "self", ",", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "replace_namespaced_service_account_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "replace_namespaced_service_account_with_http_info", "(", "name", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
replace_namespaced_service_account # noqa: E501 replace the specified ServiceAccount # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_service_account(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceAccount (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ServiceAccount body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ServiceAccount If the method is called asynchronously, returns the request thread.
[ "replace_namespaced_service_account", "#", "noqa", ":", "E501" ]
python
train
62.64
saltstack/salt
salt/modules/rdp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rdp.py#L104-L151
def list_sessions(logged_in_users_only=False): ''' List information about the sessions. .. versionadded:: 2016.11.0 :param logged_in_users_only: If True, only return sessions with users logged in. :return: A list containing dictionaries of session information. CLI Example: .. code-block:: bash salt '*' rdp.list_sessions ''' ret = list() server = win32ts.WTS_CURRENT_SERVER_HANDLE protocols = {win32ts.WTS_PROTOCOL_TYPE_CONSOLE: 'console', win32ts.WTS_PROTOCOL_TYPE_ICA: 'citrix', win32ts.WTS_PROTOCOL_TYPE_RDP: 'rdp'} statuses = {win32ts.WTSActive: 'active', win32ts.WTSConnected: 'connected', win32ts.WTSConnectQuery: 'connect_query', win32ts.WTSShadow: 'shadow', win32ts.WTSDisconnected: 'disconnected', win32ts.WTSIdle: 'idle', win32ts.WTSListen: 'listen', win32ts.WTSReset: 'reset', win32ts.WTSDown: 'down', win32ts.WTSInit: 'init'} for session in win32ts.WTSEnumerateSessions(server): user = win32ts.WTSQuerySessionInformation(server, session['SessionId'], win32ts.WTSUserName) or None protocol_id = win32ts.WTSQuerySessionInformation(server, session['SessionId'], win32ts.WTSClientProtocolType) status_id = win32ts.WTSQuerySessionInformation(server, session['SessionId'], win32ts.WTSConnectState) protocol = protocols.get(protocol_id, 'unknown') connection_status = statuses.get(status_id, 'unknown') station = session['WinStationName'] or 'Disconnected' connection_info = {'connection_status': connection_status, 'protocol': protocol, 'session_id': session['SessionId'], 'station': station, 'user': user} if logged_in_users_only: if user: ret.append(connection_info) else: ret.append(connection_info) if not ret: _LOG.warning('No sessions found.') return sorted(ret, key=lambda k: k['session_id'])
[ "def", "list_sessions", "(", "logged_in_users_only", "=", "False", ")", ":", "ret", "=", "list", "(", ")", "server", "=", "win32ts", ".", "WTS_CURRENT_SERVER_HANDLE", "protocols", "=", "{", "win32ts", ".", "WTS_PROTOCOL_TYPE_CONSOLE", ":", "'console'", ",", "win32ts", ".", "WTS_PROTOCOL_TYPE_ICA", ":", "'citrix'", ",", "win32ts", ".", "WTS_PROTOCOL_TYPE_RDP", ":", "'rdp'", "}", "statuses", "=", "{", "win32ts", ".", "WTSActive", ":", "'active'", ",", "win32ts", ".", "WTSConnected", ":", "'connected'", ",", "win32ts", ".", "WTSConnectQuery", ":", "'connect_query'", ",", "win32ts", ".", "WTSShadow", ":", "'shadow'", ",", "win32ts", ".", "WTSDisconnected", ":", "'disconnected'", ",", "win32ts", ".", "WTSIdle", ":", "'idle'", ",", "win32ts", ".", "WTSListen", ":", "'listen'", ",", "win32ts", ".", "WTSReset", ":", "'reset'", ",", "win32ts", ".", "WTSDown", ":", "'down'", ",", "win32ts", ".", "WTSInit", ":", "'init'", "}", "for", "session", "in", "win32ts", ".", "WTSEnumerateSessions", "(", "server", ")", ":", "user", "=", "win32ts", ".", "WTSQuerySessionInformation", "(", "server", ",", "session", "[", "'SessionId'", "]", ",", "win32ts", ".", "WTSUserName", ")", "or", "None", "protocol_id", "=", "win32ts", ".", "WTSQuerySessionInformation", "(", "server", ",", "session", "[", "'SessionId'", "]", ",", "win32ts", ".", "WTSClientProtocolType", ")", "status_id", "=", "win32ts", ".", "WTSQuerySessionInformation", "(", "server", ",", "session", "[", "'SessionId'", "]", ",", "win32ts", ".", "WTSConnectState", ")", "protocol", "=", "protocols", ".", "get", "(", "protocol_id", ",", "'unknown'", ")", "connection_status", "=", "statuses", ".", "get", "(", "status_id", ",", "'unknown'", ")", "station", "=", "session", "[", "'WinStationName'", "]", "or", "'Disconnected'", "connection_info", "=", "{", "'connection_status'", ":", "connection_status", ",", "'protocol'", ":", "protocol", ",", "'session_id'", ":", "session", "[", "'SessionId'", "]", ",", "'station'", ":", "station", ",", "'user'", ":", "user", "}", "if", "logged_in_users_only", ":", "if", "user", ":", "ret", ".", "append", "(", "connection_info", ")", "else", ":", "ret", ".", "append", "(", "connection_info", ")", "if", "not", "ret", ":", "_LOG", ".", "warning", "(", "'No sessions found.'", ")", "return", "sorted", "(", "ret", ",", "key", "=", "lambda", "k", ":", "k", "[", "'session_id'", "]", ")" ]
List information about the sessions. .. versionadded:: 2016.11.0 :param logged_in_users_only: If True, only return sessions with users logged in. :return: A list containing dictionaries of session information. CLI Example: .. code-block:: bash salt '*' rdp.list_sessions
[ "List", "information", "about", "the", "sessions", "." ]
python
train
45.166667
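A minimal consumption sketch for the rdp.list_sessions record above. The minion id and the use of Salt's Python LocalClient are assumptions for illustration, not part of the original record; the per-session dict keys come from the function shown above.

# Hypothetical sketch: call rdp.list_sessions through Salt's Python client and
# read the per-session dicts built by the function above.
import salt.client

local = salt.client.LocalClient()
result = local.cmd('winminion1', 'rdp.list_sessions', [True])  # logged_in_users_only=True
for session in result.get('winminion1', []):
    print(session['session_id'], session['user'], session['connection_status'])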
pyusb/pyusb
usb/legacy.py
https://github.com/pyusb/pyusb/blob/ffe6faf42c6ad273880b0b464b9bbf44c1d4b2e9/usb/legacy.py#L131-L141
def bulkWrite(self, endpoint, buffer, timeout = 100): r"""Perform a bulk write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100) Returns the number of bytes written. """ return self.dev.write(endpoint, buffer, timeout)
[ "def", "bulkWrite", "(", "self", ",", "endpoint", ",", "buffer", ",", "timeout", "=", "100", ")", ":", "return", "self", ".", "dev", ".", "write", "(", "endpoint", ",", "buffer", ",", "timeout", ")" ]
r"""Perform a bulk write request to the endpoint specified. Arguments: endpoint: endpoint number. buffer: sequence data buffer to write. This parameter can be any sequence type. timeout: operation timeout in milliseconds. (default: 100) Returns the number of bytes written.
[ "r", "Perform", "a", "bulk", "write", "request", "to", "the", "endpoint", "specified", "." ]
python
train
44.636364
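A hedged usage sketch for the legacy bulkWrite wrapper above. The device enumeration follows the old pyusb 0.x API that usb/legacy.py emulates; the bus/device indices, endpoint address 0x01, and payload are made-up values.

import usb.legacy as usb

bus = usb.busses()[0]          # assumed: at least one bus with one device
dev = bus.devices[0]
handle = dev.open()            # legacy DeviceHandle exposing bulkWrite
written = handle.bulkWrite(0x01, b'hello', timeout=1000)
print('wrote %d bytes' % written)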
mastro35/flows
flows/FlowsManager.py
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L229-L237
def _deliver_message(self, msg): """ Deliver the message to the subscripted actions """ my_subscribed_actions = self.subscriptions.get(msg.sender, []) for action in my_subscribed_actions: if Global.CONFIG_MANAGER.tracing_mode: Global.LOGGER.debug(f"delivering message to {action.name}") action.on_input_received(msg)
[ "def", "_deliver_message", "(", "self", ",", "msg", ")", ":", "my_subscribed_actions", "=", "self", ".", "subscriptions", ".", "get", "(", "msg", ".", "sender", ",", "[", "]", ")", "for", "action", "in", "my_subscribed_actions", ":", "if", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"delivering message to {action.name}\"", ")", "action", ".", "on_input_received", "(", "msg", ")" ]
Deliver the message to the subscripted actions
[ "Deliver", "the", "message", "to", "the", "subscripted", "actions" ]
python
train
43.111111
idlesign/django-sitetree
sitetree/admin.py
https://github.com/idlesign/django-sitetree/blob/61de4608e6e415247c75fe8691027d7c4ed0d1e7/sitetree/admin.py#L264-L276
def save_model(self, request, obj, form, change): """Saves TreeItem model under certain Tree. Handles item's parent assignment exception. """ if change: # No, you're not allowed to make item parent of itself if obj.parent is not None and obj.parent.id == obj.id: obj.parent = self.previous_parent messages.warning( request, _("Item's parent left unchanged. Item couldn't be parent to itself."), '', True) obj.tree = self.tree obj.save()
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "if", "change", ":", "# No, you're not allowed to make item parent of itself", "if", "obj", ".", "parent", "is", "not", "None", "and", "obj", ".", "parent", ".", "id", "==", "obj", ".", "id", ":", "obj", ".", "parent", "=", "self", ".", "previous_parent", "messages", ".", "warning", "(", "request", ",", "_", "(", "\"Item's parent left unchanged. Item couldn't be parent to itself.\"", ")", ",", "''", ",", "True", ")", "obj", ".", "tree", "=", "self", ".", "tree", "obj", ".", "save", "(", ")" ]
Saves TreeItem model under certain Tree. Handles item's parent assignment exception.
[ "Saves", "TreeItem", "model", "under", "certain", "Tree", ".", "Handles", "item", "s", "parent", "assignment", "exception", "." ]
python
test
42.230769
pkgw/pwkit
pwkit/msmt.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/msmt.py#L401-L478
def text_pieces(self, method, uplaces=2, use_exponent=True): """Return (main, dhigh, dlow, sharedexponent), all as strings. The delta terms do not have sign indicators. Any item except the first may be None. `method` is passed to Uval.repvals() to compute representative statistical limits. """ md, hi, lo = self.repvals(method) if hi == lo: return '%g' % lo, None, None, None if not np.isfinite([lo, md, hi]).all(): raise ValueError('got nonfinite values when formatting Uval') # Deltas. Round to limited # of places because we don't actually know # the fourth moment of the thing we're trying to describe. from numpy import abs, ceil, floor, log10 dh = hi - md dl = md - lo if dh <= 0: raise ValueError('strange problem formatting Uval; ' 'hi=%g md=%g dh=%g' % (hi, md, dh)) if dl <= 0: raise ValueError('strange problem formatting Uval; ' 'lo=%g md=%g dl=%g' % (lo, md, dl)) p = int(ceil(log10(dh))) rdh = round(dh * 10**(-p), uplaces) * 10**p p = int(ceil(log10(dl))) rdl = round(dl * 10**(-p), uplaces) * 10**p # The least significant place to worry about is the L.S.P. of one of # the deltas, which we can find relative to its M.S.P. Any precision # in the datum beyond this point is false. lsp = int(ceil(log10(min(rdh, rdl)))) - uplaces # We should round the datum since it might be something like # 0.999+-0.1 and we're about to try to decide what its most # significant place is. Might get -1 rather than 0. rmd = round(md, -lsp) if rmd == -0.: # 0 = -0, too, but no problem there. rmd = 0. # The most significant place to worry about is the M.S.P. of any of # the datum or the deltas. rdl and rdl must be positive, but not # necessarily rmd. msp = int(floor(log10(max(abs(rmd), rdh, rdl)))) # If we're not very large or very small, or it's been explicitly # disabled, don't use scientific notation. if (msp > -3 and msp < 3) or not use_exponent: srmd = '%.*f' % (-lsp, rmd) srdh = '%.*f' % (-lsp, rdh) srdl = '%.*f' % (-lsp, rdl) return srmd, srdh, srdl, None # Use scientific notation. Adjust values, then format. armd = rmd * 10**-msp ardh = rdh * 10**-msp ardl = rdl * 10**-msp prec = msp - lsp sarmd = '%.*f' % (prec, armd) sardh = '%.*f' % (prec, ardh) sardl = '%.*f' % (prec, ardl) return sarmd, sardh, sardl, str(msp)
[ "def", "text_pieces", "(", "self", ",", "method", ",", "uplaces", "=", "2", ",", "use_exponent", "=", "True", ")", ":", "md", ",", "hi", ",", "lo", "=", "self", ".", "repvals", "(", "method", ")", "if", "hi", "==", "lo", ":", "return", "'%g'", "%", "lo", ",", "None", ",", "None", ",", "None", "if", "not", "np", ".", "isfinite", "(", "[", "lo", ",", "md", ",", "hi", "]", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "'got nonfinite values when formatting Uval'", ")", "# Deltas. Round to limited # of places because we don't actually know", "# the fourth moment of the thing we're trying to describe.", "from", "numpy", "import", "abs", ",", "ceil", ",", "floor", ",", "log10", "dh", "=", "hi", "-", "md", "dl", "=", "md", "-", "lo", "if", "dh", "<=", "0", ":", "raise", "ValueError", "(", "'strange problem formatting Uval; '", "'hi=%g md=%g dh=%g'", "%", "(", "hi", ",", "md", ",", "dh", ")", ")", "if", "dl", "<=", "0", ":", "raise", "ValueError", "(", "'strange problem formatting Uval; '", "'lo=%g md=%g dl=%g'", "%", "(", "lo", ",", "md", ",", "dl", ")", ")", "p", "=", "int", "(", "ceil", "(", "log10", "(", "dh", ")", ")", ")", "rdh", "=", "round", "(", "dh", "*", "10", "**", "(", "-", "p", ")", ",", "uplaces", ")", "*", "10", "**", "p", "p", "=", "int", "(", "ceil", "(", "log10", "(", "dl", ")", ")", ")", "rdl", "=", "round", "(", "dl", "*", "10", "**", "(", "-", "p", ")", ",", "uplaces", ")", "*", "10", "**", "p", "# The least significant place to worry about is the L.S.P. of one of", "# the deltas, which we can find relative to its M.S.P. Any precision", "# in the datum beyond this point is false.", "lsp", "=", "int", "(", "ceil", "(", "log10", "(", "min", "(", "rdh", ",", "rdl", ")", ")", ")", ")", "-", "uplaces", "# We should round the datum since it might be something like", "# 0.999+-0.1 and we're about to try to decide what its most", "# significant place is. Might get -1 rather than 0.", "rmd", "=", "round", "(", "md", ",", "-", "lsp", ")", "if", "rmd", "==", "-", "0.", ":", "# 0 = -0, too, but no problem there.", "rmd", "=", "0.", "# The most significant place to worry about is the M.S.P. of any of", "# the datum or the deltas. rdl and rdl must be positive, but not", "# necessarily rmd.", "msp", "=", "int", "(", "floor", "(", "log10", "(", "max", "(", "abs", "(", "rmd", ")", ",", "rdh", ",", "rdl", ")", ")", ")", ")", "# If we're not very large or very small, or it's been explicitly", "# disabled, don't use scientific notation.", "if", "(", "msp", ">", "-", "3", "and", "msp", "<", "3", ")", "or", "not", "use_exponent", ":", "srmd", "=", "'%.*f'", "%", "(", "-", "lsp", ",", "rmd", ")", "srdh", "=", "'%.*f'", "%", "(", "-", "lsp", ",", "rdh", ")", "srdl", "=", "'%.*f'", "%", "(", "-", "lsp", ",", "rdl", ")", "return", "srmd", ",", "srdh", ",", "srdl", ",", "None", "# Use scientific notation. Adjust values, then format.", "armd", "=", "rmd", "*", "10", "**", "-", "msp", "ardh", "=", "rdh", "*", "10", "**", "-", "msp", "ardl", "=", "rdl", "*", "10", "**", "-", "msp", "prec", "=", "msp", "-", "lsp", "sarmd", "=", "'%.*f'", "%", "(", "prec", ",", "armd", ")", "sardh", "=", "'%.*f'", "%", "(", "prec", ",", "ardh", ")", "sardl", "=", "'%.*f'", "%", "(", "prec", ",", "ardl", ")", "return", "sarmd", ",", "sardh", ",", "sardl", ",", "str", "(", "msp", ")" ]
Return (main, dhigh, dlow, sharedexponent), all as strings. The delta terms do not have sign indicators. Any item except the first may be None. `method` is passed to Uval.repvals() to compute representative statistical limits.
[ "Return", "(", "main", "dhigh", "dlow", "sharedexponent", ")", "all", "as", "strings", ".", "The", "delta", "terms", "do", "not", "have", "sign", "indicators", ".", "Any", "item", "except", "the", "first", "may", "be", "None", "." ]
python
train
34.679487
timofurrer/w1thermsensor
w1thermsensor/core.py
https://github.com/timofurrer/w1thermsensor/blob/8ac4fbb85e0c247dbb39e8b178cca0a975adc332/w1thermsensor/core.py#L329-L338
def get_precision(self): """ Get the current precision from the sensor. :returns: sensor resolution from 9-12 bits :rtype: int """ config_str = self.raw_sensor_strings[1].split()[4] # Byte 5 is the config register bit_base = int(config_str, 16) >> 5 # Bit 5-6 contains the resolution, cut off the rest return bit_base + 9
[ "def", "get_precision", "(", "self", ")", ":", "config_str", "=", "self", ".", "raw_sensor_strings", "[", "1", "]", ".", "split", "(", ")", "[", "4", "]", "# Byte 5 is the config register", "bit_base", "=", "int", "(", "config_str", ",", "16", ")", ">>", "5", "# Bit 5-6 contains the resolution, cut off the rest", "return", "bit_base", "+", "9" ]
Get the current precision from the sensor. :returns: sensor resolution from 9-12 bits :rtype: int
[ "Get", "the", "current", "precision", "from", "the", "sensor", "." ]
python
train
39.1
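A worked example of the bit arithmetic performed by get_precision above. The config byte value is illustrative (0x7f is a common 12-bit configuration for these sensors), not taken from the record.

config_str = '7f'                      # byte 5 of the second raw sensor line
bit_base = int(config_str, 16) >> 5    # 0x7f >> 5 == 3 (bits 5-6 hold the resolution)
precision = bit_base + 9               # -> 12 bits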
allenai/allennlp
allennlp/nn/initializers.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/nn/initializers.py#L58-L95
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"): """ An initaliser which preserves output variance for approximately gaussian distributed inputs. This boils down to initialising layers using a uniform distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where ``dim[0]`` is equal to the input dimension of the parameter and the ``scale`` is a constant scaling factor which depends on the non-linearity used. See `Random Walk Initialisation for Training Very Deep Feedforward Networks <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_ for more information. Parameters ---------- tensor : ``torch.Tensor``, required. The tensor to initialise. nonlinearity : ``str``, optional (default = "linear") The non-linearity which is performed after the projection that this tensor is involved in. This must be the name of a function contained in the ``torch.nn.functional`` package. Returns ------- The initialised tensor. """ size = 1. # Estimate the input size. This won't work perfectly, # but it covers almost all use cases where this initialiser # would be expected to be useful, i.e in large linear and # convolutional layers, as the last dimension will almost # always be the output size. for dimension in list(tensor.size())[:-1]: size *= dimension activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor) max_value = math.sqrt(3 / size) * activation_scaling return tensor.data.uniform_(-max_value, max_value)
[ "def", "uniform_unit_scaling", "(", "tensor", ":", "torch", ".", "Tensor", ",", "nonlinearity", ":", "str", "=", "\"linear\"", ")", ":", "size", "=", "1.", "# Estimate the input size. This won't work perfectly,", "# but it covers almost all use cases where this initialiser", "# would be expected to be useful, i.e in large linear and", "# convolutional layers, as the last dimension will almost", "# always be the output size.", "for", "dimension", "in", "list", "(", "tensor", ".", "size", "(", ")", ")", "[", ":", "-", "1", "]", ":", "size", "*=", "dimension", "activation_scaling", "=", "torch", ".", "nn", ".", "init", ".", "calculate_gain", "(", "nonlinearity", ",", "tensor", ")", "max_value", "=", "math", ".", "sqrt", "(", "3", "/", "size", ")", "*", "activation_scaling", "return", "tensor", ".", "data", ".", "uniform_", "(", "-", "max_value", ",", "max_value", ")" ]
An initaliser which preserves output variance for approximately gaussian distributed inputs. This boils down to initialising layers using a uniform distribution in the range ``(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)``, where ``dim[0]`` is equal to the input dimension of the parameter and the ``scale`` is a constant scaling factor which depends on the non-linearity used. See `Random Walk Initialisation for Training Very Deep Feedforward Networks <https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_ for more information. Parameters ---------- tensor : ``torch.Tensor``, required. The tensor to initialise. nonlinearity : ``str``, optional (default = "linear") The non-linearity which is performed after the projection that this tensor is involved in. This must be the name of a function contained in the ``torch.nn.functional`` package. Returns ------- The initialised tensor.
[ "An", "initaliser", "which", "preserves", "output", "variance", "for", "approximately", "gaussian", "distributed", "inputs", ".", "This", "boils", "down", "to", "initialising", "layers", "using", "a", "uniform", "distribution", "in", "the", "range", "(", "-", "sqrt", "(", "3", "/", "dim", "[", "0", "]", ")", "*", "scale", "sqrt", "(", "3", "/", "dim", "[", "0", "]", ")", "*", "scale", ")", "where", "dim", "[", "0", "]", "is", "equal", "to", "the", "input", "dimension", "of", "the", "parameter", "and", "the", "scale", "is", "a", "constant", "scaling", "factor", "which", "depends", "on", "the", "non", "-", "linearity", "used", "." ]
python
train
44.605263
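An illustrative calculation of the uniform bound that uniform_unit_scaling above computes, assuming a weight tensor of shape (300, 100) and the default "linear" nonlinearity (gain 1.0); the shape is an assumption for the example.

import math

size = 300                              # product of all dims except the last
gain = 1.0                              # torch's gain for "linear"
max_value = math.sqrt(3 / size) * gain  # == 0.1
# the tensor would then be filled uniformly in (-0.1, 0.1)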
DataBiosphere/dsub
dsub/lib/job_model.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/lib/job_model.py#L927-L942
def task_view_generator(job_descriptor): """Generator that yields a task-specific view of the job. This generator exists to make it easy for callers to iterate over the tasks in a JobDescriptor. Each pass yields a new JobDescriptor with a single task. Args: job_descriptor: A JobDescriptor with 1 or more tasks. Yields: A JobDescriptor with a single task. """ for task_descriptor in job_descriptor.task_descriptors: jd = JobDescriptor(job_descriptor.job_metadata, job_descriptor.job_params, job_descriptor.job_resources, [task_descriptor]) yield jd
[ "def", "task_view_generator", "(", "job_descriptor", ")", ":", "for", "task_descriptor", "in", "job_descriptor", ".", "task_descriptors", ":", "jd", "=", "JobDescriptor", "(", "job_descriptor", ".", "job_metadata", ",", "job_descriptor", ".", "job_params", ",", "job_descriptor", ".", "job_resources", ",", "[", "task_descriptor", "]", ")", "yield", "jd" ]
Generator that yields a task-specific view of the job. This generator exists to make it easy for callers to iterate over the tasks in a JobDescriptor. Each pass yields a new JobDescriptor with a single task. Args: job_descriptor: A JobDescriptor with 1 or more tasks. Yields: A JobDescriptor with a single task.
[ "Generator", "that", "yields", "a", "task", "-", "specific", "view", "of", "the", "job", "." ]
python
valid
36.8125
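A minimal usage sketch for task_view_generator above; job_descriptor is assumed to be an existing JobDescriptor with one or more tasks, and the import path mirrors the record's module path.

from dsub.lib.job_model import task_view_generator

for task_view in task_view_generator(job_descriptor):
    # each yielded JobDescriptor carries exactly one task
    assert len(task_view.task_descriptors) == 1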
dossier/dossier.models
dossier/models/openquery/fetcher.py
https://github.com/dossier/dossier.models/blob/c9e282f690eab72963926329efe1600709e48b13/dossier/models/openquery/fetcher.py#L108-L121
def add(self, si): '''puts `si` into the currently open chunk, which it creates if necessary. If this item causes the chunk to cross chunk_max, then the chunk closed after adding. ''' if self.o_chunk is None: if os.path.exists(self.t_path): os.remove(self.t_path) self.o_chunk = streamcorpus.Chunk(self.t_path, mode='wb') self.o_chunk.add(si) logger.debug('added %d-th item to chunk', len(self.o_chunk)) if len(self.o_chunk) == self.chunk_max: self.close()
[ "def", "add", "(", "self", ",", "si", ")", ":", "if", "self", ".", "o_chunk", "is", "None", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "t_path", ")", ":", "os", ".", "remove", "(", "self", ".", "t_path", ")", "self", ".", "o_chunk", "=", "streamcorpus", ".", "Chunk", "(", "self", ".", "t_path", ",", "mode", "=", "'wb'", ")", "self", ".", "o_chunk", ".", "add", "(", "si", ")", "logger", ".", "debug", "(", "'added %d-th item to chunk'", ",", "len", "(", "self", ".", "o_chunk", ")", ")", "if", "len", "(", "self", ".", "o_chunk", ")", "==", "self", ".", "chunk_max", ":", "self", ".", "close", "(", ")" ]
puts `si` into the currently open chunk, which it creates if necessary. If this item causes the chunk to cross chunk_max, then the chunk closed after adding.
[ "puts", "si", "into", "the", "currently", "open", "chunk", "which", "it", "creates", "if", "necessary", ".", "If", "this", "item", "causes", "the", "chunk", "to", "cross", "chunk_max", "then", "the", "chunk", "closed", "after", "adding", "." ]
python
train
40.071429
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASU/save_tdx.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASU/save_tdx.py#L4016-L4146
def QA_SU_save_future_min(client=DATABASE, ui_log=None, ui_progress=None): """save future_min Keyword Arguments: client {[type]} -- [description] (default: {DATABASE}) """ future_list = [ item for item in QA_fetch_get_future_list().code.unique().tolist() if str(item)[-2:] in ['L8', 'L9'] ] coll = client.future_min coll.create_index( [ ('code', pymongo.ASCENDING), ('time_stamp', pymongo.ASCENDING), ('date_stamp', pymongo.ASCENDING) ] ) err = [] def __saving_work(code, coll): QA_util_log_info( '##JOB13 Now Saving Future_MIN ==== {}'.format(str(code)), ui_log=ui_log ) try: for type in ['1min', '5min', '15min', '30min', '60min']: ref_ = coll.find({'code': str(code)[0:6], 'type': type}) end_time = str(now_time())[0:19] if ref_.count() > 0: start_time = ref_[ref_.count() - 1]['datetime'] QA_util_log_info( '##JOB13.{} Now Saving Future {} from {} to {} =={} ' .format( ['1min', '5min', '15min', '30min', '60min'].index(type), str(code), start_time, end_time, type ), ui_log=ui_log ) if start_time != end_time: __data = QA_fetch_get_future_min( str(code), start_time, end_time, type ) if len(__data) > 1: coll.insert_many( QA_util_to_json_from_pandas(__data[1::]) ) else: start_time = '2015-01-01' QA_util_log_info( '##JOB13.{} Now Saving Future {} from {} to {} =={} ' .format( ['1min', '5min', '15min', '30min', '60min'].index(type), str(code), start_time, end_time, type ), ui_log=ui_log ) if start_time != end_time: __data = QA_fetch_get_future_min( str(code), start_time, end_time, type ) if len(__data) > 1: coll.insert_many( QA_util_to_json_from_pandas(__data) ) except: err.append(code) executor = ThreadPoolExecutor(max_workers=4) res = { executor.submit(__saving_work, future_list[i_], coll) for i_ in range(len(future_list)) } # multi index ./. count = 0 for i_ in concurrent.futures.as_completed(res): QA_util_log_info( 'The {} of Total {}'.format(count, len(future_list)), ui_log=ui_log ) strLogProgress = 'DOWNLOAD PROGRESS {} '.format( str(float(count / len(future_list) * 100))[0:4] + '%' ) intLogProgress = int(float(count / len(future_list) * 10000.0)) QA_util_log_info( strLogProgress, ui_log=ui_log, ui_progress=ui_progress, ui_progress_int_value=intLogProgress ) count = count + 1 if len(err) < 1: QA_util_log_info('SUCCESS', ui_log=ui_log) else: QA_util_log_info(' ERROR CODE \n ', ui_log=ui_log) QA_util_log_info(err, ui_log=ui_log)
[ "def", "QA_SU_save_future_min", "(", "client", "=", "DATABASE", ",", "ui_log", "=", "None", ",", "ui_progress", "=", "None", ")", ":", "future_list", "=", "[", "item", "for", "item", "in", "QA_fetch_get_future_list", "(", ")", ".", "code", ".", "unique", "(", ")", ".", "tolist", "(", ")", "if", "str", "(", "item", ")", "[", "-", "2", ":", "]", "in", "[", "'L8'", ",", "'L9'", "]", "]", "coll", "=", "client", ".", "future_min", "coll", ".", "create_index", "(", "[", "(", "'code'", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "'time_stamp'", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "'date_stamp'", ",", "pymongo", ".", "ASCENDING", ")", "]", ")", "err", "=", "[", "]", "def", "__saving_work", "(", "code", ",", "coll", ")", ":", "QA_util_log_info", "(", "'##JOB13 Now Saving Future_MIN ==== {}'", ".", "format", "(", "str", "(", "code", ")", ")", ",", "ui_log", "=", "ui_log", ")", "try", ":", "for", "type", "in", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ":", "ref_", "=", "coll", ".", "find", "(", "{", "'code'", ":", "str", "(", "code", ")", "[", "0", ":", "6", "]", ",", "'type'", ":", "type", "}", ")", "end_time", "=", "str", "(", "now_time", "(", ")", ")", "[", "0", ":", "19", "]", "if", "ref_", ".", "count", "(", ")", ">", "0", ":", "start_time", "=", "ref_", "[", "ref_", ".", "count", "(", ")", "-", "1", "]", "[", "'datetime'", "]", "QA_util_log_info", "(", "'##JOB13.{} Now Saving Future {} from {} to {} =={} '", ".", "format", "(", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ".", "index", "(", "type", ")", ",", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", ",", "ui_log", "=", "ui_log", ")", "if", "start_time", "!=", "end_time", ":", "__data", "=", "QA_fetch_get_future_min", "(", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", "if", "len", "(", "__data", ")", ">", "1", ":", "coll", ".", "insert_many", "(", "QA_util_to_json_from_pandas", "(", "__data", "[", "1", ":", ":", "]", ")", ")", "else", ":", "start_time", "=", "'2015-01-01'", "QA_util_log_info", "(", "'##JOB13.{} Now Saving Future {} from {} to {} =={} '", ".", "format", "(", "[", "'1min'", ",", "'5min'", ",", "'15min'", ",", "'30min'", ",", "'60min'", "]", ".", "index", "(", "type", ")", ",", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", ",", "ui_log", "=", "ui_log", ")", "if", "start_time", "!=", "end_time", ":", "__data", "=", "QA_fetch_get_future_min", "(", "str", "(", "code", ")", ",", "start_time", ",", "end_time", ",", "type", ")", "if", "len", "(", "__data", ")", ">", "1", ":", "coll", ".", "insert_many", "(", "QA_util_to_json_from_pandas", "(", "__data", ")", ")", "except", ":", "err", ".", "append", "(", "code", ")", "executor", "=", "ThreadPoolExecutor", "(", "max_workers", "=", "4", ")", "res", "=", "{", "executor", ".", "submit", "(", "__saving_work", ",", "future_list", "[", "i_", "]", ",", "coll", ")", "for", "i_", "in", "range", "(", "len", "(", "future_list", ")", ")", "}", "# multi index ./.", "count", "=", "0", "for", "i_", "in", "concurrent", ".", "futures", ".", "as_completed", "(", "res", ")", ":", "QA_util_log_info", "(", "'The {} of Total {}'", ".", "format", "(", "count", ",", "len", "(", "future_list", ")", ")", ",", "ui_log", "=", "ui_log", ")", "strLogProgress", "=", "'DOWNLOAD PROGRESS {} '", ".", "format", "(", "str", "(", "float", "(", "count", "/", "len", "(", "future_list", ")", "*", "100", ")", ")", "[", "0", ":", 
"4", "]", "+", "'%'", ")", "intLogProgress", "=", "int", "(", "float", "(", "count", "/", "len", "(", "future_list", ")", "*", "10000.0", ")", ")", "QA_util_log_info", "(", "strLogProgress", ",", "ui_log", "=", "ui_log", ",", "ui_progress", "=", "ui_progress", ",", "ui_progress_int_value", "=", "intLogProgress", ")", "count", "=", "count", "+", "1", "if", "len", "(", "err", ")", "<", "1", ":", "QA_util_log_info", "(", "'SUCCESS'", ",", "ui_log", "=", "ui_log", ")", "else", ":", "QA_util_log_info", "(", "' ERROR CODE \\n '", ",", "ui_log", "=", "ui_log", ")", "QA_util_log_info", "(", "err", ",", "ui_log", "=", "ui_log", ")" ]
save future_min Keyword Arguments: client {[type]} -- [description] (default: {DATABASE})
[ "save", "future_min" ]
python
train
32.755725
JarryShaw/PyPCAPKit
src/protocols/transport/tcp.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/transport/tcp.py#L464-L494
def _read_mode_tsopt(self, size, kind): """Read Timestamps option. Positional arguments: * size - int, length of option * kind - int, 8 (Timestamps) Returns: * dict -- extracted Timestamps (TS) option Structure of TCP TSopt [RFC 7323]: +-------+-------+---------------------+---------------------+ |Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)| +-------+-------+---------------------+---------------------+ 1 1 4 4 Octets Bits Name Description 0 0 tcp.ts.kind Kind (8) 1 8 tcp.ts.length Length (10) 2 16 tcp.ts.val Timestamp Value 6 48 tcp.ts.ecr Timestamps Echo Reply """ temp = struct.unpack('>II', self._read_fileng(size)) data = dict( kind=kind, length=size, val=temp[0], ecr=temp[1], ) return data
[ "def", "_read_mode_tsopt", "(", "self", ",", "size", ",", "kind", ")", ":", "temp", "=", "struct", ".", "unpack", "(", "'>II'", ",", "self", ".", "_read_fileng", "(", "size", ")", ")", "data", "=", "dict", "(", "kind", "=", "kind", ",", "length", "=", "size", ",", "val", "=", "temp", "[", "0", "]", ",", "ecr", "=", "temp", "[", "1", "]", ",", ")", "return", "data" ]
Read Timestamps option. Positional arguments: * size - int, length of option * kind - int, 8 (Timestamps) Returns: * dict -- extracted Timestamps (TS) option Structure of TCP TSopt [RFC 7323]: +-------+-------+---------------------+---------------------+ |Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)| +-------+-------+---------------------+---------------------+ 1 1 4 4 Octets Bits Name Description 0 0 tcp.ts.kind Kind (8) 1 8 tcp.ts.length Length (10) 2 16 tcp.ts.val Timestamp Value 6 48 tcp.ts.ecr Timestamps Echo Reply
[ "Read", "Timestamps", "option", "." ]
python
train
36.774194
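A standalone illustration of the '>II' unpack used by _read_mode_tsopt above; the eight option bytes are fabricated for the example, not taken from a real capture.

import struct

payload = bytes.fromhex('0000001000000020')   # 8 option bytes: TSval, TSecr
tsval, tsecr = struct.unpack('>II', payload)  # -> (16, 32), big-endian unsigned ints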
Crypto-toolbox/btfxwss
btfxwss/connection.py
https://github.com/Crypto-toolbox/btfxwss/blob/16827fa6aacb2c0e289aa852bf61a18df6905835/btfxwss/connection.py#L291-L314
def send(self, api_key=None, secret=None, list_data=None, auth=False, **kwargs): """Sends the given Payload to the API via the websocket connection. :param kwargs: payload paarameters as key=value pairs :return: """ if auth: nonce = str(int(time.time() * 10000000)) auth_string = 'AUTH' + nonce auth_sig = hmac.new(secret.encode(), auth_string.encode(), hashlib.sha384).hexdigest() payload = {'event': 'auth', 'apiKey': api_key, 'authSig': auth_sig, 'authPayload': auth_string, 'authNonce': nonce} payload = json.dumps(payload) elif list_data: payload = json.dumps(list_data) else: payload = json.dumps(kwargs) self.log.debug("send(): Sending payload to API: %s", payload) try: self.socket.send(payload) except websocket.WebSocketConnectionClosedException: self.log.error("send(): Did not send out payload %s - client not connected. ", kwargs)
[ "def", "send", "(", "self", ",", "api_key", "=", "None", ",", "secret", "=", "None", ",", "list_data", "=", "None", ",", "auth", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "auth", ":", "nonce", "=", "str", "(", "int", "(", "time", ".", "time", "(", ")", "*", "10000000", ")", ")", "auth_string", "=", "'AUTH'", "+", "nonce", "auth_sig", "=", "hmac", ".", "new", "(", "secret", ".", "encode", "(", ")", ",", "auth_string", ".", "encode", "(", ")", ",", "hashlib", ".", "sha384", ")", ".", "hexdigest", "(", ")", "payload", "=", "{", "'event'", ":", "'auth'", ",", "'apiKey'", ":", "api_key", ",", "'authSig'", ":", "auth_sig", ",", "'authPayload'", ":", "auth_string", ",", "'authNonce'", ":", "nonce", "}", "payload", "=", "json", ".", "dumps", "(", "payload", ")", "elif", "list_data", ":", "payload", "=", "json", ".", "dumps", "(", "list_data", ")", "else", ":", "payload", "=", "json", ".", "dumps", "(", "kwargs", ")", "self", ".", "log", ".", "debug", "(", "\"send(): Sending payload to API: %s\"", ",", "payload", ")", "try", ":", "self", ".", "socket", ".", "send", "(", "payload", ")", "except", "websocket", ".", "WebSocketConnectionClosedException", ":", "self", ".", "log", ".", "error", "(", "\"send(): Did not send out payload %s - client not connected. \"", ",", "kwargs", ")" ]
Sends the given Payload to the API via the websocket connection.

        :param kwargs: payload parameters as key=value pairs
        :return:
[ "Sends", "the", "given", "Payload", "to", "the", "API", "via", "the", "websocket", "connection", "." ]
python
test
44.375
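A sketch of the authentication payload that send() above builds when auth=True; the API key and secret here are placeholders, and the hashing steps mirror the record's code.

import hashlib
import hmac
import json
import time

nonce = str(int(time.time() * 10000000))
auth_string = 'AUTH' + nonce
auth_sig = hmac.new(b'my-secret', auth_string.encode(), hashlib.sha384).hexdigest()
payload = json.dumps({'event': 'auth', 'apiKey': 'my-api-key', 'authSig': auth_sig,
                      'authPayload': auth_string, 'authNonce': nonce})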
sbg/sevenbridges-python
sevenbridges/transfer/download.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/transfer/download.py#L305-L318
def start(self): """ Starts the download. :raises SbgError: If download is not in PREPARING state. """ if self._status == TransferState.PREPARING: self._running.set() super(Download, self).start() self._status = TransferState.RUNNING self._time_started = time.time() else: raise SbgError( 'Unable to start. Download not in PREPARING state.' )
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "_status", "==", "TransferState", ".", "PREPARING", ":", "self", ".", "_running", ".", "set", "(", ")", "super", "(", "Download", ",", "self", ")", ".", "start", "(", ")", "self", ".", "_status", "=", "TransferState", ".", "RUNNING", "self", ".", "_time_started", "=", "time", ".", "time", "(", ")", "else", ":", "raise", "SbgError", "(", "'Unable to start. Download not in PREPARING state.'", ")" ]
Starts the download. :raises SbgError: If download is not in PREPARING state.
[ "Starts", "the", "download", ".", ":", "raises", "SbgError", ":", "If", "download", "is", "not", "in", "PREPARING", "state", "." ]
python
train
33.214286
PredixDev/predixpy
predix/admin/service.py
https://github.com/PredixDev/predixpy/blob/a0cb34cf40f716229351bb6d90d6ecace958c81f/predix/admin/service.py#L30-L34
def _generate_name(self, space, service_name, plan_name): """ Can generate a name based on the space, service name and plan. """ return str.join('-', [space, service_name, plan_name]).lower()
[ "def", "_generate_name", "(", "self", ",", "space", ",", "service_name", ",", "plan_name", ")", ":", "return", "str", ".", "join", "(", "'-'", ",", "[", "space", ",", "service_name", ",", "plan_name", "]", ")", ".", "lower", "(", ")" ]
Can generate a name based on the space, service name and plan.
[ "Can", "generate", "a", "name", "based", "on", "the", "space", "service", "name", "and", "plan", "." ]
python
train
43.8
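The helper above reduces to a join-and-lowercase; a quick illustration with made-up space, service, and plan names.

str.join('-', ['dev', 'predix-uaa', 'Free']).lower()   # -> 'dev-predix-uaa-free'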
JustinLovinger/optimal
optimal/optimize.py
https://github.com/JustinLovinger/optimal/blob/ab48a4961697338cc32d50e3a6b06ac989e39c3f/optimal/optimize.py#L138-L141
def decode_solution(self, encoded_solution): """Return solution from an encoded representation.""" return self._decode_function(encoded_solution, *self._decode_args, **self._decode_kwargs)
[ "def", "decode_solution", "(", "self", ",", "encoded_solution", ")", ":", "return", "self", ".", "_decode_function", "(", "encoded_solution", ",", "*", "self", ".", "_decode_args", ",", "*", "*", "self", ".", "_decode_kwargs", ")" ]
Return solution from an encoded representation.
[ "Return", "solution", "from", "an", "encoded", "representation", "." ]
python
train
59.5
peterwittek/ncpol2sdpa
ncpol2sdpa/sdpa_utils.py
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/sdpa_utils.py#L148-L191
def solve_with_sdpa(sdp, solverparameters=None): """Helper function to write out the SDP problem to a temporary file, call the solver, and parse the output. :param sdp: The SDP relaxation to be solved. :type sdp: :class:`ncpol2sdpa.sdp`. :param solverparameters: Optional parameters to SDPA. :type solverparameters: dict of str. :returns: tuple of float and list -- the primal and dual solution of the SDP, respectively, and a status string. """ solverexecutable = detect_sdpa(solverparameters) if solverexecutable is None: raise OSError("SDPA is not in the path or the executable provided is" + " not correct") primal, dual = 0, 0 tempfile_ = tempfile.NamedTemporaryFile() tmp_filename = tempfile_.name tempfile_.close() tmp_dats_filename = tmp_filename + ".dat-s" tmp_out_filename = tmp_filename + ".out" write_to_sdpa(sdp, tmp_dats_filename) command_line = [solverexecutable, "-ds", tmp_dats_filename, "-o", tmp_out_filename] if solverparameters is not None: for key, value in list(solverparameters.items()): if key == "executable": continue elif key == "paramsfile": command_line.extend(["-p", value]) else: raise ValueError("Unknown parameter for SDPA: " + key) if sdp.verbose < 1: with open(os.devnull, "w") as fnull: call(command_line, stdout=fnull, stderr=fnull) else: call(command_line) primal, dual, x_mat, y_mat, status = read_sdpa_out(tmp_out_filename, True, True) if sdp.verbose < 2: os.remove(tmp_dats_filename) os.remove(tmp_out_filename) return primal+sdp.constant_term, \ dual+sdp.constant_term, x_mat, y_mat, status
[ "def", "solve_with_sdpa", "(", "sdp", ",", "solverparameters", "=", "None", ")", ":", "solverexecutable", "=", "detect_sdpa", "(", "solverparameters", ")", "if", "solverexecutable", "is", "None", ":", "raise", "OSError", "(", "\"SDPA is not in the path or the executable provided is\"", "+", "\" not correct\"", ")", "primal", ",", "dual", "=", "0", ",", "0", "tempfile_", "=", "tempfile", ".", "NamedTemporaryFile", "(", ")", "tmp_filename", "=", "tempfile_", ".", "name", "tempfile_", ".", "close", "(", ")", "tmp_dats_filename", "=", "tmp_filename", "+", "\".dat-s\"", "tmp_out_filename", "=", "tmp_filename", "+", "\".out\"", "write_to_sdpa", "(", "sdp", ",", "tmp_dats_filename", ")", "command_line", "=", "[", "solverexecutable", ",", "\"-ds\"", ",", "tmp_dats_filename", ",", "\"-o\"", ",", "tmp_out_filename", "]", "if", "solverparameters", "is", "not", "None", ":", "for", "key", ",", "value", "in", "list", "(", "solverparameters", ".", "items", "(", ")", ")", ":", "if", "key", "==", "\"executable\"", ":", "continue", "elif", "key", "==", "\"paramsfile\"", ":", "command_line", ".", "extend", "(", "[", "\"-p\"", ",", "value", "]", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown parameter for SDPA: \"", "+", "key", ")", "if", "sdp", ".", "verbose", "<", "1", ":", "with", "open", "(", "os", ".", "devnull", ",", "\"w\"", ")", "as", "fnull", ":", "call", "(", "command_line", ",", "stdout", "=", "fnull", ",", "stderr", "=", "fnull", ")", "else", ":", "call", "(", "command_line", ")", "primal", ",", "dual", ",", "x_mat", ",", "y_mat", ",", "status", "=", "read_sdpa_out", "(", "tmp_out_filename", ",", "True", ",", "True", ")", "if", "sdp", ".", "verbose", "<", "2", ":", "os", ".", "remove", "(", "tmp_dats_filename", ")", "os", ".", "remove", "(", "tmp_out_filename", ")", "return", "primal", "+", "sdp", ".", "constant_term", ",", "dual", "+", "sdp", ".", "constant_term", ",", "x_mat", ",", "y_mat", ",", "status" ]
Helper function to write out the SDP problem to a temporary file, call the solver, and parse the output. :param sdp: The SDP relaxation to be solved. :type sdp: :class:`ncpol2sdpa.sdp`. :param solverparameters: Optional parameters to SDPA. :type solverparameters: dict of str. :returns: tuple of float and list -- the primal and dual solution of the SDP, respectively, and a status string.
[ "Helper", "function", "to", "write", "out", "the", "SDP", "problem", "to", "a", "temporary", "file", "call", "the", "solver", "and", "parse", "the", "output", "." ]
python
train
42.045455
NASA-AMMOS/AIT-Core
ait/core/dtype.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/dtype.py#L240-L250
def decode(self, bytes, raw=False): """decode(bytearray, raw=False) -> value Decodes the given bytearray according to this PrimitiveType definition. NOTE: The parameter ``raw`` is present to adhere to the ``decode()`` inteface, but has no effect for PrimitiveType definitions. """ return struct.unpack(self.format, buffer(bytes))[0]
[ "def", "decode", "(", "self", ",", "bytes", ",", "raw", "=", "False", ")", ":", "return", "struct", ".", "unpack", "(", "self", ".", "format", ",", "buffer", "(", "bytes", ")", ")", "[", "0", "]" ]
decode(bytearray, raw=False) -> value

        Decodes the given bytearray according to this PrimitiveType
        definition.

        NOTE: The parameter ``raw`` is present to adhere to the
        ``decode()`` interface, but has no effect for PrimitiveType
        definitions.
[ "decode", "(", "bytearray", "raw", "=", "False", ")", "-", ">", "value" ]
python
train
35.272727
todddeluca/dones
dones.py
https://github.com/todddeluca/dones/blob/6ef56565556987e701fed797a405f0825fe2e15a/dones.py#L265-L283
def open_conn(host, db, user, password, retries=0, sleep=0.5): ''' Return an open mysql db connection using the given credentials. Use `retries` and `sleep` to be robust to the occassional transient connection failure. retries: if an exception when getting the connection, try again at most this many times. sleep: pause between retries for this many seconds. a float >= 0. ''' assert retries >= 0 try: return MySQLdb.connect(host=host, user=user, passwd=password, db=db) except Exception: if retries > 0: time.sleep(sleep) return open_conn(host, db, user, password, retries - 1, sleep) else: raise
[ "def", "open_conn", "(", "host", ",", "db", ",", "user", ",", "password", ",", "retries", "=", "0", ",", "sleep", "=", "0.5", ")", ":", "assert", "retries", ">=", "0", "try", ":", "return", "MySQLdb", ".", "connect", "(", "host", "=", "host", ",", "user", "=", "user", ",", "passwd", "=", "password", ",", "db", "=", "db", ")", "except", "Exception", ":", "if", "retries", ">", "0", ":", "time", ".", "sleep", "(", "sleep", ")", "return", "open_conn", "(", "host", ",", "db", ",", "user", ",", "password", ",", "retries", "-", "1", ",", "sleep", ")", "else", ":", "raise" ]
Return an open mysql db connection using the given credentials.  Use `retries` and `sleep` to
    be robust to the occasional transient connection failure.

    retries: if an exception when getting the connection, try again at most this many times.
    sleep: pause between retries for this many seconds.  a float >= 0.
[ "Return", "an", "open", "mysql", "db", "connection", "using", "the", "given", "credentials", ".", "Use", "retries", "and", "sleep", "to", "be", "robust", "to", "the", "occassional", "transient", "connection", "failure", "." ]
python
train
36
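A hypothetical call of the retry-aware connection helper above; host, database, and credentials are placeholders, and the import path follows the record's module (dones.py).

from dones import open_conn

conn = open_conn('db.example.com', 'mydb', 'dbuser', 's3cret', retries=3, sleep=1.0)
try:
    cursor = conn.cursor()
    cursor.execute('SELECT 1')
finally:
    conn.close()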
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ip_policy.py#L234-L254
def hide_routemap_holder_route_map_content_match_ip_route_source_prefix_list_rmrs(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy") route_map = ET.SubElement(hide_routemap_holder, "route-map") name_key = ET.SubElement(route_map, "name") name_key.text = kwargs.pop('name') action_rm_key = ET.SubElement(route_map, "action-rm") action_rm_key.text = kwargs.pop('action_rm') instance_key = ET.SubElement(route_map, "instance") instance_key.text = kwargs.pop('instance') content = ET.SubElement(route_map, "content") match = ET.SubElement(content, "match") ip = ET.SubElement(match, "ip") route_source = ET.SubElement(ip, "route-source") prefix_list_rmrs = ET.SubElement(route_source, "prefix-list-rmrs") prefix_list_rmrs.text = kwargs.pop('prefix_list_rmrs') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "hide_routemap_holder_route_map_content_match_ip_route_source_prefix_list_rmrs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "hide_routemap_holder", "=", "ET", ".", "SubElement", "(", "config", ",", "\"hide-routemap-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ip-policy\"", ")", "route_map", "=", "ET", ".", "SubElement", "(", "hide_routemap_holder", ",", "\"route-map\"", ")", "name_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"name\"", ")", "name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "action_rm_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"action-rm\"", ")", "action_rm_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'action_rm'", ")", "instance_key", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"instance\"", ")", "instance_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance'", ")", "content", "=", "ET", ".", "SubElement", "(", "route_map", ",", "\"content\"", ")", "match", "=", "ET", ".", "SubElement", "(", "content", ",", "\"match\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "match", ",", "\"ip\"", ")", "route_source", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"route-source\"", ")", "prefix_list_rmrs", "=", "ET", ".", "SubElement", "(", "route_source", ",", "\"prefix-list-rmrs\"", ")", "prefix_list_rmrs", ".", "text", "=", "kwargs", ".", "pop", "(", "'prefix_list_rmrs'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
52.47619
fake-name/ChromeController
ChromeController/transport.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/transport.py#L474-L509
def send(self, command, tab_key, params=None): ''' Send command `command` with optional parameters `params` to the remote chrome instance. The command `id` is automatically added to the outgoing message. return value is the command id, which can be used to match a command to it's associated response. ''' self.__check_open_socket(tab_key) sent_id = self.msg_id command = { "id": self.msg_id, "method": command, } if params: command["params"] = params navcom = json.dumps(command) # self.log.debug(" Sending: '%s'", navcom) try: self.soclist[tab_key].send(navcom) except (socket.timeout, websocket.WebSocketTimeoutException): raise cr_exceptions.ChromeCommunicationsError("Failure sending command to chromium.") except websocket.WebSocketConnectionClosedException: raise cr_exceptions.ChromeCommunicationsError("Websocket appears to have been closed. Is the" " remote chromium instance dead?") self.msg_id += 1 return sent_id
[ "def", "send", "(", "self", ",", "command", ",", "tab_key", ",", "params", "=", "None", ")", ":", "self", ".", "__check_open_socket", "(", "tab_key", ")", "sent_id", "=", "self", ".", "msg_id", "command", "=", "{", "\"id\"", ":", "self", ".", "msg_id", ",", "\"method\"", ":", "command", ",", "}", "if", "params", ":", "command", "[", "\"params\"", "]", "=", "params", "navcom", "=", "json", ".", "dumps", "(", "command", ")", "# self.log.debug(\"\t\tSending: '%s'\", navcom)", "try", ":", "self", ".", "soclist", "[", "tab_key", "]", ".", "send", "(", "navcom", ")", "except", "(", "socket", ".", "timeout", ",", "websocket", ".", "WebSocketTimeoutException", ")", ":", "raise", "cr_exceptions", ".", "ChromeCommunicationsError", "(", "\"Failure sending command to chromium.\"", ")", "except", "websocket", ".", "WebSocketConnectionClosedException", ":", "raise", "cr_exceptions", ".", "ChromeCommunicationsError", "(", "\"Websocket appears to have been closed. Is the\"", "\" remote chromium instance dead?\"", ")", "self", ".", "msg_id", "+=", "1", "return", "sent_id" ]
Send command `command` with optional parameters `params` to the remote chrome instance. The command `id` is automatically added to the outgoing message. return value is the command id, which can be used to match a command to it's associated response.
[ "Send", "command", "command", "with", "optional", "parameters", "params", "to", "the", "remote", "chrome", "instance", "." ]
python
train
26.75
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L825-L881
def get_or_create_node(self, graph: BELGraph, node: BaseEntity) -> Optional[Node]: """Create an entry and object for given node if it does not exist.""" sha512 = node.as_sha512() if sha512 in self.object_cache_node: return self.object_cache_node[sha512] node_model = self.get_node_by_hash(sha512) if node_model is not None: self.object_cache_node[sha512] = node_model return node_model node_model = Node._start_from_base_entity(node) namespace = node.get(NAMESPACE) if namespace is None: pass elif namespace in graph.namespace_url: url = graph.namespace_url[namespace] name = node[NAME] entry = self.get_namespace_entry(url, name) if entry is None: log.debug('skipping node with identifier %s: %s', url, name) return self.session.add(entry) node_model.namespace_entry = entry elif namespace in graph.namespace_pattern: name = node[NAME] pattern = graph.namespace_pattern[namespace] entry = self.get_or_create_regex_namespace_entry(namespace, pattern, name) self.session.add(entry) node_model.namespace_entry = entry else: log.warning("No reference in BELGraph for namespace: {}".format(node[NAMESPACE])) return if VARIANTS in node or FUSION in node: node_model.is_variant = True node_model.has_fusion = FUSION in node modifications = self.get_or_create_modification(graph, node) if modifications is None: log.warning('could not create %s because had an uncachable modification', node.as_bel()) return node_model.modifications = modifications self.session.add(node_model) self.object_cache_node[sha512] = node_model return node_model
[ "def", "get_or_create_node", "(", "self", ",", "graph", ":", "BELGraph", ",", "node", ":", "BaseEntity", ")", "->", "Optional", "[", "Node", "]", ":", "sha512", "=", "node", ".", "as_sha512", "(", ")", "if", "sha512", "in", "self", ".", "object_cache_node", ":", "return", "self", ".", "object_cache_node", "[", "sha512", "]", "node_model", "=", "self", ".", "get_node_by_hash", "(", "sha512", ")", "if", "node_model", "is", "not", "None", ":", "self", ".", "object_cache_node", "[", "sha512", "]", "=", "node_model", "return", "node_model", "node_model", "=", "Node", ".", "_start_from_base_entity", "(", "node", ")", "namespace", "=", "node", ".", "get", "(", "NAMESPACE", ")", "if", "namespace", "is", "None", ":", "pass", "elif", "namespace", "in", "graph", ".", "namespace_url", ":", "url", "=", "graph", ".", "namespace_url", "[", "namespace", "]", "name", "=", "node", "[", "NAME", "]", "entry", "=", "self", ".", "get_namespace_entry", "(", "url", ",", "name", ")", "if", "entry", "is", "None", ":", "log", ".", "debug", "(", "'skipping node with identifier %s: %s'", ",", "url", ",", "name", ")", "return", "self", ".", "session", ".", "add", "(", "entry", ")", "node_model", ".", "namespace_entry", "=", "entry", "elif", "namespace", "in", "graph", ".", "namespace_pattern", ":", "name", "=", "node", "[", "NAME", "]", "pattern", "=", "graph", ".", "namespace_pattern", "[", "namespace", "]", "entry", "=", "self", ".", "get_or_create_regex_namespace_entry", "(", "namespace", ",", "pattern", ",", "name", ")", "self", ".", "session", ".", "add", "(", "entry", ")", "node_model", ".", "namespace_entry", "=", "entry", "else", ":", "log", ".", "warning", "(", "\"No reference in BELGraph for namespace: {}\"", ".", "format", "(", "node", "[", "NAMESPACE", "]", ")", ")", "return", "if", "VARIANTS", "in", "node", "or", "FUSION", "in", "node", ":", "node_model", ".", "is_variant", "=", "True", "node_model", ".", "has_fusion", "=", "FUSION", "in", "node", "modifications", "=", "self", ".", "get_or_create_modification", "(", "graph", ",", "node", ")", "if", "modifications", "is", "None", ":", "log", ".", "warning", "(", "'could not create %s because had an uncachable modification'", ",", "node", ".", "as_bel", "(", ")", ")", "return", "node_model", ".", "modifications", "=", "modifications", "self", ".", "session", ".", "add", "(", "node_model", ")", "self", ".", "object_cache_node", "[", "sha512", "]", "=", "node_model", "return", "node_model" ]
Create an entry and object for given node if it does not exist.
[ "Create", "an", "entry", "and", "object", "for", "given", "node", "if", "it", "does", "not", "exist", "." ]
python
train
34.017544
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py#L455-L467
def draw_marked_line(self, data, coordinates, linestyle, markerstyle, label, mplobj=None): """Draw a line that also has markers. If this isn't reimplemented by a renderer object, by default, it will make a call to BOTH draw_line and draw_markers when both markerstyle and linestyle are not None in the same Line2D object. """ if linestyle is not None: self.draw_line(data, coordinates, linestyle, label, mplobj) if markerstyle is not None: self.draw_markers(data, coordinates, markerstyle, label, mplobj)
[ "def", "draw_marked_line", "(", "self", ",", "data", ",", "coordinates", ",", "linestyle", ",", "markerstyle", ",", "label", ",", "mplobj", "=", "None", ")", ":", "if", "linestyle", "is", "not", "None", ":", "self", ".", "draw_line", "(", "data", ",", "coordinates", ",", "linestyle", ",", "label", ",", "mplobj", ")", "if", "markerstyle", "is", "not", "None", ":", "self", ".", "draw_markers", "(", "data", ",", "coordinates", ",", "markerstyle", ",", "label", ",", "mplobj", ")" ]
Draw a line that also has markers. If this isn't reimplemented by a renderer object, by default, it will make a call to BOTH draw_line and draw_markers when both markerstyle and linestyle are not None in the same Line2D object.
[ "Draw", "a", "line", "that", "also", "has", "markers", "." ]
python
train
46.076923
couchbase/couchbase-python-client
couchbase/bucket.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucket.py#L1235-L1246
def lock_multi(self, keys, ttl=0): """Lock multiple keys. Multi variant of :meth:`lock` :param keys: the keys to lock :type keys: :ref:`iterable<argtypes>` :param int ttl: The lock timeout for all keys :return: a :class:`~.MultiResult` object .. seealso:: :meth:`lock` """ return _Base.lock_multi(self, keys, ttl=ttl)
[ "def", "lock_multi", "(", "self", ",", "keys", ",", "ttl", "=", "0", ")", ":", "return", "_Base", ".", "lock_multi", "(", "self", ",", "keys", ",", "ttl", "=", "ttl", ")" ]
Lock multiple keys. Multi variant of :meth:`lock` :param keys: the keys to lock :type keys: :ref:`iterable<argtypes>` :param int ttl: The lock timeout for all keys :return: a :class:`~.MultiResult` object .. seealso:: :meth:`lock`
[ "Lock", "multiple", "keys", ".", "Multi", "variant", "of", ":", "meth", ":", "lock" ]
python
train
31.083333
samfoo/vt102
vt102/__init__.py
https://github.com/samfoo/vt102/blob/ff5be883bc9a880a422b09bb87b210d7c408cf2c/vt102/__init__.py#L513-L550
def _print(self, char): """ Print a character at the current cursor position and advance the cursor. """ # Don't make bugs where we try to print a screen. assert len(char) == 1 try: try: # Python 3 char = self.decoder(bytes(char, self.encoding))[0] except TypeError: # Python 2.x char = self.decoder(char)[0] except UnicodeDecodeError: char = "?" if self.current_charset == "g0" and self.g0 is not None: char = char.translate(self.g0) elif self.current_charset == "g1" and self.g1 is not None: char = char.translate(self.g1) row = self.display[self.y] self.display[self.y] = row[:self.x] + char + row[self.x+1:] attrs = self.attributes[self.y] self.attributes[self.y] = attrs[:self.x] + [self.cursor_attributes] + \ attrs[self.x+1:] self.x += 1 if self.x >= self.size[1]: # If this was the last column in a row, move the cursor to the # next row. self._linefeed()
[ "def", "_print", "(", "self", ",", "char", ")", ":", "# Don't make bugs where we try to print a screen. ", "assert", "len", "(", "char", ")", "==", "1", "try", ":", "try", ":", "# Python 3", "char", "=", "self", ".", "decoder", "(", "bytes", "(", "char", ",", "self", ".", "encoding", ")", ")", "[", "0", "]", "except", "TypeError", ":", "# Python 2.x", "char", "=", "self", ".", "decoder", "(", "char", ")", "[", "0", "]", "except", "UnicodeDecodeError", ":", "char", "=", "\"?\"", "if", "self", ".", "current_charset", "==", "\"g0\"", "and", "self", ".", "g0", "is", "not", "None", ":", "char", "=", "char", ".", "translate", "(", "self", ".", "g0", ")", "elif", "self", ".", "current_charset", "==", "\"g1\"", "and", "self", ".", "g1", "is", "not", "None", ":", "char", "=", "char", ".", "translate", "(", "self", ".", "g1", ")", "row", "=", "self", ".", "display", "[", "self", ".", "y", "]", "self", ".", "display", "[", "self", ".", "y", "]", "=", "row", "[", ":", "self", ".", "x", "]", "+", "char", "+", "row", "[", "self", ".", "x", "+", "1", ":", "]", "attrs", "=", "self", ".", "attributes", "[", "self", ".", "y", "]", "self", ".", "attributes", "[", "self", ".", "y", "]", "=", "attrs", "[", ":", "self", ".", "x", "]", "+", "[", "self", ".", "cursor_attributes", "]", "+", "attrs", "[", "self", ".", "x", "+", "1", ":", "]", "self", ".", "x", "+=", "1", "if", "self", ".", "x", ">=", "self", ".", "size", "[", "1", "]", ":", "# If this was the last column in a row, move the cursor to the", "# next row.", "self", ".", "_linefeed", "(", ")" ]
Print a character at the current cursor position and advance the cursor.
[ "Print", "a", "character", "at", "the", "current", "cursor", "position", "and", "advance", "the", "cursor", "." ]
python
train
29.973684
tradenity/python-sdk
tradenity/resources/braintree_gateway.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/braintree_gateway.py#L709-L731
def list_all_braintree_gateways(cls, **kwargs): """List BraintreeGateways Return a list of BraintreeGateways This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_braintree_gateways(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[BraintreeGateway] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_braintree_gateways_with_http_info(**kwargs) else: (data) = cls._list_all_braintree_gateways_with_http_info(**kwargs) return data
[ "def", "list_all_braintree_gateways", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_braintree_gateways_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_braintree_gateways_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
List BraintreeGateways Return a list of BraintreeGateways This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_braintree_gateways(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[BraintreeGateway] If the method is called asynchronously, returns the request thread.
[ "List", "BraintreeGateways" ]
python
train
39.086957
mlperf/training
reinforcement/tensorflow/minigo/dual_net.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/reinforcement/tensorflow/minigo/dual_net.py#L219-L376
def model_fn(features, labels, mode, params): """ Create the model for estimator api Args: features: tensor with shape [BATCH_SIZE, go.N, go.N, features_lib.NEW_FEATURES_PLANES] labels: dict from string to tensor with shape 'pi_tensor': [BATCH_SIZE, go.N * go.N + 1] 'value_tensor': [BATCH_SIZE] mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only) params: A dictionary (Typically derived from the FLAGS object.) Returns: tf.estimator.EstimatorSpec with props mode: same as mode arg predictions: dict of tensors 'policy': [BATCH_SIZE, go.N * go.N + 1] 'value': [BATCH_SIZE] loss: a single value tensor train_op: train op eval_metric_ops return dict of tensors logits: [BATCH_SIZE, go.N * go.N + 1] """ policy_output, value_output, logits = model_inference_fn( features, mode == tf.estimator.ModeKeys.TRAIN, params) # train ops policy_cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits_v2( logits=logits, labels=tf.stop_gradient(labels['pi_tensor']))) value_cost = params['value_cost_weight'] * tf.reduce_mean( tf.square(value_output - labels['value_tensor'])) reg_vars = [v for v in tf.trainable_variables() if 'bias' not in v.name and 'beta' not in v.name] l2_cost = params['l2_strength'] * \ tf.add_n([tf.nn.l2_loss(v) for v in reg_vars]) combined_cost = policy_cost + value_cost + l2_cost global_step = tf.train.get_or_create_global_step() learning_rate = tf.train.piecewise_constant( global_step, params['lr_boundaries'], params['lr_rates']) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # Insert quantization ops if requested if params['quantize']: if mode == tf.estimator.ModeKeys.TRAIN: tf.contrib.quantize.create_training_graph( quant_delay=params['quant_delay']) else: tf.contrib.quantize.create_eval_graph() optimizer = tf.train.MomentumOptimizer( learning_rate, params['sgd_momentum']) if params['use_tpu']: optimizer = tpu_optimizer.CrossShardOptimizer(optimizer) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(combined_cost, global_step=global_step) # Computations to be executed on CPU, outside of the main TPU queues. def eval_metrics_host_call_fn(policy_output, value_output, pi_tensor, policy_cost, value_cost, l2_cost, combined_cost, step, est_mode=tf.estimator.ModeKeys.TRAIN): policy_entropy = -tf.reduce_mean(tf.reduce_sum( policy_output * tf.log(policy_output), axis=1)) # pi_tensor is one_hot when generated from sgfs (for supervised learning) # and soft-max when using self-play records. argmax normalizes the two. 
policy_target_top_1 = tf.argmax(pi_tensor, axis=1) policy_output_in_top1 = tf.to_float( tf.nn.in_top_k(policy_output, policy_target_top_1, k=1)) policy_output_in_top3 = tf.to_float( tf.nn.in_top_k(policy_output, policy_target_top_1, k=3)) policy_top_1_confidence = tf.reduce_max(policy_output, axis=1) policy_target_top_1_confidence = tf.boolean_mask( policy_output, tf.one_hot(policy_target_top_1, tf.shape(policy_output)[1])) value_cost_normalized = value_cost / params['value_cost_weight'] with tf.variable_scope("metrics"): metric_ops = { 'policy_cost': tf.metrics.mean(policy_cost), 'value_cost': tf.metrics.mean(value_cost), 'value_cost_normalized': tf.metrics.mean(value_cost_normalized), 'l2_cost': tf.metrics.mean(l2_cost), 'policy_entropy': tf.metrics.mean(policy_entropy), 'combined_cost': tf.metrics.mean(combined_cost), 'policy_accuracy_top_1': tf.metrics.mean(policy_output_in_top1), 'policy_accuracy_top_3': tf.metrics.mean(policy_output_in_top3), 'policy_top_1_confidence': tf.metrics.mean(policy_top_1_confidence), 'policy_target_top_1_confidence': tf.metrics.mean( policy_target_top_1_confidence), 'value_confidence': tf.metrics.mean(tf.abs(value_output)), } if est_mode == tf.estimator.ModeKeys.EVAL: return metric_ops # NOTE: global_step is rounded to a multiple of FLAGS.summary_steps. eval_step = tf.reduce_min(step) # Create summary ops so that they show up in SUMMARIES collection # That way, they get logged automatically during training summary_writer = summary.create_file_writer(FLAGS.work_dir) with summary_writer.as_default(), \ summary.record_summaries_every_n_global_steps( params['summary_steps'], eval_step): for metric_name, metric_op in metric_ops.items(): summary.scalar(metric_name, metric_op[1], step=eval_step) # Reset metrics occasionally so that they are mean of recent batches. reset_op = tf.variables_initializer(tf.local_variables("metrics")) cond_reset_op = tf.cond( tf.equal(eval_step % params['summary_steps'], tf.to_int64(1)), lambda: reset_op, lambda: tf.no_op()) return summary.all_summary_ops() + [cond_reset_op] metric_args = [ policy_output, value_output, labels['pi_tensor'], tf.reshape(policy_cost, [1]), tf.reshape(value_cost, [1]), tf.reshape(l2_cost, [1]), tf.reshape(combined_cost, [1]), tf.reshape(global_step, [1]), ] predictions = { 'policy_output': policy_output, 'value_output': value_output, } eval_metrics_only_fn = functools.partial( eval_metrics_host_call_fn, est_mode=tf.estimator.ModeKeys.EVAL) host_call_fn = functools.partial( eval_metrics_host_call_fn, est_mode=tf.estimator.ModeKeys.TRAIN) tpu_estimator_spec = tpu_estimator.TPUEstimatorSpec( mode=mode, predictions=predictions, loss=combined_cost, train_op=train_op, eval_metrics=(eval_metrics_only_fn, metric_args), host_call=(host_call_fn, metric_args) ) if params['use_tpu']: return tpu_estimator_spec else: return tpu_estimator_spec.as_estimator_spec()
[ "def", "model_fn", "(", "features", ",", "labels", ",", "mode", ",", "params", ")", ":", "policy_output", ",", "value_output", ",", "logits", "=", "model_inference_fn", "(", "features", ",", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ",", "params", ")", "# train ops", "policy_cost", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "nn", ".", "softmax_cross_entropy_with_logits_v2", "(", "logits", "=", "logits", ",", "labels", "=", "tf", ".", "stop_gradient", "(", "labels", "[", "'pi_tensor'", "]", ")", ")", ")", "value_cost", "=", "params", "[", "'value_cost_weight'", "]", "*", "tf", ".", "reduce_mean", "(", "tf", ".", "square", "(", "value_output", "-", "labels", "[", "'value_tensor'", "]", ")", ")", "reg_vars", "=", "[", "v", "for", "v", "in", "tf", ".", "trainable_variables", "(", ")", "if", "'bias'", "not", "in", "v", ".", "name", "and", "'beta'", "not", "in", "v", ".", "name", "]", "l2_cost", "=", "params", "[", "'l2_strength'", "]", "*", "tf", ".", "add_n", "(", "[", "tf", ".", "nn", ".", "l2_loss", "(", "v", ")", "for", "v", "in", "reg_vars", "]", ")", "combined_cost", "=", "policy_cost", "+", "value_cost", "+", "l2_cost", "global_step", "=", "tf", ".", "train", ".", "get_or_create_global_step", "(", ")", "learning_rate", "=", "tf", ".", "train", ".", "piecewise_constant", "(", "global_step", ",", "params", "[", "'lr_boundaries'", "]", ",", "params", "[", "'lr_rates'", "]", ")", "update_ops", "=", "tf", ".", "get_collection", "(", "tf", ".", "GraphKeys", ".", "UPDATE_OPS", ")", "# Insert quantization ops if requested", "if", "params", "[", "'quantize'", "]", ":", "if", "mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ":", "tf", ".", "contrib", ".", "quantize", ".", "create_training_graph", "(", "quant_delay", "=", "params", "[", "'quant_delay'", "]", ")", "else", ":", "tf", ".", "contrib", ".", "quantize", ".", "create_eval_graph", "(", ")", "optimizer", "=", "tf", ".", "train", ".", "MomentumOptimizer", "(", "learning_rate", ",", "params", "[", "'sgd_momentum'", "]", ")", "if", "params", "[", "'use_tpu'", "]", ":", "optimizer", "=", "tpu_optimizer", ".", "CrossShardOptimizer", "(", "optimizer", ")", "with", "tf", ".", "control_dependencies", "(", "update_ops", ")", ":", "train_op", "=", "optimizer", ".", "minimize", "(", "combined_cost", ",", "global_step", "=", "global_step", ")", "# Computations to be executed on CPU, outside of the main TPU queues.", "def", "eval_metrics_host_call_fn", "(", "policy_output", ",", "value_output", ",", "pi_tensor", ",", "policy_cost", ",", "value_cost", ",", "l2_cost", ",", "combined_cost", ",", "step", ",", "est_mode", "=", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", ":", "policy_entropy", "=", "-", "tf", ".", "reduce_mean", "(", "tf", ".", "reduce_sum", "(", "policy_output", "*", "tf", ".", "log", "(", "policy_output", ")", ",", "axis", "=", "1", ")", ")", "# pi_tensor is one_hot when generated from sgfs (for supervised learning)", "# and soft-max when using self-play records. 
argmax normalizes the two.", "policy_target_top_1", "=", "tf", ".", "argmax", "(", "pi_tensor", ",", "axis", "=", "1", ")", "policy_output_in_top1", "=", "tf", ".", "to_float", "(", "tf", ".", "nn", ".", "in_top_k", "(", "policy_output", ",", "policy_target_top_1", ",", "k", "=", "1", ")", ")", "policy_output_in_top3", "=", "tf", ".", "to_float", "(", "tf", ".", "nn", ".", "in_top_k", "(", "policy_output", ",", "policy_target_top_1", ",", "k", "=", "3", ")", ")", "policy_top_1_confidence", "=", "tf", ".", "reduce_max", "(", "policy_output", ",", "axis", "=", "1", ")", "policy_target_top_1_confidence", "=", "tf", ".", "boolean_mask", "(", "policy_output", ",", "tf", ".", "one_hot", "(", "policy_target_top_1", ",", "tf", ".", "shape", "(", "policy_output", ")", "[", "1", "]", ")", ")", "value_cost_normalized", "=", "value_cost", "/", "params", "[", "'value_cost_weight'", "]", "with", "tf", ".", "variable_scope", "(", "\"metrics\"", ")", ":", "metric_ops", "=", "{", "'policy_cost'", ":", "tf", ".", "metrics", ".", "mean", "(", "policy_cost", ")", ",", "'value_cost'", ":", "tf", ".", "metrics", ".", "mean", "(", "value_cost", ")", ",", "'value_cost_normalized'", ":", "tf", ".", "metrics", ".", "mean", "(", "value_cost_normalized", ")", ",", "'l2_cost'", ":", "tf", ".", "metrics", ".", "mean", "(", "l2_cost", ")", ",", "'policy_entropy'", ":", "tf", ".", "metrics", ".", "mean", "(", "policy_entropy", ")", ",", "'combined_cost'", ":", "tf", ".", "metrics", ".", "mean", "(", "combined_cost", ")", ",", "'policy_accuracy_top_1'", ":", "tf", ".", "metrics", ".", "mean", "(", "policy_output_in_top1", ")", ",", "'policy_accuracy_top_3'", ":", "tf", ".", "metrics", ".", "mean", "(", "policy_output_in_top3", ")", ",", "'policy_top_1_confidence'", ":", "tf", ".", "metrics", ".", "mean", "(", "policy_top_1_confidence", ")", ",", "'policy_target_top_1_confidence'", ":", "tf", ".", "metrics", ".", "mean", "(", "policy_target_top_1_confidence", ")", ",", "'value_confidence'", ":", "tf", ".", "metrics", ".", "mean", "(", "tf", ".", "abs", "(", "value_output", ")", ")", ",", "}", "if", "est_mode", "==", "tf", ".", "estimator", ".", "ModeKeys", ".", "EVAL", ":", "return", "metric_ops", "# NOTE: global_step is rounded to a multiple of FLAGS.summary_steps.", "eval_step", "=", "tf", ".", "reduce_min", "(", "step", ")", "# Create summary ops so that they show up in SUMMARIES collection", "# That way, they get logged automatically during training", "summary_writer", "=", "summary", ".", "create_file_writer", "(", "FLAGS", ".", "work_dir", ")", "with", "summary_writer", ".", "as_default", "(", ")", ",", "summary", ".", "record_summaries_every_n_global_steps", "(", "params", "[", "'summary_steps'", "]", ",", "eval_step", ")", ":", "for", "metric_name", ",", "metric_op", "in", "metric_ops", ".", "items", "(", ")", ":", "summary", ".", "scalar", "(", "metric_name", ",", "metric_op", "[", "1", "]", ",", "step", "=", "eval_step", ")", "# Reset metrics occasionally so that they are mean of recent batches.", "reset_op", "=", "tf", ".", "variables_initializer", "(", "tf", ".", "local_variables", "(", "\"metrics\"", ")", ")", "cond_reset_op", "=", "tf", ".", "cond", "(", "tf", ".", "equal", "(", "eval_step", "%", "params", "[", "'summary_steps'", "]", ",", "tf", ".", "to_int64", "(", "1", ")", ")", ",", "lambda", ":", "reset_op", ",", "lambda", ":", "tf", ".", "no_op", "(", ")", ")", "return", "summary", ".", "all_summary_ops", "(", ")", "+", "[", "cond_reset_op", "]", "metric_args", "=", "[", "policy_output", ",", 
"value_output", ",", "labels", "[", "'pi_tensor'", "]", ",", "tf", ".", "reshape", "(", "policy_cost", ",", "[", "1", "]", ")", ",", "tf", ".", "reshape", "(", "value_cost", ",", "[", "1", "]", ")", ",", "tf", ".", "reshape", "(", "l2_cost", ",", "[", "1", "]", ")", ",", "tf", ".", "reshape", "(", "combined_cost", ",", "[", "1", "]", ")", ",", "tf", ".", "reshape", "(", "global_step", ",", "[", "1", "]", ")", ",", "]", "predictions", "=", "{", "'policy_output'", ":", "policy_output", ",", "'value_output'", ":", "value_output", ",", "}", "eval_metrics_only_fn", "=", "functools", ".", "partial", "(", "eval_metrics_host_call_fn", ",", "est_mode", "=", "tf", ".", "estimator", ".", "ModeKeys", ".", "EVAL", ")", "host_call_fn", "=", "functools", ".", "partial", "(", "eval_metrics_host_call_fn", ",", "est_mode", "=", "tf", ".", "estimator", ".", "ModeKeys", ".", "TRAIN", ")", "tpu_estimator_spec", "=", "tpu_estimator", ".", "TPUEstimatorSpec", "(", "mode", "=", "mode", ",", "predictions", "=", "predictions", ",", "loss", "=", "combined_cost", ",", "train_op", "=", "train_op", ",", "eval_metrics", "=", "(", "eval_metrics_only_fn", ",", "metric_args", ")", ",", "host_call", "=", "(", "host_call_fn", ",", "metric_args", ")", ")", "if", "params", "[", "'use_tpu'", "]", ":", "return", "tpu_estimator_spec", "else", ":", "return", "tpu_estimator_spec", ".", "as_estimator_spec", "(", ")" ]
Create the model for estimator api Args: features: tensor with shape [BATCH_SIZE, go.N, go.N, features_lib.NEW_FEATURES_PLANES] labels: dict from string to tensor with shape 'pi_tensor': [BATCH_SIZE, go.N * go.N + 1] 'value_tensor': [BATCH_SIZE] mode: a tf.estimator.ModeKeys (batchnorm params update for TRAIN only) params: A dictionary (Typically derived from the FLAGS object.) Returns: tf.estimator.EstimatorSpec with props mode: same as mode arg predictions: dict of tensors 'policy': [BATCH_SIZE, go.N * go.N + 1] 'value': [BATCH_SIZE] loss: a single value tensor train_op: train op eval_metric_ops return dict of tensors logits: [BATCH_SIZE, go.N * go.N + 1]
[ "Create", "the", "model", "for", "estimator", "api" ]
python
train
40.892405
swharden/SWHLab
swhlab/common.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/common.py#L36-L47
def lowpass(data,filterSize=None): """ minimal complexity low-pass filtering. Filter size is how "wide" the filter will be. Sigma will be 1/10 of this filter width. If filter size isn't given, it will be 1/10 of the data size. """ if filterSize is None: filterSize=len(data)/10 kernel=kernel_gaussian(size=filterSize) data=convolve(data,kernel) # do the convolution with padded edges return data
[ "def", "lowpass", "(", "data", ",", "filterSize", "=", "None", ")", ":", "if", "filterSize", "is", "None", ":", "filterSize", "=", "len", "(", "data", ")", "/", "10", "kernel", "=", "kernel_gaussian", "(", "size", "=", "filterSize", ")", "data", "=", "convolve", "(", "data", ",", "kernel", ")", "# do the convolution with padded edges", "return", "data" ]
minimal complexity low-pass filtering. Filter size is how "wide" the filter will be. Sigma will be 1/10 of this filter width. If filter size isn't given, it will be 1/10 of the data size.
[ "minimal", "complexity", "low", "-", "pass", "filtering", ".", "Filter", "size", "is", "how", "wide", "the", "filter", "will", "be", ".", "Sigma", "will", "be", "1", "/", "10", "of", "this", "filter", "width", ".", "If", "filter", "size", "isn", "t", "given", "it", "will", "be", "1", "/", "10", "of", "the", "data", "size", "." ]
python
valid
36
amelchio/eternalegypt
examples/sms.py
https://github.com/amelchio/eternalegypt/blob/895e0b235ceaf7f61458c620237c3ad397780e98/examples/sms.py#L15-L26
async def send_message(): """Example of sending a message.""" jar = aiohttp.CookieJar(unsafe=True) websession = aiohttp.ClientSession(cookie_jar=jar) modem = eternalegypt.Modem(hostname=sys.argv[1], websession=websession) await modem.login(password=sys.argv[2]) await modem.sms(phone=sys.argv[3], message=sys.argv[4]) await modem.logout() await websession.close()
[ "async", "def", "send_message", "(", ")", ":", "jar", "=", "aiohttp", ".", "CookieJar", "(", "unsafe", "=", "True", ")", "websession", "=", "aiohttp", ".", "ClientSession", "(", "cookie_jar", "=", "jar", ")", "modem", "=", "eternalegypt", ".", "Modem", "(", "hostname", "=", "sys", ".", "argv", "[", "1", "]", ",", "websession", "=", "websession", ")", "await", "modem", ".", "login", "(", "password", "=", "sys", ".", "argv", "[", "2", "]", ")", "await", "modem", ".", "sms", "(", "phone", "=", "sys", ".", "argv", "[", "3", "]", ",", "message", "=", "sys", ".", "argv", "[", "4", "]", ")", "await", "modem", ".", "logout", "(", ")", "await", "websession", ".", "close", "(", ")" ]
Example of sending a message.
[ "Example", "of", "sending", "a", "message", "." ]
python
test
32.25
gtaylor/python-route53
route53/connection.py
https://github.com/gtaylor/python-route53/blob/b9fc7e258a79551c9ed61e4a71668b7f06f9e774/route53/connection.py#L113-L139
def list_hosted_zones(self, page_chunks=100): """ List all hosted zones associated with this connection's account. Since this method returns a generator, you can pull as many or as few entries as you'd like, without having to query and receive every hosted zone you may have. :keyword int page_chunks: This API call is "paginated" behind-the-scenes in order to break up large result sets. This number determines the maximum number of :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instances to retrieve per request. The default is fine for almost everyone. :rtype: generator :returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instances. """ return self._do_autopaginating_api_call( path='hostedzone', params={'maxitems': page_chunks}, method='GET', parser_func=xml_parsers.list_hosted_zones_parser, next_marker_xpath="./{*}NextMarker", next_marker_param_name="marker", )
[ "def", "list_hosted_zones", "(", "self", ",", "page_chunks", "=", "100", ")", ":", "return", "self", ".", "_do_autopaginating_api_call", "(", "path", "=", "'hostedzone'", ",", "params", "=", "{", "'maxitems'", ":", "page_chunks", "}", ",", "method", "=", "'GET'", ",", "parser_func", "=", "xml_parsers", ".", "list_hosted_zones_parser", ",", "next_marker_xpath", "=", "\"./{*}NextMarker\"", ",", "next_marker_param_name", "=", "\"marker\"", ",", ")" ]
List all hosted zones associated with this connection's account. Since this method returns a generator, you can pull as many or as few entries as you'd like, without having to query and receive every hosted zone you may have. :keyword int page_chunks: This API call is "paginated" behind-the-scenes in order to break up large result sets. This number determines the maximum number of :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instances to retrieve per request. The default is fine for almost everyone. :rtype: generator :returns: A generator of :py:class:`HostedZone <route53.hosted_zone.HostedZone>` instances.
[ "List", "all", "hosted", "zones", "associated", "with", "this", "connection", "s", "account", ".", "Since", "this", "method", "returns", "a", "generator", "you", "can", "pull", "as", "many", "or", "as", "few", "entries", "as", "you", "d", "like", "without", "having", "to", "query", "and", "receive", "every", "hosted", "zone", "you", "may", "have", "." ]
python
test
41.444444
xmikos/reparser
reparser.py
https://github.com/xmikos/reparser/blob/0668112a15b9e8e9355a1261040c36b4a6034020/reparser.py#L103-L109
def get_matched_token(self, match): """Find which token has been matched by compound regex""" match_groupdict = match.groupdict() for group in self.groups: if match_groupdict[group] is not None: token, match_type = self.groups[group] return (token, match_type, group)
[ "def", "get_matched_token", "(", "self", ",", "match", ")", ":", "match_groupdict", "=", "match", ".", "groupdict", "(", ")", "for", "group", "in", "self", ".", "groups", ":", "if", "match_groupdict", "[", "group", "]", "is", "not", "None", ":", "token", ",", "match_type", "=", "self", ".", "groups", "[", "group", "]", "return", "(", "token", ",", "match_type", ",", "group", ")" ]
Find which token has been matched by compound regex
[ "Find", "which", "token", "has", "been", "matched", "by", "compound", "regex" ]
python
train
47
bitcraze/crazyflie-lib-python
cflib/crazyflie/__init__.py
https://github.com/bitcraze/crazyflie-lib-python/blob/f6ebb4eb315bbe6e02db518936ac17fb615b2af8/cflib/crazyflie/__init__.py#L302-L341
def send_packet(self, pk, expected_reply=(), resend=False, timeout=0.2): """ Send a packet through the link interface. pk -- Packet to send expect_answer -- True if a packet from the Crazyflie is expected to be sent back, otherwise false """ self._send_lock.acquire() if self.link is not None: if len(expected_reply) > 0 and not resend and \ self.link.needs_resending: pattern = (pk.header,) + expected_reply logger.debug( 'Sending packet and expecting the %s pattern back', pattern) new_timer = Timer(timeout, lambda: self._no_answer_do_retry(pk, pattern)) self._answer_patterns[pattern] = new_timer new_timer.start() elif resend: # Check if we have gotten an answer, if not try again pattern = expected_reply if pattern in self._answer_patterns: logger.debug('We want to resend and the pattern is there') if self._answer_patterns[pattern]: new_timer = Timer(timeout, lambda: self._no_answer_do_retry( pk, pattern)) self._answer_patterns[pattern] = new_timer new_timer.start() else: logger.debug('Resend requested, but no pattern found: %s', self._answer_patterns) self.link.send_packet(pk) self.packet_sent.call(pk) self._send_lock.release()
[ "def", "send_packet", "(", "self", ",", "pk", ",", "expected_reply", "=", "(", ")", ",", "resend", "=", "False", ",", "timeout", "=", "0.2", ")", ":", "self", ".", "_send_lock", ".", "acquire", "(", ")", "if", "self", ".", "link", "is", "not", "None", ":", "if", "len", "(", "expected_reply", ")", ">", "0", "and", "not", "resend", "and", "self", ".", "link", ".", "needs_resending", ":", "pattern", "=", "(", "pk", ".", "header", ",", ")", "+", "expected_reply", "logger", ".", "debug", "(", "'Sending packet and expecting the %s pattern back'", ",", "pattern", ")", "new_timer", "=", "Timer", "(", "timeout", ",", "lambda", ":", "self", ".", "_no_answer_do_retry", "(", "pk", ",", "pattern", ")", ")", "self", ".", "_answer_patterns", "[", "pattern", "]", "=", "new_timer", "new_timer", ".", "start", "(", ")", "elif", "resend", ":", "# Check if we have gotten an answer, if not try again", "pattern", "=", "expected_reply", "if", "pattern", "in", "self", ".", "_answer_patterns", ":", "logger", ".", "debug", "(", "'We want to resend and the pattern is there'", ")", "if", "self", ".", "_answer_patterns", "[", "pattern", "]", ":", "new_timer", "=", "Timer", "(", "timeout", ",", "lambda", ":", "self", ".", "_no_answer_do_retry", "(", "pk", ",", "pattern", ")", ")", "self", ".", "_answer_patterns", "[", "pattern", "]", "=", "new_timer", "new_timer", ".", "start", "(", ")", "else", ":", "logger", ".", "debug", "(", "'Resend requested, but no pattern found: %s'", ",", "self", ".", "_answer_patterns", ")", "self", ".", "link", ".", "send_packet", "(", "pk", ")", "self", ".", "packet_sent", ".", "call", "(", "pk", ")", "self", ".", "_send_lock", ".", "release", "(", ")" ]
Send a packet through the link interface. pk -- Packet to send expect_answer -- True if a packet from the Crazyflie is expected to be sent back, otherwise false
[ "Send", "a", "packet", "through", "the", "link", "interface", "." ]
python
train
46.075
bitesofcode/projexui
projexui/widgets/xnodewidget/xnode.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L1831-L1839
def setIsolateHidden(self, state): """ Sets whether or not this item is hidden due to isolation. :param state | <bool> """ self._isolatedHidden = state super(XNode, self).setVisible(self.isVisible())
[ "def", "setIsolateHidden", "(", "self", ",", "state", ")", ":", "self", ".", "_isolatedHidden", "=", "state", "super", "(", "XNode", ",", "self", ")", ".", "setVisible", "(", "self", ".", "isVisible", "(", ")", ")" ]
Sets whether or not this item is hidden due to isolation. :param state | <bool>
[ "Sets", "whether", "or", "not", "this", "item", "is", "hidden", "due", "to", "isolation", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
29.111111
pallets/werkzeug
src/werkzeug/serving.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/serving.py#L611-L630
def select_address_family(host, port): """Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on the host and port.""" # disabled due to problems with current ipv6 implementations # and various operating systems. Probably this code also is # not supposed to work, but I can't come up with any other # ways to implement this. # try: # info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, # socket.SOCK_STREAM, 0, # socket.AI_PASSIVE) # if info: # return info[0][0] # except socket.gaierror: # pass if host.startswith("unix://"): return socket.AF_UNIX elif ":" in host and hasattr(socket, "AF_INET6"): return socket.AF_INET6 return socket.AF_INET
[ "def", "select_address_family", "(", "host", ",", "port", ")", ":", "# disabled due to problems with current ipv6 implementations", "# and various operating systems. Probably this code also is", "# not supposed to work, but I can't come up with any other", "# ways to implement this.", "# try:", "# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,", "# socket.SOCK_STREAM, 0,", "# socket.AI_PASSIVE)", "# if info:", "# return info[0][0]", "# except socket.gaierror:", "# pass", "if", "host", ".", "startswith", "(", "\"unix://\"", ")", ":", "return", "socket", ".", "AF_UNIX", "elif", "\":\"", "in", "host", "and", "hasattr", "(", "socket", ",", "\"AF_INET6\"", ")", ":", "return", "socket", ".", "AF_INET6", "return", "socket", ".", "AF_INET" ]
Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on the host and port.
[ "Return", "AF_INET4", "AF_INET6", "or", "AF_UNIX", "depending", "on", "the", "host", "and", "port", "." ]
python
train
40.05
etcher-be/epab
epab/utils/_repo.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L33-L52
def tag(self, tag: str, overwrite: bool = False) -> None: """ Tags the current commit :param tag: tag :type tag: str :param overwrite: overwrite existing tag :type overwrite: bool """ LOGGER.info('tagging repo: %s', tag) try: self.repo.create_tag(tag) except GitCommandError as exc: if 'already exists' in exc.stderr and overwrite: LOGGER.info('overwriting existing tag') self.remove_tag(tag) self.repo.create_tag(tag) else: LOGGER.exception('error while tagging repo') raise
[ "def", "tag", "(", "self", ",", "tag", ":", "str", ",", "overwrite", ":", "bool", "=", "False", ")", "->", "None", ":", "LOGGER", ".", "info", "(", "'tagging repo: %s'", ",", "tag", ")", "try", ":", "self", ".", "repo", ".", "create_tag", "(", "tag", ")", "except", "GitCommandError", "as", "exc", ":", "if", "'already exists'", "in", "exc", ".", "stderr", "and", "overwrite", ":", "LOGGER", ".", "info", "(", "'overwriting existing tag'", ")", "self", ".", "remove_tag", "(", "tag", ")", "self", ".", "repo", ".", "create_tag", "(", "tag", ")", "else", ":", "LOGGER", ".", "exception", "(", "'error while tagging repo'", ")", "raise" ]
Tags the current commit :param tag: tag :type tag: str :param overwrite: overwrite existing tag :type overwrite: bool
[ "Tags", "the", "current", "commit" ]
python
train
32.65
ninuxorg/nodeshot
nodeshot/core/nodes/models/node.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/nodes/models/node.py#L88-L116
def save(self, *args, **kwargs): """ Custom save method does the following things: * converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point) * intercepts changes to status and fires node_status_changed signal * set default status """ # geometry collection check if isinstance(self.geometry, GeometryCollection) and 0 < len(self.geometry) < 2: self.geometry = self.geometry[0] # if no status specified if not self.status and not self.status_id: try: self.status = Status.objects.filter(is_default=True)[0] except IndexError: pass super(Node, self).save(*args, **kwargs) # if status of a node changes if (self.status and self._current_status and self.status.id != self._current_status) or\ (self.status_id and self._current_status and self.status_id != self._current_status): # send django signal node_status_changed.send( sender=self.__class__, instance=self, old_status=Status.objects.get(pk=self._current_status), new_status=self.status ) # update _current_status self._current_status = self.status_id
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# geometry collection check", "if", "isinstance", "(", "self", ".", "geometry", ",", "GeometryCollection", ")", "and", "0", "<", "len", "(", "self", ".", "geometry", ")", "<", "2", ":", "self", ".", "geometry", "=", "self", ".", "geometry", "[", "0", "]", "# if no status specified", "if", "not", "self", ".", "status", "and", "not", "self", ".", "status_id", ":", "try", ":", "self", ".", "status", "=", "Status", ".", "objects", ".", "filter", "(", "is_default", "=", "True", ")", "[", "0", "]", "except", "IndexError", ":", "pass", "super", "(", "Node", ",", "self", ")", ".", "save", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# if status of a node changes", "if", "(", "self", ".", "status", "and", "self", ".", "_current_status", "and", "self", ".", "status", ".", "id", "!=", "self", ".", "_current_status", ")", "or", "(", "self", ".", "status_id", "and", "self", ".", "_current_status", "and", "self", ".", "status_id", "!=", "self", ".", "_current_status", ")", ":", "# send django signal", "node_status_changed", ".", "send", "(", "sender", "=", "self", ".", "__class__", ",", "instance", "=", "self", ",", "old_status", "=", "Status", ".", "objects", ".", "get", "(", "pk", "=", "self", ".", "_current_status", ")", ",", "new_status", "=", "self", ".", "status", ")", "# update _current_status", "self", ".", "_current_status", "=", "self", ".", "status_id" ]
Custom save method does the following things: * converts geometry collections of just 1 item to that item (eg: a collection of 1 Point becomes a Point) * intercepts changes to status and fires node_status_changed signal * set default status
[ "Custom", "save", "method", "does", "the", "following", "things", ":", "*", "converts", "geometry", "collections", "of", "just", "1", "item", "to", "that", "item", "(", "eg", ":", "a", "collection", "of", "1", "Point", "becomes", "a", "Point", ")", "*", "intercepts", "changes", "to", "status", "and", "fires", "node_status_changed", "signal", "*", "set", "default", "status" ]
python
train
45.965517
Robpol86/Flask-Celery-Helper
flask_celery.py
https://github.com/Robpol86/Flask-Celery-Helper/blob/92bd3b02954422665260116adda8eb899546c365/flask_celery.py#L82-L85
def is_already_running(self): """Return True if lock exists and has not timed out.""" redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier) return self.celery_self.backend.client.exists(redis_key)
[ "def", "is_already_running", "(", "self", ")", ":", "redis_key", "=", "self", ".", "CELERY_LOCK", ".", "format", "(", "task_id", "=", "self", ".", "task_identifier", ")", "return", "self", ".", "celery_self", ".", "backend", ".", "client", ".", "exists", "(", "redis_key", ")" ]
Return True if lock exists and has not timed out.
[ "Return", "True", "if", "lock", "exists", "and", "has", "not", "timed", "out", "." ]
python
valid
57.25