Dataset schema (column name, type, value-length range):

    repo        string, 7-54 chars
    path        string, 4-192 chars
    url         string, 87-284 chars
    code        string, 78-104k chars
    docstring   string, 1-46.9k chars
    language    string, 1 distinct value
    partition   string, 3 distinct values
bokeh/bokeh
bokeh/core/has_props.py
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/has_props.py#L528-L570
def query_properties_with_values(self, query, include_defaults=True):
    ''' Query the properties values of |HasProps| instances with a
    predicate.

    Args:
        query (callable) :
            A callable that accepts property descriptors and returns True
            or False

        include_defaults (bool, optional) :
            Whether to include properties that have not been explicitly
            set by a user (default: True)

    Returns:
        dict : mapping of property names and values for matching properties

    '''
    themed_keys = set()
    result = dict()
    if include_defaults:
        keys = self.properties()
    else:
        # TODO (bev) For now, include unstable default values. Things rely on Instances
        # always getting serialized, even defaults, and adding unstable defaults here
        # accomplishes that. Unmodified defaults for property value containers will be
        # weeded out below.
        keys = set(self._property_values.keys()) | set(self._unstable_default_values.keys())
        if self.themed_values():
            themed_keys = set(self.themed_values().keys())
            keys |= themed_keys

    for key in keys:
        descriptor = self.lookup(key)
        if not query(descriptor):
            continue

        value = descriptor.serializable_value(self)
        if not include_defaults and key not in themed_keys:
            if isinstance(value, PropertyValueContainer) and key in self._unstable_default_values:
                continue
        result[key] = value

    return result
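A hedged usage sketch follows; the Circle model and the descriptor's name attribute are assumptions about this Bokeh version, not taken from the record above.

# Hypothetical sketch: query a glyph model for properties whose name
# mentions "fill"; printed defaults are illustrative only.
from bokeh.models import Circle

glyph = Circle()
fill_props = glyph.query_properties_with_values(
    lambda descriptor: 'fill' in descriptor.name)
print(fill_props)   # e.g. {'fill_color': 'gray', 'fill_alpha': 1.0}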
[ "def", "query_properties_with_values", "(", "self", ",", "query", ",", "include_defaults", "=", "True", ")", ":", "themed_keys", "=", "set", "(", ")", "result", "=", "dict", "(", ")", "if", "include_defaults", ":", "keys", "=", "self", ".", "properties", "(", ")", "else", ":", "# TODO (bev) For now, include unstable default values. Things rely on Instances", "# always getting serialized, even defaults, and adding unstable defaults here", "# accomplishes that. Unmodified defaults for property value containers will be", "# weeded out below.", "keys", "=", "set", "(", "self", ".", "_property_values", ".", "keys", "(", ")", ")", "|", "set", "(", "self", ".", "_unstable_default_values", ".", "keys", "(", ")", ")", "if", "self", ".", "themed_values", "(", ")", ":", "themed_keys", "=", "set", "(", "self", ".", "themed_values", "(", ")", ".", "keys", "(", ")", ")", "keys", "|=", "themed_keys", "for", "key", "in", "keys", ":", "descriptor", "=", "self", ".", "lookup", "(", "key", ")", "if", "not", "query", "(", "descriptor", ")", ":", "continue", "value", "=", "descriptor", ".", "serializable_value", "(", "self", ")", "if", "not", "include_defaults", "and", "key", "not", "in", "themed_keys", ":", "if", "isinstance", "(", "value", ",", "PropertyValueContainer", ")", "and", "key", "in", "self", ".", "_unstable_default_values", ":", "continue", "result", "[", "key", "]", "=", "value", "return", "result" ]
Query the properties values of |HasProps| instances with a predicate. Args: query (callable) : A callable that accepts property descriptors and returns True or False include_defaults (bool, optional) : Whether to include properties that have not been explicitly set by a user (default: True) Returns: dict : mapping of property names and values for matching properties
[ "Query", "the", "properties", "values", "of", "|HasProps|", "instances", "with", "a", "predicate", "." ]
python
train
WebarchivCZ/WA-KAT
src/wa_kat/convertors/mrc.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/convertors/mrc.py#L19-L81
def mrc_to_marc(mrc):
    """
    Convert MRC data format to MARC XML.

    Args:
        mrc (str): MRC as string.

    Returns:
        str: XML with MARC.
    """
    # ignore blank lines
    lines = [
        line
        for line in mrc.splitlines()
        if line.strip()
    ]

    def split_to_parts(lines):
        for line in lines:
            first_part, second_part = line.split(" L ", 1)
            yield line, first_part, second_part.lstrip()

    control_lines = []
    data_lines = []
    for line, first_part, second_part in split_to_parts(lines):
        if second_part.startswith("$"):
            data_lines.append(line)
        else:
            control_lines.append(line)

    # convert controlfield lines
    record = MARCXMLRecord()
    record.oai_marc = True
    for line, descr, content in split_to_parts(control_lines):
        record.controlfields[descr.strip()[:3]] = content

    def get_subfield_dict(line):
        fields = (
            (field[0], field[1:])
            for field in line.split("$$")[1:]
        )

        fields_dict = defaultdict(list)
        for key, val in fields:
            fields_dict[key].append(val)

        return fields_dict

    # convert datafield lines
    for line, descr, content_line in split_to_parts(data_lines):
        name = descr[:3]
        i1 = descr[3]
        i2 = descr[4]

        record.add_data_field(name, i1, i2, get_subfield_dict(content_line))

    return record.to_XML()
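A hypothetical input sketch, inferring the line shape from the parser above (field code plus indicators, a " L " separator, "$$"-prefixed subfields); the field values are invented.

# Hypothetical MRC input matching the parsing rules above: "24510" splits
# into field name 245 with indicators i1='1', i2='0'; "$$"-prefixed parts
# become subfields; the 001 line carries no "$" so it is a control field.
mrc = (
    "001 L nkc20150003059\n"
    "24510 L $$aExample title$$bAn example subtitle\n"
)
xml = mrc_to_marc(mrc)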
[ "def", "mrc_to_marc", "(", "mrc", ")", ":", "# ignore blank lines", "lines", "=", "[", "line", "for", "line", "in", "mrc", ".", "splitlines", "(", ")", "if", "line", ".", "strip", "(", ")", "]", "def", "split_to_parts", "(", "lines", ")", ":", "for", "line", "in", "lines", ":", "first_part", ",", "second_part", "=", "line", ".", "split", "(", "\" L \"", ",", "1", ")", "yield", "line", ",", "first_part", ",", "second_part", ".", "lstrip", "(", ")", "control_lines", "=", "[", "]", "data_lines", "=", "[", "]", "for", "line", ",", "first_part", ",", "second_part", "in", "split_to_parts", "(", "lines", ")", ":", "if", "second_part", ".", "startswith", "(", "\"$\"", ")", ":", "data_lines", ".", "append", "(", "line", ")", "else", ":", "control_lines", ".", "append", "(", "line", ")", "# convert controlfield lines", "record", "=", "MARCXMLRecord", "(", ")", "record", ".", "oai_marc", "=", "True", "for", "line", ",", "descr", ",", "content", "in", "split_to_parts", "(", "control_lines", ")", ":", "record", ".", "controlfields", "[", "descr", ".", "strip", "(", ")", "[", ":", "3", "]", "]", "=", "content", "def", "get_subfield_dict", "(", "line", ")", ":", "fields", "=", "(", "(", "field", "[", "0", "]", ",", "field", "[", "1", ":", "]", ")", "for", "field", "in", "line", ".", "split", "(", "\"$$\"", ")", "[", "1", ":", "]", ")", "fields_dict", "=", "defaultdict", "(", "list", ")", "for", "key", ",", "val", "in", "fields", ":", "fields_dict", "[", "key", "]", ".", "append", "(", "val", ")", "return", "fields_dict", "# convert datafield lines", "for", "line", ",", "descr", ",", "content_line", "in", "split_to_parts", "(", "data_lines", ")", ":", "name", "=", "descr", "[", ":", "3", "]", "i1", "=", "descr", "[", "3", "]", "i2", "=", "descr", "[", "4", "]", "record", ".", "add_data_field", "(", "name", ",", "i1", ",", "i2", ",", "get_subfield_dict", "(", "content_line", ")", ")", "return", "record", ".", "to_XML", "(", ")" ]
Convert MRC data format to MARC XML. Args: mrc (str): MRC as string. Returns: str: XML with MARC.
[ "Convert", "MRC", "data", "format", "to", "MARC", "XML", "." ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/flask/wrappers.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/flask/wrappers.py#L82-L92
def module(self):
    """The name of the current module if the request was dispatched
    to an actual module.  This is deprecated functionality, use blueprints
    instead.
    """
    from warnings import warn
    warn(DeprecationWarning('modules were deprecated in favor of '
                            'blueprints.  Use request.blueprint '
                            'instead.'), stacklevel=2)
    if self._is_old_module:
        return self.blueprint
[ "def", "module", "(", "self", ")", ":", "from", "warnings", "import", "warn", "warn", "(", "DeprecationWarning", "(", "'modules were deprecated in favor of '", "'blueprints. Use request.blueprint '", "'instead.'", ")", ",", "stacklevel", "=", "2", ")", "if", "self", ".", "_is_old_module", ":", "return", "self", ".", "blueprint" ]
The name of the current module if the request was dispatched to an actual module. This is deprecated functionality, use blueprints instead.
[ "The", "name", "of", "the", "current", "module", "if", "the", "request", "was", "dispatched", "to", "an", "actual", "module", ".", "This", "is", "deprecated", "functionality", "use", "blueprints", "instead", "." ]
python
test
mingchen/django-cas-ng
django_cas_ng/models.py
https://github.com/mingchen/django-cas-ng/blob/202ca92cd770d9679bfe4e9e20b41fd19b81c311/django_cas_ng/models.py#L41-L63
def retrieve_pt(cls, request, service):
    """`request` should be the current HttpRequest object, `service` a string
    representing the service for which we want to retrieve a ticket.
    The function returns a Proxy Ticket or raises `ProxyError`.
    """
    try:
        pgt = cls.objects.get(
            user=request.user,
            session_key=request.session.session_key
        ).pgt
    except cls.DoesNotExist:
        raise ProxyError(
            "INVALID_TICKET",
            "No proxy ticket found for this HttpRequest object"
        )
    else:
        client = get_cas_client(service_url=service, request=request)
        try:
            return client.get_proxy_ticket(pgt)
        # change CASError to ProxyError nicely
        except CASError as error:
            raise ProxyError(*error.args)
        # just embed other errors
        except Exception as e:
            raise ProxyError(e)
[ "def", "retrieve_pt", "(", "cls", ",", "request", ",", "service", ")", ":", "try", ":", "pgt", "=", "cls", ".", "objects", ".", "get", "(", "user", "=", "request", ".", "user", ",", "session_key", "=", "request", ".", "session", ".", "session_key", ")", ".", "pgt", "except", "cls", ".", "DoesNotExist", ":", "raise", "ProxyError", "(", "\"INVALID_TICKET\"", ",", "\"No proxy ticket found for this HttpRequest object\"", ")", "else", ":", "client", "=", "get_cas_client", "(", "service_url", "=", "service", ",", "request", "=", "request", ")", "try", ":", "return", "client", ".", "get_proxy_ticket", "(", "pgt", ")", "# change CASError to ProxyError nicely", "except", "CASError", "as", "error", ":", "raise", "ProxyError", "(", "*", "error", ".", "args", ")", "# just embed other errors", "except", "Exception", "as", "e", ":", "raise", "ProxyError", "(", "e", ")" ]
`request` should be the current HttpRequest object, `service` a string representing the service for which we want to retrieve a ticket. The function returns a Proxy Ticket or raises `ProxyError`.
[ "request", "should", "be", "the", "current", "HttpRequest", "object", "service", "a", "string", "representing", "the", "service", "for", "witch", "we", "want", "to", "retrieve", "a", "ticket", ".", "The", "function", "return", "a", "Proxy", "Ticket", "or", "raise", "ProxyError" ]
python
train
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L1930-L1953
def PushPopItem(obj, key, value):
    '''
    A context manager to replace and restore a value using a getter and setter.

    :param object obj: The object to replace/restore.
    :param object key: The key to replace/restore in the object.
    :param object value: The value to replace.

    Example::

        with PushPopItem(sys.modules, 'alpha', None):
            with pytest.raises(ImportError):
                import alpha
    '''
    if key in obj:
        old_value = obj[key]
        obj[key] = value
        yield value
        obj[key] = old_value
    else:
        obj[key] = value
        yield value
        del obj[key]
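The body yields, so the surrounding module presumably wraps this function with contextlib.contextmanager; a minimal sketch of the replace-and-restore behaviour under that assumption:

# Minimal sketch, assuming PushPopItem is decorated with
# contextlib.contextmanager in its module.
config = {'debug': False}
with PushPopItem(config, 'debug', True):
    assert config['debug'] is True    # value replaced inside the block
assert config['debug'] is False       # original value restored on exit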
[ "def", "PushPopItem", "(", "obj", ",", "key", ",", "value", ")", ":", "if", "key", "in", "obj", ":", "old_value", "=", "obj", "[", "key", "]", "obj", "[", "key", "]", "=", "value", "yield", "value", "obj", "[", "key", "]", "=", "old_value", "else", ":", "obj", "[", "key", "]", "=", "value", "yield", "value", "del", "obj", "[", "key", "]" ]
A context manager to replace and restore a value using a getter and setter. :param object obj: The object to replace/restore. :param object key: The key to replace/restore in the object. :param object value: The value to replace. Example:: with PushPopItem(sys.modules, 'alpha', None): with pytest.raises(ImportError): import alpha
[ "A", "context", "manager", "to", "replace", "and", "restore", "a", "value", "using", "a", "getter", "and", "setter", "." ]
python
valid
mrstephenneal/mysql-toolkit
mysql/toolkit/components/structure/schema.py
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/components/structure/schema.py#L32-L44
def get_schema(self, table, with_headers=False):
    """Retrieve the database schema for a particular table."""
    f = self.fetch('desc ' + wrap(table))
    if not isinstance(f[0], list):
        f = [f]

    # Replace None with ''
    schema = [['' if col is None else col for col in row] for row in f]

    # If with_headers is True, insert headers to first row before returning
    if with_headers:
        schema.insert(0, ['Column', 'Type', 'Null', 'Key', 'Default', 'Extra'])

    return schema
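A hedged usage sketch; the connected toolkit instance and table name below are hypothetical.

# Hypothetical usage on a connected toolkit instance named `sql`.
schema = sql.get_schema('customers', with_headers=True)
print(schema[0])   # ['Column', 'Type', 'Null', 'Key', 'Default', 'Extra']
print(schema[1])   # e.g. ['id', 'int(11)', 'NO', 'PRI', '', 'auto_increment']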
[ "def", "get_schema", "(", "self", ",", "table", ",", "with_headers", "=", "False", ")", ":", "f", "=", "self", ".", "fetch", "(", "'desc '", "+", "wrap", "(", "table", ")", ")", "if", "not", "isinstance", "(", "f", "[", "0", "]", ",", "list", ")", ":", "f", "=", "[", "f", "]", "# Replace None with ''", "schema", "=", "[", "[", "''", "if", "col", "is", "None", "else", "col", "for", "col", "in", "row", "]", "for", "row", "in", "f", "]", "# If with_headers is True, insert headers to first row before returning", "if", "with_headers", ":", "schema", ".", "insert", "(", "0", ",", "[", "'Column'", ",", "'Type'", ",", "'Null'", ",", "'Key'", ",", "'Default'", ",", "'Extra'", "]", ")", "return", "schema" ]
Retrieve the database schema for a particular table.
[ "Retrieve", "the", "database", "schema", "for", "a", "particular", "table", "." ]
python
train
andreikop/qutepart
qutepart/rectangularselection.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/rectangularselection.py#L100-L116
def _visibleToRealColumn(self, text, visiblePos):
    """If \t is used, the real position of a symbol in the block and its
    visible position differ.
    This function converts visible to real.
    The bigger value is returned if visiblePos is in the middle of a \t;
    None if the text is too short.
    """
    if visiblePos == 0:
        return 0
    elif '\t' not in text:
        return visiblePos
    else:
        currentIndex = 1
        for currentVisiblePos in self._visibleCharPositionGenerator(text):
            if currentVisiblePos >= visiblePos:
                return currentIndex - 1
            currentIndex += 1
        return None
[ "def", "_visibleToRealColumn", "(", "self", ",", "text", ",", "visiblePos", ")", ":", "if", "visiblePos", "==", "0", ":", "return", "0", "elif", "not", "'\\t'", "in", "text", ":", "return", "visiblePos", "else", ":", "currentIndex", "=", "1", "for", "currentVisiblePos", "in", "self", ".", "_visibleCharPositionGenerator", "(", "text", ")", ":", "if", "currentVisiblePos", ">=", "visiblePos", ":", "return", "currentIndex", "-", "1", "currentIndex", "+=", "1", "return", "None" ]
If \t is used, the real position of a symbol in the block and its visible position differ. This function converts visible to real. The bigger value is returned if visiblePos is in the middle of a \t; None if the text is too short.
[ "If", "\\", "t", "is", "used", "real", "position", "of", "symbol", "in", "block", "and", "visible", "position", "differs", "This", "function", "converts", "visible", "to", "real", ".", "Bigger", "value", "is", "returned", "if", "visiblePos", "is", "in", "the", "middle", "of", "\\", "t", "None", "if", "text", "is", "too", "short" ]
python
train
googleads/googleads-python-lib
googleads/adwords.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/googleads/adwords.py#L1989-L2000
def DoesNotContainIgnoreCase(self, value):
    """Sets the type of the WHERE clause as "does not contain ignore case".

    Args:
      value: The value to be used in the WHERE condition.

    Returns:
      The query builder that this WHERE builder links to.
    """
    self._awql = self._CreateSingleValueCondition(
        value, 'DOES_NOT_CONTAIN_IGNORE_CASE')
    return self._query_builder
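A hedged sketch of where this condition sits in a query chain; the surrounding ServiceQueryBuilder calls are assumptions based on the usual AdWords builder pattern, not taken from this record.

# Hypothetical builder chain; only DoesNotContainIgnoreCase is from this record.
query = (adwords.ServiceQueryBuilder()
         .Select('Id', 'Name')
         .Where('Name').DoesNotContainIgnoreCase('deprecated')
         .Limit(0, 100)
         .Build())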
[ "def", "DoesNotContainIgnoreCase", "(", "self", ",", "value", ")", ":", "self", ".", "_awql", "=", "self", ".", "_CreateSingleValueCondition", "(", "value", ",", "'DOES_NOT_CONTAIN_IGNORE_CASE'", ")", "return", "self", ".", "_query_builder" ]
Sets the type of the WHERE clause as "does not contain ignore case". Args: value: The value to be used in the WHERE condition. Returns: The query builder that this WHERE builder links to.
[ "Sets", "the", "type", "of", "the", "WHERE", "clause", "as", "doesn", "not", "contain", "ignore", "case", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/certificates/certificates.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/certificates/certificates.py#L321-L332
def type(self):
    """Certificate type.

    :return: The type of the certificate.
    :rtype: CertificateType
    """
    if self._device_mode == 1 or self._type == CertificateType.developer:
        return CertificateType.developer
    elif self._type == CertificateType.bootstrap:
        return CertificateType.bootstrap
    else:
        return CertificateType.lwm2m
[ "def", "type", "(", "self", ")", ":", "if", "self", ".", "_device_mode", "==", "1", "or", "self", ".", "_type", "==", "CertificateType", ".", "developer", ":", "return", "CertificateType", ".", "developer", "elif", "self", ".", "_type", "==", "CertificateType", ".", "bootstrap", ":", "return", "CertificateType", ".", "bootstrap", "else", ":", "return", "CertificateType", ".", "lwm2m" ]
Certificate type. :return: The type of the certificate. :rtype: CertificateType
[ "Certificate", "type", "." ]
python
train
quantopian/zipline
zipline/utils/numpy_utils.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/utils/numpy_utils.py#L334-L340
def isnat(obj):
    """
    Check if a value is np.NaT.
    """
    if obj.dtype.kind not in ('m', 'M'):
        # The original raised with a bare "%s" format string and no argument;
        # interpolate the offending object so the message is usable.
        raise ValueError("%s is not a numpy datetime or timedelta" % obj)
    return obj.view(int64_dtype) == iNaT
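A self-contained sketch; int64_dtype and iNaT normally come from the surrounding zipline module, so plain-numpy equivalents are defined here as assumptions.

import numpy as np

int64_dtype = np.dtype('int64')              # zipline's int64_dtype equivalent
iNaT = np.datetime64('NaT').view('int64')    # NaT's underlying sentinel integer

arr = np.array(['2021-01-01', 'NaT'], dtype='datetime64[ns]')
print(isnat(arr))   # [False  True]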
[ "def", "isnat", "(", "obj", ")", ":", "if", "obj", ".", "dtype", ".", "kind", "not", "in", "(", "'m'", ",", "'M'", ")", ":", "raise", "ValueError", "(", "\"%s is not a numpy datetime or timedelta\"", ")", "return", "obj", ".", "view", "(", "int64_dtype", ")", "==", "iNaT" ]
Check if a value is np.NaT.
[ "Check", "if", "a", "value", "is", "np", ".", "NaT", "." ]
python
train
brbsix/python-batchpath
batchpath.py
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L142-L149
def _sorter(generated):
    """Return a list of paths sorted by dirname & basename."""
    pairs = [(os.path.dirname(f), os.path.basename(f))
             for f in set(generated)]
    pairs.sort()
    return [os.path.join(pair[0], pair[1]) for pair in pairs]
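A quick sketch of the dedup-and-sort behaviour (the function itself relies on `import os` in its module):

paths = ['/b/z.txt', '/a/y.txt', '/a/x.txt', '/a/x.txt']
print(_sorter(paths))   # ['/a/x.txt', '/a/y.txt', '/b/z.txt']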
[ "def", "_sorter", "(", "generated", ")", ":", "pairs", "=", "[", "(", "os", ".", "path", ".", "dirname", "(", "f", ")", ",", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "for", "f", "in", "set", "(", "list", "(", "generated", ")", ")", "]", "pairs", ".", "sort", "(", ")", "return", "[", "os", ".", "path", ".", "join", "(", "pair", "[", "0", "]", ",", "pair", "[", "1", "]", ")", "for", "pair", "in", "pairs", "]" ]
Return a list of paths sorted by dirname & basename.
[ "Return", "a", "list", "of", "paths", "sorted", "by", "dirname", "&", "basename", "." ]
python
train
dslackw/slpkg
slpkg/main.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L722-L745
def auto_detect(self, args):
    """Check whether the given arguments are existing Slackware binary
    packages.
    """
    suffixes = [
        ".tgz",
        ".txz",
        ".tbz",
        ".tlz"
    ]
    if (not args[0].startswith("-") and args[0] not in self.commands and
            args[0].endswith(tuple(suffixes))):
        packages, not_found = [], []
        for pkg in args:
            if pkg.endswith(tuple(suffixes)):
                if os.path.isfile(pkg):
                    packages.append(pkg)
                else:
                    not_found.append(pkg)
        if packages:
            Auto(packages).select()
        if not_found:
            for ntf in not_found:
                self.msg.pkg_not_found("", ntf, "Not installed", "")
        raise SystemExit()
[ "def", "auto_detect", "(", "self", ",", "args", ")", ":", "suffixes", "=", "[", "\".tgz\"", ",", "\".txz\"", ",", "\".tbz\"", ",", "\".tlz\"", "]", "if", "(", "not", "args", "[", "0", "]", ".", "startswith", "(", "\"-\"", ")", "and", "args", "[", "0", "]", "not", "in", "self", ".", "commands", "and", "args", "[", "0", "]", ".", "endswith", "(", "tuple", "(", "suffixes", ")", ")", ")", ":", "packages", ",", "not_found", "=", "[", "]", ",", "[", "]", "for", "pkg", "in", "args", ":", "if", "pkg", ".", "endswith", "(", "tuple", "(", "suffixes", ")", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "pkg", ")", ":", "packages", ".", "append", "(", "pkg", ")", "else", ":", "not_found", ".", "append", "(", "pkg", ")", "if", "packages", ":", "Auto", "(", "packages", ")", ".", "select", "(", ")", "if", "not_found", ":", "for", "ntf", "in", "not_found", ":", "self", ".", "msg", ".", "pkg_not_found", "(", "\"\"", ",", "ntf", ",", "\"Not installed\"", ",", "\"\"", ")", "raise", "SystemExit", "(", ")" ]
Check whether the given arguments are existing Slackware binary packages.
[ "Check", "for", "already", "Slackware", "binary", "packages", "exist" ]
python
train
google/grumpy
third_party/stdlib/base64.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/base64.py#L308-L315
def decode(input, output):
    """Decode a file."""
    while True:
        line = input.readline()
        if not line:
            break
        s = binascii.a2b_base64(line)
        output.write(s)
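A minimal sketch, decoding base64 text between two in-memory file objects (binascii is imported by the surrounding base64 module):

import io

encoded = io.BytesIO(b'aGVsbG8gd29ybGQ=\n')
decoded = io.BytesIO()
decode(encoded, decoded)
print(decoded.getvalue())   # b'hello world'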
[ "def", "decode", "(", "input", ",", "output", ")", ":", "while", "True", ":", "line", "=", "input", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "s", "=", "binascii", ".", "a2b_base64", "(", "line", ")", "output", ".", "write", "(", "s", ")" ]
Decode a file.
[ "Decode", "a", "file", "." ]
python
valid
bigchaindb/bigchaindb
bigchaindb/lib.py
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/lib.py#L92-L96
def write_transaction(self, transaction, mode):
    # This method offers backward compatibility with the Web API.
    """Submit a valid transaction to the mempool."""
    response = self.post_transaction(transaction, mode)
    return self._process_post_response(response.json(), mode)
[ "def", "write_transaction", "(", "self", ",", "transaction", ",", "mode", ")", ":", "# This method offers backward compatibility with the Web API.", "response", "=", "self", ".", "post_transaction", "(", "transaction", ",", "mode", ")", "return", "self", ".", "_process_post_response", "(", "response", ".", "json", "(", ")", ",", "mode", ")" ]
Submit a valid transaction to the mempool.
[ "Submit", "a", "valid", "transaction", "to", "the", "mempool", "." ]
python
train
googleapis/gax-python
google/gapic/longrunning/operations_client.py
https://github.com/googleapis/gax-python/blob/309aedfcfd48e4c8fa22dd60e9c84c3cc71bb20e/google/gapic/longrunning/operations_client.py#L215-L264
def list_operations(self, name, filter_, page_size=0, options=None):
    """
    Lists operations that match the specified filter in the request. If the
    server doesn't support this method, it returns ``UNIMPLEMENTED``.

    NOTE: the ``name`` binding below allows API services to override the
    binding to use different resource name schemes, such as
    ``users/*/operations``.

    Example:
      >>> from google.gapic.longrunning import operations_client
      >>> from google.gax import CallOptions, INITIAL_PAGE
      >>> api = operations_client.OperationsClient()
      >>> name = ''
      >>> filter_ = ''
      >>>
      >>> # Iterate over all results
      >>> for element in api.list_operations(name, filter_):
      >>>     # process element
      >>>     pass
      >>>
      >>> # Or iterate over results one page at a time
      >>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
      >>>     for element in page:
      >>>         # process element
      >>>         pass

    Args:
      name (string): The name of the operation collection.
      filter_ (string): The standard list filter.
      page_size (int): The maximum number of resources contained in the
        underlying API response. If page streaming is performed per-resource,
        this parameter does not affect the return value. If page streaming is
        performed per-page, this determines the maximum number of resources in
        a page.
      options (:class:`google.gax.CallOptions`): Overrides the default
        settings for this call, e.g, timeout, retries etc.

    Returns:
      A :class:`google.gax.PageIterator` instance. By default, this
      is an iterable of :class:`google.longrunning.operations_pb2.Operation`
      instances. This object can also be configured to iterate over the pages
      of the response through the `CallOptions` parameter.

    Raises:
      :exc:`google.gax.errors.GaxError` if the RPC is aborted.
      :exc:`ValueError` if the parameters are invalid.
    """
    # Create the request object.
    request = operations_pb2.ListOperationsRequest(
        name=name, filter=filter_, page_size=page_size)
    return self._list_operations(request, options)
[ "def", "list_operations", "(", "self", ",", "name", ",", "filter_", ",", "page_size", "=", "0", ",", "options", "=", "None", ")", ":", "# Create the request object.", "request", "=", "operations_pb2", ".", "ListOperationsRequest", "(", "name", "=", "name", ",", "filter", "=", "filter_", ",", "page_size", "=", "page_size", ")", "return", "self", ".", "_list_operations", "(", "request", ",", "options", ")" ]
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns ``UNIMPLEMENTED``. NOTE: the ``name`` binding below allows API services to override the binding to use different resource name schemes, such as ``users/*/operations``. Example: >>> from google.gapic.longrunning import operations_client >>> from google.gax import CallOptions, INITIAL_PAGE >>> api = operations_client.OperationsClient() >>> name = '' >>> filter_ = '' >>> >>> # Iterate over all results >>> for element in api.list_operations(name, filter_): >>> # process element >>> pass >>> >>> # Or iterate over results one page at a time >>> for page in api.list_operations(name, filter_, options=CallOptions(page_token=INITIAL_PAGE)): >>> for element in page: >>> # process element >>> pass Args: name (string): The name of the operation collection. filter_ (string): The standard list filter. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. options (:class:`google.gax.CallOptions`): Overrides the default settings for this call, e.g, timeout, retries etc. Returns: A :class:`google.gax.PageIterator` instance. By default, this is an iterable of :class:`google.longrunning.operations_pb2.Operation` instances. This object can also be configured to iterate over the pages of the response through the `CallOptions` parameter. Raises: :exc:`google.gax.errors.GaxError` if the RPC is aborted. :exc:`ValueError` if the parameters are invalid.
[ "Lists", "operations", "that", "match", "the", "specified", "filter", "in", "the", "request", ".", "If", "the", "server", "doesn", "t", "support", "this", "method", "it", "returns", "UNIMPLEMENTED", ".", "NOTE", ":", "the", "name", "binding", "below", "allows", "API", "services", "to", "override", "the", "binding", "to", "use", "different", "resource", "name", "schemes", "such", "as", "users", "/", "*", "/", "operations", "." ]
python
train
pymc-devs/pymc
pymc/database/pickle.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/pickle.py#L77-L100
def load(filename):
    """Load a pickled database.

    Return a Database instance.
    """
    file = open(filename, 'rb')
    container = std_pickle.load(file)
    file.close()

    db = Database(file.name)

    chains = 0
    funs = set()
    for k, v in six.iteritems(container):
        if k == '_state_':
            db._state_ = v
        else:
            db._traces[k] = Trace(name=k, value=v, db=db)
            setattr(db, k, db._traces[k])
            chains = max(chains, len(v))
            funs.add(k)

    db.chains = chains
    db.trace_names = chains * [list(funs)]

    return db
[ "def", "load", "(", "filename", ")", ":", "file", "=", "open", "(", "filename", ",", "'rb'", ")", "container", "=", "std_pickle", ".", "load", "(", "file", ")", "file", ".", "close", "(", ")", "db", "=", "Database", "(", "file", ".", "name", ")", "chains", "=", "0", "funs", "=", "set", "(", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "container", ")", ":", "if", "k", "==", "'_state_'", ":", "db", ".", "_state_", "=", "v", "else", ":", "db", ".", "_traces", "[", "k", "]", "=", "Trace", "(", "name", "=", "k", ",", "value", "=", "v", ",", "db", "=", "db", ")", "setattr", "(", "db", ",", "k", ",", "db", ".", "_traces", "[", "k", "]", ")", "chains", "=", "max", "(", "chains", ",", "len", "(", "v", ")", ")", "funs", ".", "add", "(", "k", ")", "db", ".", "chains", "=", "chains", "db", ".", "trace_names", "=", "chains", "*", "[", "list", "(", "funs", ")", "]", "return", "db" ]
Load a pickled database. Return a Database instance.
[ "Load", "a", "pickled", "database", "." ]
python
train
pgmpy/pgmpy
pgmpy/models/NoisyOrModel.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/NoisyOrModel.py#L91-L113
def del_variables(self, variables):
    """
    Deletes variables from the NoisyOrModel.

    Parameters
    ----------
    variables: list, tuple, dict (array like)
        list of variables to be deleted.

    Examples
    --------
    >>> from pgmpy.models import NoisyOrModel
    >>> model = NoisyOrModel(['x1', 'x2', 'x3'], [2, 3, 2], [[0.6, 0.4],
    ...                                                      [0.2, 0.4, 0.7],
    ...                                                      [0.1, 0.4]])
    >>> model.del_variables(['x1'])
    """
    variables = [variables] if isinstance(variables, six.string_types) else set(variables)
    indices = [index for index, variable in enumerate(self.variables)
               if variable in variables]
    self.variables = np.delete(self.variables, indices, 0)
    self.cardinality = np.delete(self.cardinality, indices, 0)
    self.inhibitor_probability = [prob_array for index, prob_array
                                  in enumerate(self.inhibitor_probability)
                                  if index not in indices]
[ "def", "del_variables", "(", "self", ",", "variables", ")", ":", "variables", "=", "[", "variables", "]", "if", "isinstance", "(", "variables", ",", "six", ".", "string_types", ")", "else", "set", "(", "variables", ")", "indices", "=", "[", "index", "for", "index", ",", "variable", "in", "enumerate", "(", "self", ".", "variables", ")", "if", "variable", "in", "variables", "]", "self", ".", "variables", "=", "np", ".", "delete", "(", "self", ".", "variables", ",", "indices", ",", "0", ")", "self", ".", "cardinality", "=", "np", ".", "delete", "(", "self", ".", "cardinality", ",", "indices", ",", "0", ")", "self", ".", "inhibitor_probability", "=", "[", "prob_array", "for", "index", ",", "prob_array", "in", "enumerate", "(", "self", ".", "inhibitor_probability", ")", "if", "index", "not", "in", "indices", "]" ]
Deletes variables from the NoisyOrModel. Parameters ---------- variables: list, tuple, dict (array like) list of variables to be deleted. Examples -------- >>> from pgmpy.models import NoisyOrModel >>> model = NoisyOrModel(['x1', 'x2', 'x3'], [2, 3, 2], [[0.6, 0.4], ... [0.2, 0.4, 0.7], ... [0.1, 0.4]]) >>> model.del_variables(['x1'])
[ "Deletes", "variables", "from", "the", "NoisyOrModel", "." ]
python
train
DiamondLightSource/python-workflows
workflows/recipe/wrapper.py
https://github.com/DiamondLightSource/python-workflows/blob/7ef47b457655b96f4d2ef7ee9863cf1b6d20e023/workflows/recipe/wrapper.py#L130-L151
def checkpoint(self, message, header=None, delay=0, **kwargs):
    """Send a message to the current recipe destination. This can be used
    to keep a state for longer processing tasks.
    :param delay: Delay transport of message by this many seconds
    """
    if not self.transport:
        raise ValueError(
            "This RecipeWrapper object does not contain "
            "a reference to a transport object."
        )
    if not self.recipe_step:
        raise ValueError(
            "This RecipeWrapper object does not contain "
            "a recipe with a selected step."
        )

    kwargs["delay"] = delay
    self._send_to_destination(
        self.recipe_pointer, header, message, kwargs, add_path_step=False
    )
[ "def", "checkpoint", "(", "self", ",", "message", ",", "header", "=", "None", ",", "delay", "=", "0", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "transport", ":", "raise", "ValueError", "(", "\"This RecipeWrapper object does not contain \"", "\"a reference to a transport object.\"", ")", "if", "not", "self", ".", "recipe_step", ":", "raise", "ValueError", "(", "\"This RecipeWrapper object does not contain \"", "\"a recipe with a selected step.\"", ")", "kwargs", "[", "\"delay\"", "]", "=", "delay", "self", ".", "_send_to_destination", "(", "self", ".", "recipe_pointer", ",", "header", ",", "message", ",", "kwargs", ",", "add_path_step", "=", "False", ")" ]
Send a message to the current recipe destination. This can be used to keep a state for longer processing tasks. :param delay: Delay transport of message by this many seconds
[ "Send", "a", "message", "to", "the", "current", "recipe", "destination", ".", "This", "can", "be", "used", "to", "keep", "a", "state", "for", "longer", "processing", "tasks", ".", ":", "param", "delay", ":", "Delay", "transport", "of", "message", "by", "this", "many", "seconds" ]
python
train
NoviceLive/intellicoder
intellicoder/intellisense/formatters.py
https://github.com/NoviceLive/intellicoder/blob/6cac5ebfce65c370dbebe47756a1789b120ef982/intellicoder/intellisense/formatters.py#L59-L72
def format_info(raw):
    """Format a string representing the information
    concerning the name.
    """
    logging.debug(_('raw[0]: %s'), raw[0])
    results, sense = raw
    # A scenario where ORM really stands out.
    new = '\n'.join(
        '{} {} {} {}'.format(
            i[0], sense.kind_id_to_name(i[1]),
            sense.file_id_to_name(i[2]).lower(),
            i[3] + ' ' if i[3] else '').strip()
        for i in results)
    return new
[ "def", "format_info", "(", "raw", ")", ":", "logging", ".", "debug", "(", "_", "(", "'raw[0]: %s'", ")", ",", "raw", "[", "0", "]", ")", "results", ",", "sense", "=", "raw", "# A scenario where ORM really stands out.", "new", "=", "'\\n'", ".", "join", "(", "'{} {} {} {}'", ".", "format", "(", "i", "[", "0", "]", ",", "sense", ".", "kind_id_to_name", "(", "i", "[", "1", "]", ")", ",", "sense", ".", "file_id_to_name", "(", "i", "[", "2", "]", ")", ".", "lower", "(", ")", ",", "i", "[", "3", "]", "+", "' '", "if", "i", "[", "3", "]", "else", "''", ")", ".", "strip", "(", ")", "for", "i", "in", "results", ")", "return", "new" ]
Format a string representing the information concerning the name.
[ "Format", "a", "string", "representing", "the", "information", "concerning", "the", "name", "." ]
python
train
nilp0inter/cpe
cpe/comp/cpecomp1_1.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/comp/cpecomp1_1.py#L333-L354
def set_value(self, comp_str, comp_att):
    """
    Set the value of component. By default, the component has a simple
    value.

    :param string comp_att: attribute associated with value of component
    :returns: None
    :exception: ValueError - incorrect value of component

    TEST:

    >>> val = 'xp!vista'
    >>> val2 = 'sp2'
    >>> att = CPEComponentSimple.ATT_VERSION
    >>> comp1 = CPEComponent1_1(val, att)
    >>> comp1.set_value(val2, att)
    >>> comp1.get_value()
    'sp2'
    """
    super(CPEComponent1_1, self).set_value(comp_str, comp_att)
    self._is_negated = comp_str.startswith('~')
[ "def", "set_value", "(", "self", ",", "comp_str", ",", "comp_att", ")", ":", "super", "(", "CPEComponent1_1", ",", "self", ")", ".", "set_value", "(", "comp_str", ",", "comp_att", ")", "self", ".", "_is_negated", "=", "comp_str", ".", "startswith", "(", "'~'", ")" ]
Set the value of component. By default, the component has a simple value. :param string comp_att: attribute associated with value of component :returns: None :exception: ValueError - incorrect value of component TEST: >>> val = 'xp!vista' >>> val2 = 'sp2' >>> att = CPEComponentSimple.ATT_VERSION >>> comp1 = CPEComponent1_1(val, att) >>> comp1.set_value(val2, att) >>> comp1.get_value() 'sp2'
[ "Set", "the", "value", "of", "component", ".", "By", "default", "the", "component", "has", "a", "simple", "value", "." ]
python
train
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/independence/graph/FSRegression.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/independence/graph/FSRegression.py#L150-L167
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame):
        df_target (pandas.Series):
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms

    Returns:
        list: scores of each feature relatively to the target
    """
    X = df_features.values
    y = df_target.values[:, 0]
    rr = ReliefF()
    rr.fit(X, y)

    return rr.feature_importances_
[ "def", "predict_features", "(", "self", ",", "df_features", ",", "df_target", ",", "idx", "=", "0", ",", "*", "*", "kwargs", ")", ":", "X", "=", "df_features", ".", "values", "y", "=", "df_target", ".", "values", "[", ":", ",", "0", "]", "rr", "=", "ReliefF", "(", ")", "rr", ".", "fit", "(", "X", ",", "y", ")", "return", "rr", ".", "feature_importances_" ]
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
[ "For", "one", "variable", "predict", "its", "neighbouring", "nodes", "." ]
python
valid
mozilla/django-tidings
tidings/utils.py
https://github.com/mozilla/django-tidings/blob/b2895b3cdec6aae18315afcceb92bb16317f0f96/tidings/utils.py#L110-L127
def import_from_setting(setting_name, fallback):
    """Return the resolution of an import path stored in a Django setting.

    :arg setting_name: The name of the setting holding the import path
    :arg fallback: An alternate object to use if the setting is empty or
        doesn't exist

    Raise ImproperlyConfigured if a path is given that can't be resolved.
    """
    path = getattr(settings, setting_name, None)
    if path:
        try:
            return import_string(path)
        except ImportError:
            raise ImproperlyConfigured('%s: No such path.' % path)
    else:
        return fallback
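A hedged usage sketch; the setting name and dotted path below are hypothetical.

# settings.py might contain:
#     TIDINGS_TEMPLATE_CONTEXT = 'myapp.utils.build_context'
def default_context(request):
    return {}

build_context = import_from_setting('TIDINGS_TEMPLATE_CONTEXT', default_context)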
[ "def", "import_from_setting", "(", "setting_name", ",", "fallback", ")", ":", "path", "=", "getattr", "(", "settings", ",", "setting_name", ",", "None", ")", "if", "path", ":", "try", ":", "return", "import_string", "(", "path", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "'%s: No such path.'", "%", "path", ")", "else", ":", "return", "fallback" ]
Return the resolution of an import path stored in a Django setting. :arg setting_name: The name of the setting holding the import path :arg fallback: An alternate object to use if the setting is empty or doesn't exist Raise ImproperlyConfigured if a path is given that can't be resolved.
[ "Return", "the", "resolution", "of", "an", "import", "path", "stored", "in", "a", "Django", "setting", "." ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/dist_metrics.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/dist_metrics.py#L9-L23
def abs_angle_diff(v_i, v_j):
    """ Returns the absolute value of the angle between two 3D vectors.

    Parameters
    ----------
    v_i : :obj:`numpy.ndarray`
        the first 3D array
    v_j : :obj:`numpy.ndarray`
        the second 3D array
    """
    # compute angle distance
    dot_prod = min(max(v_i.dot(v_j), -1), 1)
    angle_diff = np.arccos(dot_prod)
    return np.abs(angle_diff)
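A quick sketch with unit vectors; the clamp on the dot product suggests the inputs are assumed to be normalized.

import numpy as np

x = np.array([1.0, 0.0, 0.0])
y = np.array([0.0, 1.0, 0.0])
print(abs_angle_diff(x, y))   # 1.5707963... (pi / 2)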
[ "def", "abs_angle_diff", "(", "v_i", ",", "v_j", ")", ":", "# compute angle distance", "dot_prod", "=", "min", "(", "max", "(", "v_i", ".", "dot", "(", "v_j", ")", ",", "-", "1", ")", ",", "1", ")", "angle_diff", "=", "np", ".", "arccos", "(", "dot_prod", ")", "return", "np", ".", "abs", "(", "angle_diff", ")" ]
Returns the absolute value of the angle between two 3D vectors. Parameters ---------- v_i : :obj:`numpy.ndarray` the first 3D array v_j : :obj:`numpy.ndarray` the second 3D array
[ "Returns", "the", "absolute", "value", "of", "the", "angle", "between", "two", "3D", "vectors", ".", "Parameters", "----------", "v_i", ":", ":", "obj", ":", "numpy", ".", "ndarray", "the", "first", "3D", "array", "v_j", ":", ":", "obj", ":", "numpy", ".", "ndarray", "the", "second", "3D", "array" ]
python
train
wtsi-hgi/python-hgijson
hgijson/serialization.py
https://github.com/wtsi-hgi/python-hgijson/blob/6e8ccb562eabcaa816a136268a16504c2e0d4664/hgijson/serialization.py#L154-L162
def _create_serializer_of_type_with_cache(self, serializer_type: Type) -> "Serializer":
    """
    Creates a serializer of the given type, exploiting a cache.
    :param serializer_type: the type of serializer to create
    :return: the created serializer
    """
    if serializer_type not in self._serializers_cache:
        self._serializers_cache[serializer_type] = self._create_serializer_of_type(serializer_type)
    return self._serializers_cache[serializer_type]
[ "def", "_create_serializer_of_type_with_cache", "(", "self", ",", "serializer_type", ":", "Type", ")", "->", "\"Serializer\"", ":", "if", "serializer_type", "not", "in", "self", ".", "_serializers_cache", ":", "self", ".", "_serializers_cache", "[", "serializer_type", "]", "=", "self", ".", "_create_serializer_of_type", "(", "serializer_type", ")", "return", "self", ".", "_serializers_cache", "[", "serializer_type", "]" ]
Creates a serializer of the given type, exploiting a cache. :param serializer_type: the type of serializer to create :return: the created serializer
[ "Creates", "a", "deserializer", "of", "the", "given", "type", "exploiting", "a", "cache", ".", ":", "param", "serializer_type", ":", "the", "type", "of", "deserializer", "to", "create", ":", "return", ":", "the", "created", "serializer" ]
python
train
gem/oq-engine
openquake/hazardlib/valid.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/valid.py#L507-L515
def positiveint(value):
    """
    :param value: input string
    :returns: positive integer
    """
    i = int(not_empty(value))
    if i < 0:
        raise ValueError('integer %d < 0' % i)
    return i
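A tiny sketch; not_empty is assumed to pass non-empty strings through unchanged.

print(positiveint('42'))   # 42
positiveint('-1')          # raises ValueError: integer -1 < 0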
[ "def", "positiveint", "(", "value", ")", ":", "i", "=", "int", "(", "not_empty", "(", "value", ")", ")", "if", "i", "<", "0", ":", "raise", "ValueError", "(", "'integer %d < 0'", "%", "i", ")", "return", "i" ]
:param value: input string :returns: positive integer
[ ":", "param", "value", ":", "input", "string", ":", "returns", ":", "positive", "integer" ]
python
train
adrn/schwimmbad
schwimmbad/mpi.py
https://github.com/adrn/schwimmbad/blob/d2538b77c821a56096f92eafecd1c08dd02f1f58/schwimmbad/mpi.py#L74-L106
def wait(self, callback=None):
    """Tell the workers to wait and listen for the master process. This is
    called automatically when using :meth:`MPIPool.map` and doesn't need to
    be called by the user.
    """
    if self.is_master():
        return

    worker = self.comm.rank
    status = MPI.Status()
    while True:
        log.log(_VERBOSE, "Worker {0} waiting for task".format(worker))

        task = self.comm.recv(source=self.master, tag=MPI.ANY_TAG,
                              status=status)

        if task is None:
            log.log(_VERBOSE, "Worker {0} told to quit work".format(worker))
            break

        func, arg = task
        log.log(_VERBOSE, "Worker {0} got task {1} with tag {2}"
                .format(worker, arg, status.tag))

        result = func(arg)

        log.log(_VERBOSE, "Worker {0} sending answer {1} with tag {2}"
                .format(worker, result, status.tag))
        self.comm.ssend(result, self.master, status.tag)

    if callback is not None:
        callback()
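A hedged sketch of the worker/master split this method supports, following the pattern from schwimmbad's documentation; run under mpiexec.

import sys
from schwimmbad import MPIPool

def task(x):
    return x ** 2

pool = MPIPool()
if not pool.is_master():
    pool.wait()       # workers block here, executing tasks until told to quit
    sys.exit(0)

results = pool.map(task, range(10))   # only the master reaches this line
pool.close()
print(results)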
[ "def", "wait", "(", "self", ",", "callback", "=", "None", ")", ":", "if", "self", ".", "is_master", "(", ")", ":", "return", "worker", "=", "self", ".", "comm", ".", "rank", "status", "=", "MPI", ".", "Status", "(", ")", "while", "True", ":", "log", ".", "log", "(", "_VERBOSE", ",", "\"Worker {0} waiting for task\"", ".", "format", "(", "worker", ")", ")", "task", "=", "self", ".", "comm", ".", "recv", "(", "source", "=", "self", ".", "master", ",", "tag", "=", "MPI", ".", "ANY_TAG", ",", "status", "=", "status", ")", "if", "task", "is", "None", ":", "log", ".", "log", "(", "_VERBOSE", ",", "\"Worker {0} told to quit work\"", ".", "format", "(", "worker", ")", ")", "break", "func", ",", "arg", "=", "task", "log", ".", "log", "(", "_VERBOSE", ",", "\"Worker {0} got task {1} with tag {2}\"", ".", "format", "(", "worker", ",", "arg", ",", "status", ".", "tag", ")", ")", "result", "=", "func", "(", "arg", ")", "log", ".", "log", "(", "_VERBOSE", ",", "\"Worker {0} sending answer {1} with tag {2}\"", ".", "format", "(", "worker", ",", "result", ",", "status", ".", "tag", ")", ")", "self", ".", "comm", ".", "ssend", "(", "result", ",", "self", ".", "master", ",", "status", ".", "tag", ")", "if", "callback", "is", "not", "None", ":", "callback", "(", ")" ]
Tell the workers to wait and listen for the master process. This is called automatically when using :meth:`MPIPool.map` and doesn't need to be called by the user.
[ "Tell", "the", "workers", "to", "wait", "and", "listen", "for", "the", "master", "process", ".", "This", "is", "called", "automatically", "when", "using", ":", "meth", ":", "MPIPool", ".", "map", "and", "doesn", "t", "need", "to", "be", "called", "by", "the", "user", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L6747-L6771
def create_namespaced_resource_quota(self, namespace, body, **kwargs):  # noqa: E501
    """create_namespaced_resource_quota  # noqa: E501

    create a ResourceQuota  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1ResourceQuota body: (required)
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1ResourceQuota
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs)  # noqa: E501
    else:
        (data) = self.create_namespaced_resource_quota_with_http_info(namespace, body, **kwargs)  # noqa: E501
        return data
[ "def", "create_namespaced_resource_quota", "(", "self", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "create_namespaced_resource_quota_with_http_info", "(", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "create_namespaced_resource_quota_with_http_info", "(", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
create_namespaced_resource_quota # noqa: E501 create a ResourceQuota # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_resource_quota(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ResourceQuota body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1ResourceQuota If the method is called asynchronously, returns the request thread.
[ "create_namespaced_resource_quota", "#", "noqa", ":", "E501" ]
python
train
dmlc/xgboost
python-package/xgboost/training.py
https://github.com/dmlc/xgboost/blob/253fdd8a42d5ec6b819788199584d27bf9ea6253/python-package/xgboost/training.py#L115-L216
def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,
          maximize=False, early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None):
    # pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init
    """Train a booster with given parameters.

    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round: int
        Number of boosting iterations.
    evals: list of pairs (DMatrix, string)
        List of items to be evaluated during training, this allows user to watch
        performance on the validation set.
    obj : function
        Customized objective function.
    feval : function
        Customized evaluation function.
    maximize : bool
        Whether to maximize feval.
    early_stopping_rounds: int
        Activates early stopping. Validation error needs to decrease at least
        every **early_stopping_rounds** round(s) to continue training.
        Requires at least one item in **evals**.
        If there's more than one, will use the last.
        Returns the model from the last iteration (not the best one).
        If early stopping occurs, the model will have three additional fields:
        ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``.
        (Use ``bst.best_ntree_limit`` to get the correct value if
        ``num_parallel_tree`` and/or ``num_class`` appears in the parameters)
    evals_result: dict
        This dictionary stores the evaluation results of all the items in watchlist.

        Example: with a watchlist containing
        ``[(dtest,'eval'), (dtrain,'train')]`` and
        a parameter containing ``('eval_metric': 'logloss')``,
        the **evals_result** returns

        .. code-block:: python

            {'train': {'logloss': ['0.48253', '0.35953']},
             'eval': {'logloss': ['0.480385', '0.357756']}}

    verbose_eval : bool or int
        Requires at least one item in **evals**.
        If **verbose_eval** is True then the evaluation metric on the validation
        set is printed at each boosting stage.
        If **verbose_eval** is an integer then the evaluation metric on the
        validation set is printed at every given **verbose_eval** boosting stage.
        The last boosting stage / the boosting stage found by using
        **early_stopping_rounds** is also printed.
        Example: with ``verbose_eval=4`` and at least one item in **evals**, an
        evaluation metric is printed every 4 boosting stages, instead of every
        boosting stage.
    learning_rates: list or function (deprecated - use callback API instead)
        List of learning rate for each boosting round
        or a customized function that calculates eta in terms of
        current number of round and the total number of boosting round (e.g.
        yields learning rate decay)
    xgb_model : file name of stored xgb model or 'Booster' instance
        Xgb model to be loaded before training (allows training continuation).
    callbacks : list of callback functions
        List of callback functions that are applied at end of each iteration.
        It is possible to use predefined callbacks by using
        :ref:`Callback API <callback_api>`.
        Example:

        .. code-block:: python

            [xgb.callback.reset_learning_rate(custom_rates)]

    Returns
    -------
    Booster : a trained booster model
    """
    callbacks = [] if callbacks is None else callbacks

    # Most of legacy advanced options becomes callbacks
    if isinstance(verbose_eval, bool) and verbose_eval:
        callbacks.append(callback.print_evaluation())
    else:
        if isinstance(verbose_eval, int):
            callbacks.append(callback.print_evaluation(verbose_eval))

    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds,
                                             maximize=maximize,
                                             verbose=bool(verbose_eval)))
    if evals_result is not None:
        callbacks.append(callback.record_evaluation(evals_result))

    if learning_rates is not None:
        warnings.warn("learning_rates parameter is deprecated - use callback API instead",
                      DeprecationWarning)
        callbacks.append(callback.reset_learning_rate(learning_rates))

    return _train_internal(params, dtrain,
                           num_boost_round=num_boost_round,
                           evals=evals,
                           obj=obj, feval=feval,
                           xgb_model=xgb_model, callbacks=callbacks)
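A minimal, hedged usage sketch with synthetic data:

import numpy as np
import xgboost as xgb

X = np.random.rand(100, 5)
y = (X[:, 0] > 0.5).astype(int)
dtrain = xgb.DMatrix(X[:80], label=y[:80])
dvalid = xgb.DMatrix(X[80:], label=y[80:])

evals_result = {}
bst = xgb.train({'objective': 'binary:logistic', 'eval_metric': 'logloss'},
                dtrain,
                num_boost_round=20,
                evals=[(dtrain, 'train'), (dvalid, 'eval')],
                early_stopping_rounds=5,
                evals_result=evals_result)
print(evals_result['eval']['logloss'][-1])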
[ "def", "train", "(", "params", ",", "dtrain", ",", "num_boost_round", "=", "10", ",", "evals", "=", "(", ")", ",", "obj", "=", "None", ",", "feval", "=", "None", ",", "maximize", "=", "False", ",", "early_stopping_rounds", "=", "None", ",", "evals_result", "=", "None", ",", "verbose_eval", "=", "True", ",", "xgb_model", "=", "None", ",", "callbacks", "=", "None", ",", "learning_rates", "=", "None", ")", ":", "# pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init", "callbacks", "=", "[", "]", "if", "callbacks", "is", "None", "else", "callbacks", "# Most of legacy advanced options becomes callbacks", "if", "isinstance", "(", "verbose_eval", ",", "bool", ")", "and", "verbose_eval", ":", "callbacks", ".", "append", "(", "callback", ".", "print_evaluation", "(", ")", ")", "else", ":", "if", "isinstance", "(", "verbose_eval", ",", "int", ")", ":", "callbacks", ".", "append", "(", "callback", ".", "print_evaluation", "(", "verbose_eval", ")", ")", "if", "early_stopping_rounds", "is", "not", "None", ":", "callbacks", ".", "append", "(", "callback", ".", "early_stop", "(", "early_stopping_rounds", ",", "maximize", "=", "maximize", ",", "verbose", "=", "bool", "(", "verbose_eval", ")", ")", ")", "if", "evals_result", "is", "not", "None", ":", "callbacks", ".", "append", "(", "callback", ".", "record_evaluation", "(", "evals_result", ")", ")", "if", "learning_rates", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"learning_rates parameter is deprecated - use callback API instead\"", ",", "DeprecationWarning", ")", "callbacks", ".", "append", "(", "callback", ".", "reset_learning_rate", "(", "learning_rates", ")", ")", "return", "_train_internal", "(", "params", ",", "dtrain", ",", "num_boost_round", "=", "num_boost_round", ",", "evals", "=", "evals", ",", "obj", "=", "obj", ",", "feval", "=", "feval", ",", "xgb_model", "=", "xgb_model", ",", "callbacks", "=", "callbacks", ")" ]
Train a booster with given parameters. Parameters ---------- params : dict Booster params. dtrain : DMatrix Data to be trained. num_boost_round: int Number of boosting iterations. evals: list of pairs (DMatrix, string) List of items to be evaluated during training, this allows user to watch performance on the validation set. obj : function Customized objective function. feval : function Customized evaluation function. maximize : bool Whether to maximize feval. early_stopping_rounds: int Activates early stopping. Validation error needs to decrease at least every **early_stopping_rounds** round(s) to continue training. Requires at least one item in **evals**. If there's more than one, will use the last. Returns the model from the last iteration (not the best one). If early stopping occurs, the model will have three additional fields: ``bst.best_score``, ``bst.best_iteration`` and ``bst.best_ntree_limit``. (Use ``bst.best_ntree_limit`` to get the correct value if ``num_parallel_tree`` and/or ``num_class`` appears in the parameters) evals_result: dict This dictionary stores the evaluation results of all the items in watchlist. Example: with a watchlist containing ``[(dtest,'eval'), (dtrain,'train')]`` and a parameter containing ``('eval_metric': 'logloss')``, the **evals_result** returns .. code-block:: python {'train': {'logloss': ['0.48253', '0.35953']}, 'eval': {'logloss': ['0.480385', '0.357756']}} verbose_eval : bool or int Requires at least one item in **evals**. If **verbose_eval** is True then the evaluation metric on the validation set is printed at each boosting stage. If **verbose_eval** is an integer then the evaluation metric on the validation set is printed at every given **verbose_eval** boosting stage. The last boosting stage / the boosting stage found by using **early_stopping_rounds** is also printed. Example: with ``verbose_eval=4`` and at least one item in **evals**, an evaluation metric is printed every 4 boosting stages, instead of every boosting stage. learning_rates: list or function (deprecated - use callback API instead) List of learning rate for each boosting round or a customized function that calculates eta in terms of current number of round and the total number of boosting round (e.g. yields learning rate decay) xgb_model : file name of stored xgb model or 'Booster' instance Xgb model to be loaded before training (allows training continuation). callbacks : list of callback functions List of callback functions that are applied at end of each iteration. It is possible to use predefined callbacks by using :ref:`Callback API <callback_api>`. Example: .. code-block:: python [xgb.callback.reset_learning_rate(custom_rates)] Returns ------- Booster : a trained booster model
[ "Train", "a", "booster", "with", "given", "parameters", "." ]
python
train
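A minimal sketch of how this train() entry point is typically invoked, assuming the module is imported as xgb; the synthetic data, hyperparameter values, and round counts below are illustrative only, not taken from the record above.

# Illustrative usage of train() with early stopping and recorded eval results.
import numpy as np
import xgboost as xgb

X = np.random.rand(100, 5)
y = np.random.randint(0, 2, size=100)
dtrain = xgb.DMatrix(X[:80], label=y[:80])
dvalid = xgb.DMatrix(X[80:], label=y[80:])

evals_result = {}
bst = xgb.train(
    {'objective': 'binary:logistic', 'eval_metric': 'logloss'},  # made-up params
    dtrain,
    num_boost_round=50,
    evals=[(dvalid, 'eval'), (dtrain, 'train')],
    early_stopping_rounds=5,
    evals_result=evals_result,
    verbose_eval=10,
)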
numberoverzero/bloop
bloop/conditions.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L166-L181
def _value_ref(self, column, value, *, dumped=False, inner=False):
    """inner=True uses column.typedef.inner_typedef instead of column.typedef"""
    ref = ":v{}".format(self.next_index)

    # Need to dump this value
    if not dumped:
        typedef = column.typedef
        for segment in path_of(column):
            typedef = typedef[segment]
        if inner:
            typedef = typedef.inner_typedef
        value = self.engine._dump(typedef, value)

    self.attr_values[ref] = value
    self.counts[ref] += 1
    return ref, value
[ "def", "_value_ref", "(", "self", ",", "column", ",", "value", ",", "*", ",", "dumped", "=", "False", ",", "inner", "=", "False", ")", ":", "ref", "=", "\":v{}\"", ".", "format", "(", "self", ".", "next_index", ")", "# Need to dump this value", "if", "not", "dumped", ":", "typedef", "=", "column", ".", "typedef", "for", "segment", "in", "path_of", "(", "column", ")", ":", "typedef", "=", "typedef", "[", "segment", "]", "if", "inner", ":", "typedef", "=", "typedef", ".", "inner_typedef", "value", "=", "self", ".", "engine", ".", "_dump", "(", "typedef", ",", "value", ")", "self", ".", "attr_values", "[", "ref", "]", "=", "value", "self", ".", "counts", "[", "ref", "]", "+=", "1", "return", "ref", ",", "value" ]
inner=True uses column.typedef.inner_typedef instead of column.typedef
[ "inner", "=", "True", "uses", "column", ".", "typedef", ".", "inner_type", "instead", "of", "column", ".", "typedef" ]
python
train
GPflow/GPflow
gpflow/transforms.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/transforms.py#L378-L391
def backward_tensor(self, y): """ Transforms a series of triangular matrices y to the packed representation x (tf.tensors) :param y: unpacked tensor with shape self.num_matrices, self.N, self.N :return: packed tensor with shape self.num_matrices, (self.N**2 + self.N) / 2 """ if self.squeeze: y = tf.expand_dims(y, axis=0) indices = np.vstack(np.tril_indices(self.N)).T indices = itertools.product(np.arange(self.num_matrices), indices) indices = np.array([np.hstack(x) for x in indices]) triangular = tf.gather_nd(y, indices) return tf.reshape(triangular, [self.num_matrices, (self.N**2 + self.N) // 2])
[ "def", "backward_tensor", "(", "self", ",", "y", ")", ":", "if", "self", ".", "squeeze", ":", "y", "=", "tf", ".", "expand_dims", "(", "y", ",", "axis", "=", "0", ")", "indices", "=", "np", ".", "vstack", "(", "np", ".", "tril_indices", "(", "self", ".", "N", ")", ")", ".", "T", "indices", "=", "itertools", ".", "product", "(", "np", ".", "arange", "(", "self", ".", "num_matrices", ")", ",", "indices", ")", "indices", "=", "np", ".", "array", "(", "[", "np", ".", "hstack", "(", "x", ")", "for", "x", "in", "indices", "]", ")", "triangular", "=", "tf", ".", "gather_nd", "(", "y", ",", "indices", ")", "return", "tf", ".", "reshape", "(", "triangular", ",", "[", "self", ".", "num_matrices", ",", "(", "self", ".", "N", "**", "2", "+", "self", ".", "N", ")", "//", "2", "]", ")" ]
Transforms a series of triangular matrices y to the packed representation x (tf.tensors) :param y: unpacked tensor with shape self.num_matrices, self.N, self.N :return: packed tensor with shape self.num_matrices, (self.N**2 + self.N) / 2
[ "Transforms", "a", "series", "of", "triangular", "matrices", "y", "to", "the", "packed", "representation", "x", "(", "tf", ".", "tensors", ")", ":", "param", "y", ":", "unpacked", "tensor", "with", "shape", "self", ".", "num_matrices", "self", ".", "N", "self", ".", "N", ":", "return", ":", "packed", "tensor", "with", "shape", "self", ".", "num_matrices", "(", "self", ".", "N", "**", "2", "+", "self", ".", "N", ")", "/", "2" ]
python
train
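To make the packed triangular representation in the docstring concrete, here is a NumPy-only sketch of the same index bookkeeping (np.tril_indices over an N x N matrix); the values are invented and TensorFlow is deliberately left out.

import numpy as np

N = 3
y = np.arange(N * N, dtype=float).reshape(1, N, N)  # one "unpacked" N x N matrix
rows, cols = np.tril_indices(N)                     # lower-triangle coordinates
x = y[0][rows, cols]                                # packed vector of the triangle
assert x.shape == ((N**2 + N) // 2,)                # 6 entries for N = 3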
APSL/transmanager
transmanager/utils.py
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/utils.py#L61-L73
def has_field(mc, field_name):
    """
    Detect if a model has a given field.

    :param mc:
    :param field_name:
    :return:
    """
    try:
        mc._meta.get_field(field_name)
    except FieldDoesNotExist:
        return False
    return True
[ "def", "has_field", "(", "mc", ",", "field_name", ")", ":", "try", ":", "mc", ".", "_meta", ".", "get_field", "(", "field_name", ")", "except", "FieldDoesNotExist", ":", "return", "False", "return", "True" ]
Detect if a model has a given field.

:param mc:
:param field_name:
:return:
[ "detect", "if", "a", "model", "has", "a", "given", "field", "has" ]
python
train
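A hedged usage sketch; Article and its app module are hypothetical stand-ins for any Django model class.

from myapp.models import Article  # hypothetical model import

if has_field(Article, 'title'):
    print('Article defines a title field')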
rodluger/everest
everest/missions/k2/utils.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/utils.py#L627-L641
def SaturationFlux(EPIC, campaign=None, **kwargs): ''' Returns the well depth for the target. If any of the target's pixels have flux larger than this value, they are likely to be saturated and cause charge bleeding. The well depths were obtained from Table 13 of the Kepler instrument handbook. We assume an exposure time of 6.02s. ''' channel, well_depth = np.loadtxt(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'well_depth.tsv'), unpack=True) satflx = well_depth[channel == Channel(EPIC, campaign=campaign)][0] / 6.02 return satflx
[ "def", "SaturationFlux", "(", "EPIC", ",", "campaign", "=", "None", ",", "*", "*", "kwargs", ")", ":", "channel", ",", "well_depth", "=", "np", ".", "loadtxt", "(", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'well_depth.tsv'", ")", ",", "unpack", "=", "True", ")", "satflx", "=", "well_depth", "[", "channel", "==", "Channel", "(", "EPIC", ",", "campaign", "=", "campaign", ")", "]", "[", "0", "]", "/", "6.02", "return", "satflx" ]
Returns the well depth for the target. If any of the target's pixels have flux larger than this value, they are likely to be saturated and cause charge bleeding. The well depths were obtained from Table 13 of the Kepler instrument handbook. We assume an exposure time of 6.02s.
[ "Returns", "the", "well", "depth", "for", "the", "target", ".", "If", "any", "of", "the", "target", "s", "pixels", "have", "flux", "larger", "than", "this", "value", "they", "are", "likely", "to", "be", "saturated", "and", "cause", "charge", "bleeding", ".", "The", "well", "depths", "were", "obtained", "from", "Table", "13", "of", "the", "Kepler", "instrument", "handbook", ".", "We", "assume", "an", "exposure", "time", "of", "6", ".", "02s", "." ]
python
train
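The lookup-and-divide logic above can be sketched standalone; the channel numbers and well depths below are invented for illustration (the real values come from the well_depth.tsv table mentioned in the code).

import numpy as np

channel = np.array([31, 32, 33])               # invented channel numbers
well_depth = np.array([1.0e4, 1.1e4, 9.5e3])   # invented well depths (e-)
target_channel = 32
satflx = well_depth[channel == target_channel][0] / 6.02  # e-/s at 6.02 s exposure
print(satflx)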
secdev/scapy
scapy/layers/tls/cert.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/cert.py#L910-L930
def verifyChain(self, anchors, untrusted=None): """ Perform verification of certificate chains for that certificate. A list of anchors is required. The certificates in the optional untrusted list may be used as additional elements to the final chain. On par with chain instantiation, only one chain constructed with the untrusted candidates will be retained. Eventually, dates are checked. """ untrusted = untrusted or [] for a in anchors: chain = Chain(self + untrusted, a) if len(chain) == 1: # anchor only continue # check that the chain does not exclusively rely on untrusted if any(c in chain[1:] for c in self): for c in chain: if c.remainingDays() < 0: break if c is chain[-1]: # we got to the end of the chain return chain return None
[ "def", "verifyChain", "(", "self", ",", "anchors", ",", "untrusted", "=", "None", ")", ":", "untrusted", "=", "untrusted", "or", "[", "]", "for", "a", "in", "anchors", ":", "chain", "=", "Chain", "(", "self", "+", "untrusted", ",", "a", ")", "if", "len", "(", "chain", ")", "==", "1", ":", "# anchor only", "continue", "# check that the chain does not exclusively rely on untrusted", "if", "any", "(", "c", "in", "chain", "[", "1", ":", "]", "for", "c", "in", "self", ")", ":", "for", "c", "in", "chain", ":", "if", "c", ".", "remainingDays", "(", ")", "<", "0", ":", "break", "if", "c", "is", "chain", "[", "-", "1", "]", ":", "# we got to the end of the chain", "return", "chain", "return", "None" ]
Perform verification of certificate chains for that certificate. A list of anchors is required. The certificates in the optional untrusted list may be used as additional elements to the final chain. On par with chain instantiation, only one chain constructed with the untrusted candidates will be retained. Eventually, dates are checked.
[ "Perform", "verification", "of", "certificate", "chains", "for", "that", "certificate", ".", "A", "list", "of", "anchors", "is", "required", ".", "The", "certificates", "in", "the", "optional", "untrusted", "list", "may", "be", "used", "as", "additional", "elements", "to", "the", "final", "chain", ".", "On", "par", "with", "chain", "instantiation", "only", "one", "chain", "constructed", "with", "the", "untrusted", "candidates", "will", "be", "retained", ".", "Eventually", "dates", "are", "checked", "." ]
python
train
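A hedged usage sketch for verifyChain(); the PEM file names are hypothetical placeholders.

from scapy.layers.tls.cert import Cert

leaf = Cert('leaf.pem')   # certificate to verify (hypothetical path)
ca = Cert('ca.pem')       # trust anchor (hypothetical path)
chain = leaf.verifyChain(anchors=[ca])
if chain is None:
    print('no trusted, in-date chain could be built')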
aio-libs/aioftp
aioftp/client.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/client.py#L895-L911
def append_stream(self, destination, *, offset=0): """ Create stream for append (write) data to `destination` file. :param destination: destination path of file on server side :type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath` :param offset: byte offset for stream start position :type offset: :py:class:`int` :rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO` """ return self.get_stream( "APPE " + str(destination), "1xx", offset=offset, )
[ "def", "append_stream", "(", "self", ",", "destination", ",", "*", ",", "offset", "=", "0", ")", ":", "return", "self", ".", "get_stream", "(", "\"APPE \"", "+", "str", "(", "destination", ")", ",", "\"1xx\"", ",", "offset", "=", "offset", ",", ")" ]
Create stream for append (write) data to `destination` file. :param destination: destination path of file on server side :type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath` :param offset: byte offset for stream start position :type offset: :py:class:`int` :rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`
[ "Create", "stream", "for", "append", "(", "write", ")", "data", "to", "destination", "file", "." ]
python
valid
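A hedged async usage sketch following aioftp's stream write/finish pattern; the host, credentials, and remote path are placeholders.

import asyncio
import aioftp

async def append_line(host, user, password):
    async with aioftp.Client.context(host, user=user, password=password) as client:
        stream = await client.append_stream('logs/app.log')  # placeholder path
        await stream.write(b'new line\n')
        await stream.finish()

asyncio.run(append_line('ftp.example.com', 'user', 'password'))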
diffeo/rejester
rejester/_task_master.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_task_master.py#L443-L469
def update(self, lease_time=None):
    '''Refresh this task's expiration time.

    This tries to set the task's expiration time to the current time,
    plus `lease_time` seconds. It requires the job to not already be
    complete. If `lease_time` is negative, makes the job immediately
    be available for other workers to run.

    :param int lease_time: time to extend job lease beyond now
    :raises rejester.exceptions.LostLease: if the lease has already
      expired

    '''
    if lease_time is None:
        lease_time = self.default_lifetime
    with self.registry.lock(identifier=self.worker_id) as session:
        self._refresh(session)
        try:
            self.expires = time.time() + lease_time
            session.update(
                WORK_UNITS_ + self.work_spec_name,
                {self.key: self.data},
                priorities={self.key: self.expires},
                locks={self.key: self.worker_id})
        except EnvironmentError as exc:
            raise LostLease(exc)
[ "def", "update", "(", "self", ",", "lease_time", "=", "None", ")", ":", "if", "lease_time", "is", "None", ":", "lease_time", "=", "self", ".", "default_lifetime", "with", "self", ".", "registry", ".", "lock", "(", "identifier", "=", "self", ".", "worker_id", ")", "as", "session", ":", "self", ".", "_refresh", "(", "session", ")", "try", ":", "self", ".", "expires", "=", "time", ".", "time", "(", ")", "+", "lease_time", "session", ".", "update", "(", "WORK_UNITS_", "+", "self", ".", "work_spec_name", ",", "{", "self", ".", "key", ":", "self", ".", "data", "}", ",", "priorities", "=", "{", "self", ".", "key", ":", "self", ".", "expires", "}", ",", "locks", "=", "{", "self", ".", "key", ":", "self", ".", "worker_id", "}", ")", "except", "EnvironmentError", ",", "exc", ":", "raise", "LostLease", "(", "exc", ")" ]
Refresh this task's expiration time. This tries to set the task's expiration time to the current time, plus `lease_time` seconds. It requires the job to not already be complete. If `lease_time` is negative, makes the job immediately be available for other workers to run. :param int lease_time: time to extend job lease beyond now :raises rejester.exceptions.LostLease: if the lease has already expired
[ "Refresh", "this", "task", "s", "expiration", "time", "." ]
python
train
JasonKessler/scattertext
scattertext/__init__.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/__init__.py#L681-L734
def word_similarity_explorer(corpus, category, category_name, not_category_name, target_term, nlp=None, alpha=0.01, max_p_val=0.1, **kwargs): ''' Parameters ---------- corpus : Corpus Corpus to use. category : str Name of category column as it appears in original data frame. category_name : str Name of category to use. E.g., "5-star reviews." not_category_name : str Name of everything that isn't in category. E.g., "Below 5-star reviews". target_term : str Word or phrase for semantic similarity comparison nlp : spaCy-like parsing function E.g., spacy.load('en'), whitespace_nlp, etc... alpha : float, default = 0.01 Uniform dirichlet prior for p-value calculation max_p_val : float, default = 0.1 Max p-val to use find set of terms for similarity calculation Remaining arguments are from `produce_scattertext_explorer`. Returns ------- str, html of visualization ''' if nlp is None: import spacy nlp = spacy.load('en') base_term = nlp(target_term) scores = np.array([base_term.similarity(nlp(tok)) for tok in corpus._term_idx_store._i2val]) return produce_scattertext_explorer(corpus, category, category_name, not_category_name, scores=scores, sort_by_dist=False, reverse_sort_scores_for_not_category=False, word_vec_use_p_vals=True, term_significance=LogOddsRatioUninformativeDirichletPrior(alpha), max_p_val=max_p_val, p_value_colors=True, **kwargs)
[ "def", "word_similarity_explorer", "(", "corpus", ",", "category", ",", "category_name", ",", "not_category_name", ",", "target_term", ",", "nlp", "=", "None", ",", "alpha", "=", "0.01", ",", "max_p_val", "=", "0.1", ",", "*", "*", "kwargs", ")", ":", "if", "nlp", "is", "None", ":", "import", "spacy", "nlp", "=", "spacy", ".", "load", "(", "'en'", ")", "base_term", "=", "nlp", "(", "target_term", ")", "scores", "=", "np", ".", "array", "(", "[", "base_term", ".", "similarity", "(", "nlp", "(", "tok", ")", ")", "for", "tok", "in", "corpus", ".", "_term_idx_store", ".", "_i2val", "]", ")", "return", "produce_scattertext_explorer", "(", "corpus", ",", "category", ",", "category_name", ",", "not_category_name", ",", "scores", "=", "scores", ",", "sort_by_dist", "=", "False", ",", "reverse_sort_scores_for_not_category", "=", "False", ",", "word_vec_use_p_vals", "=", "True", ",", "term_significance", "=", "LogOddsRatioUninformativeDirichletPrior", "(", "alpha", ")", ",", "max_p_val", "=", "max_p_val", ",", "p_value_colors", "=", "True", ",", "*", "*", "kwargs", ")" ]
Parameters
----------
corpus : Corpus
    Corpus to use.
category : str
    Name of category column as it appears in original data frame.
category_name : str
    Name of category to use. E.g., "5-star reviews."
not_category_name : str
    Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
    Word or phrase for semantic similarity comparison
nlp : spaCy-like parsing function
    E.g., spacy.load('en'), whitespace_nlp, etc...
alpha : float, default = 0.01
    Uniform Dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
    Max p-val used to find the set of terms for the similarity calculation
Remaining arguments are from `produce_scattertext_explorer`.

Returns
-------
str, html of visualization
[ "Parameters", "----------", "corpus", ":", "Corpus", "Corpus", "to", "use", ".", "category", ":", "str", "Name", "of", "category", "column", "as", "it", "appears", "in", "original", "data", "frame", ".", "category_name", ":", "str", "Name", "of", "category", "to", "use", ".", "E", ".", "g", ".", "5", "-", "star", "reviews", ".", "not_category_name", ":", "str", "Name", "of", "everything", "that", "isn", "t", "in", "category", ".", "E", ".", "g", ".", "Below", "5", "-", "star", "reviews", ".", "target_term", ":", "str", "Word", "or", "phrase", "for", "semantic", "similarity", "comparison", "nlp", ":", "spaCy", "-", "like", "parsing", "function", "E", ".", "g", ".", "spacy", ".", "load", "(", "en", ")", "whitespace_nlp", "etc", "...", "alpha", ":", "float", "default", "=", "0", ".", "01", "Uniform", "dirichlet", "prior", "for", "p", "-", "value", "calculation", "max_p_val", ":", "float", "default", "=", "0", ".", "1", "Max", "p", "-", "val", "to", "use", "find", "set", "of", "terms", "for", "similarity", "calculation", "Remaining", "arguments", "are", "from", "produce_scattertext_explorer", ".", "Returns", "-------", "str", "html", "of", "visualization" ]
python
train
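A hedged call sketch; the corpus and the category names are placeholders in the style of scattertext's convention-corpus examples, not values from the record above.

# Assumes `corpus` is a prebuilt scattertext Corpus with a 'party' category.
html = word_similarity_explorer(corpus,
                                category='democrat',
                                category_name='Democratic',
                                not_category_name='Republican',
                                target_term='jobs')
with open('similarity.html', 'w') as f:
    f.write(html)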
bitesofcode/projexui
projexui/widgets/xpopupwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xpopupwidget.py#L1212-L1281
def showToolTip( text, point = None, anchor = None, parent = None, background = None, foreground = None, key = None, seconds = 5 ): """ Displays a popup widget as a tooltip bubble. :param text | <str> point | <QPoint> || None anchor | <XPopupWidget.Mode.Anchor> || None parent | <QWidget> || None background | <QColor> || None foreground | <QColor> || None key | <str> || None seconds | <int> """ if point is None: point = QCursor.pos() if parent is None: parent = QApplication.activeWindow() if anchor is None and parent is None: anchor = XPopupWidget.Anchor.TopCenter # create a new tooltip widget widget = XPopupWidget(parent) widget.setToolTipMode() widget.setResizable(False) # create the tooltip label label = QLabel(text, widget) label.setOpenExternalLinks(True) label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter) label.setMargin(3) label.setIndent(3) label.adjustSize() widget.setCentralWidget(label) # update the tip label.adjustSize() widget.adjustSize() palette = widget.palette() if not background: background = palette.color(palette.ToolTipBase) if not foreground: foreground = palette.color(palette.ToolTipText) palette.setColor(palette.Window, QColor(background)) palette.setColor(palette.WindowText, QColor(foreground)) widget.setPalette(palette) widget.centralWidget().setPalette(palette) if anchor is None: widget.setAutoCalculateAnchor(True) else: widget.setAnchor(anchor) widget.setAutoCloseOnFocusOut(True) widget.setAttribute(Qt.WA_DeleteOnClose) widget.popup(point) widget.startTimer(1000 * seconds) return widget
[ "def", "showToolTip", "(", "text", ",", "point", "=", "None", ",", "anchor", "=", "None", ",", "parent", "=", "None", ",", "background", "=", "None", ",", "foreground", "=", "None", ",", "key", "=", "None", ",", "seconds", "=", "5", ")", ":", "if", "point", "is", "None", ":", "point", "=", "QCursor", ".", "pos", "(", ")", "if", "parent", "is", "None", ":", "parent", "=", "QApplication", ".", "activeWindow", "(", ")", "if", "anchor", "is", "None", "and", "parent", "is", "None", ":", "anchor", "=", "XPopupWidget", ".", "Anchor", ".", "TopCenter", "# create a new tooltip widget\r", "widget", "=", "XPopupWidget", "(", "parent", ")", "widget", ".", "setToolTipMode", "(", ")", "widget", ".", "setResizable", "(", "False", ")", "# create the tooltip label\r", "label", "=", "QLabel", "(", "text", ",", "widget", ")", "label", ".", "setOpenExternalLinks", "(", "True", ")", "label", ".", "setAlignment", "(", "Qt", ".", "AlignLeft", "|", "Qt", ".", "AlignVCenter", ")", "label", ".", "setMargin", "(", "3", ")", "label", ".", "setIndent", "(", "3", ")", "label", ".", "adjustSize", "(", ")", "widget", ".", "setCentralWidget", "(", "label", ")", "# update the tip\r", "label", ".", "adjustSize", "(", ")", "widget", ".", "adjustSize", "(", ")", "palette", "=", "widget", ".", "palette", "(", ")", "if", "not", "background", ":", "background", "=", "palette", ".", "color", "(", "palette", ".", "ToolTipBase", ")", "if", "not", "foreground", ":", "foreground", "=", "palette", ".", "color", "(", "palette", ".", "ToolTipText", ")", "palette", ".", "setColor", "(", "palette", ".", "Window", ",", "QColor", "(", "background", ")", ")", "palette", ".", "setColor", "(", "palette", ".", "WindowText", ",", "QColor", "(", "foreground", ")", ")", "widget", ".", "setPalette", "(", "palette", ")", "widget", ".", "centralWidget", "(", ")", ".", "setPalette", "(", "palette", ")", "if", "anchor", "is", "None", ":", "widget", ".", "setAutoCalculateAnchor", "(", "True", ")", "else", ":", "widget", ".", "setAnchor", "(", "anchor", ")", "widget", ".", "setAutoCloseOnFocusOut", "(", "True", ")", "widget", ".", "setAttribute", "(", "Qt", ".", "WA_DeleteOnClose", ")", "widget", ".", "popup", "(", "point", ")", "widget", ".", "startTimer", "(", "1000", "*", "seconds", ")", "return", "widget" ]
Displays a popup widget as a tooltip bubble. :param text | <str> point | <QPoint> || None anchor | <XPopupWidget.Mode.Anchor> || None parent | <QWidget> || None background | <QColor> || None foreground | <QColor> || None key | <str> || None seconds | <int>
[ "Displays", "a", "popup", "widget", "as", "a", "tooltip", "bubble", ".", ":", "param", "text", "|", "<str", ">", "point", "|", "<QPoint", ">", "||", "None", "anchor", "|", "<XPopupWidget", ".", "Mode", ".", "Anchor", ">", "||", "None", "parent", "|", "<QWidget", ">", "||", "None", "background", "|", "<QColor", ">", "||", "None", "foreground", "|", "<QColor", ">", "||", "None", "key", "|", "<str", ">", "||", "None", "seconds", "|", "<int", ">" ]
python
train
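A hedged usage sketch; it assumes showToolTip is exposed as a static/class-level helper on XPopupWidget (consistent with the self-less signature above) and that a Qt application is already running.

from projexui.widgets.xpopupwidget import XPopupWidget

# Show a transient tooltip bubble near the cursor for 3 seconds.
XPopupWidget.showToolTip('Record saved successfully.', seconds=3)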
lpantano/seqcluster
seqcluster/libs/read.py
https://github.com/lpantano/seqcluster/blob/774e23add8cd4fdc83d626cea3bd1f458e7d060d/seqcluster/libs/read.py#L33-L36
def write_data(data, out_file): """write json file from seqcluster cluster""" with open(out_file, 'w') as handle_out: handle_out.write(json.dumps([data], skipkeys=True, indent=2))
[ "def", "write_data", "(", "data", ",", "out_file", ")", ":", "with", "open", "(", "out_file", ",", "'w'", ")", "as", "handle_out", ":", "handle_out", ".", "write", "(", "json", ".", "dumps", "(", "[", "data", "]", ",", "skipkeys", "=", "True", ",", "indent", "=", "2", ")", ")" ]
write json file from seqcluster cluster
[ "write", "json", "file", "from", "seqcluster", "cluster" ]
python
train
Azure/azure-cli-extensions
src/interactive/azext_interactive/azclishell/progress.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/interactive/azext_interactive/azclishell/progress.py#L67-L118
def progress_view(shell): """ updates the view """ while not ShellProgressView.done: _, col = get_window_dim() col = int(col) progress = get_progress_message() if '\n' in progress: prog_list = progress.split('\n') prog_val = len(prog_list[-1]) else: prog_val = len(progress) buffer_size = col - prog_val - 4 if ShellProgressView.progress_bar: doc = u'{}:{}'.format(progress, ShellProgressView.progress_bar) shell.spin_val = -1 counter = 0 ShellProgressView.heart_bar = '' else: if progress and not ShellProgressView.done: heart_bar = ShellProgressView.heart_bar if shell.spin_val >= 0: beat = ShellProgressView.heart_beat_values[_get_heart_frequency()] heart_bar += beat heart_bar = heart_bar[len(beat):] len_beat = len(heart_bar) if len_beat > buffer_size: heart_bar = heart_bar[len_beat - buffer_size:] while len(heart_bar) < buffer_size: beat = ShellProgressView.heart_beat_values[_get_heart_frequency()] heart_bar += beat else: shell.spin_val = 0 counter = 0 while counter < buffer_size: beat = ShellProgressView.heart_beat_values[_get_heart_frequency()] heart_bar += beat counter += len(beat) ShellProgressView.heart_bar = heart_bar doc = u'{}:{}'.format(progress, ShellProgressView.heart_bar) shell.cli.buffers['progress'].reset( initial_document=Document(doc)) shell.cli.request_redraw() sleep(shell.intermediate_sleep) ShellProgressView.done = False ShellProgressView.progress_bar = '' shell.spin_val = -1 sleep(shell.final_sleep) return True
[ "def", "progress_view", "(", "shell", ")", ":", "while", "not", "ShellProgressView", ".", "done", ":", "_", ",", "col", "=", "get_window_dim", "(", ")", "col", "=", "int", "(", "col", ")", "progress", "=", "get_progress_message", "(", ")", "if", "'\\n'", "in", "progress", ":", "prog_list", "=", "progress", ".", "split", "(", "'\\n'", ")", "prog_val", "=", "len", "(", "prog_list", "[", "-", "1", "]", ")", "else", ":", "prog_val", "=", "len", "(", "progress", ")", "buffer_size", "=", "col", "-", "prog_val", "-", "4", "if", "ShellProgressView", ".", "progress_bar", ":", "doc", "=", "u'{}:{}'", ".", "format", "(", "progress", ",", "ShellProgressView", ".", "progress_bar", ")", "shell", ".", "spin_val", "=", "-", "1", "counter", "=", "0", "ShellProgressView", ".", "heart_bar", "=", "''", "else", ":", "if", "progress", "and", "not", "ShellProgressView", ".", "done", ":", "heart_bar", "=", "ShellProgressView", ".", "heart_bar", "if", "shell", ".", "spin_val", ">=", "0", ":", "beat", "=", "ShellProgressView", ".", "heart_beat_values", "[", "_get_heart_frequency", "(", ")", "]", "heart_bar", "+=", "beat", "heart_bar", "=", "heart_bar", "[", "len", "(", "beat", ")", ":", "]", "len_beat", "=", "len", "(", "heart_bar", ")", "if", "len_beat", ">", "buffer_size", ":", "heart_bar", "=", "heart_bar", "[", "len_beat", "-", "buffer_size", ":", "]", "while", "len", "(", "heart_bar", ")", "<", "buffer_size", ":", "beat", "=", "ShellProgressView", ".", "heart_beat_values", "[", "_get_heart_frequency", "(", ")", "]", "heart_bar", "+=", "beat", "else", ":", "shell", ".", "spin_val", "=", "0", "counter", "=", "0", "while", "counter", "<", "buffer_size", ":", "beat", "=", "ShellProgressView", ".", "heart_beat_values", "[", "_get_heart_frequency", "(", ")", "]", "heart_bar", "+=", "beat", "counter", "+=", "len", "(", "beat", ")", "ShellProgressView", ".", "heart_bar", "=", "heart_bar", "doc", "=", "u'{}:{}'", ".", "format", "(", "progress", ",", "ShellProgressView", ".", "heart_bar", ")", "shell", ".", "cli", ".", "buffers", "[", "'progress'", "]", ".", "reset", "(", "initial_document", "=", "Document", "(", "doc", ")", ")", "shell", ".", "cli", ".", "request_redraw", "(", ")", "sleep", "(", "shell", ".", "intermediate_sleep", ")", "ShellProgressView", ".", "done", "=", "False", "ShellProgressView", ".", "progress_bar", "=", "''", "shell", ".", "spin_val", "=", "-", "1", "sleep", "(", "shell", ".", "final_sleep", ")", "return", "True" ]
updates the view
[ "updates", "the", "view" ]
python
train
epfl-idevelop/epfl-ldap
epflldap/utils.py
https://github.com/epfl-idevelop/epfl-ldap/blob/bebb94da3609d358bd83f31672eeaddcda872c5d/epflldap/utils.py#L11-L22
def get_optional_env(key):
    """
    Return the value of an optional environment variable, falling back to
    the matching entry in CONSTANTS if it's not set.
    """
    environment_variable_value = os.environ.get(key)
    if environment_variable_value:
        return environment_variable_value
    elif key in CONSTANTS:
        return CONSTANTS[key]
    else:
        raise Exception("The variable {} is not set".format(key))
[ "def", "get_optional_env", "(", "key", ")", ":", "environment_variable_value", "=", "os", ".", "environ", ".", "get", "(", "key", ")", "if", "environment_variable_value", ":", "return", "environment_variable_value", "elif", "key", "in", "CONSTANTS", ":", "return", "CONSTANTS", "[", "key", "]", "else", ":", "raise", "Exception", "(", "\"The variable {1} is not set\"", ".", "format", "(", "key", ")", ")" ]
Return the value of an optional environment variable, falling back to the matching entry in CONSTANTS if it's not set.
[ "Return", "the", "value", "of", "an", "optional", "environment", "variable", "and", "use", "the", "provided", "default", "if", "it", "s", "not", "set", "." ]
python
train
racitup/static-ranges
static_ranges.py
https://github.com/racitup/static-ranges/blob/a15c2e2bd6f643279ae046494b8714634dd380a4/static_ranges.py#L110-L122
def check_ranges(cls, ranges, length): """Removes errored ranges""" result = [] for start, end in ranges: if isinstance(start, int) or isinstance(end, int): if isinstance(start, int) and not (0 <= start < length): continue elif isinstance(start, int) and isinstance(end, int) and not (start <= end): continue elif start is None and end == 0: continue result.append( (start,end) ) return result
[ "def", "check_ranges", "(", "cls", ",", "ranges", ",", "length", ")", ":", "result", "=", "[", "]", "for", "start", ",", "end", "in", "ranges", ":", "if", "isinstance", "(", "start", ",", "int", ")", "or", "isinstance", "(", "end", ",", "int", ")", ":", "if", "isinstance", "(", "start", ",", "int", ")", "and", "not", "(", "0", "<=", "start", "<", "length", ")", ":", "continue", "elif", "isinstance", "(", "start", ",", "int", ")", "and", "isinstance", "(", "end", ",", "int", ")", "and", "not", "(", "start", "<=", "end", ")", ":", "continue", "elif", "start", "is", "None", "and", "end", "==", "0", ":", "continue", "result", ".", "append", "(", "(", "start", ",", "end", ")", ")", "return", "result" ]
Removes errored ranges
[ "Removes", "errored", "ranges" ]
python
valid
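A worked example of the filtering rules above; Ranges is a hypothetical owner class name, since only the classmethod body is shown.

ranges = [(0, 49), (150, 200), (None, 0), (30, 10), (90, None)]
print(Ranges.check_ranges(ranges, 100))
# -> [(0, 49), (90, None)]
#    (150, 200) starts past the end of a length-100 resource, (None, 0)
#    is an empty suffix range, and (30, 10) is reversed, so all three drop.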
djgagne/hagelslag
hagelslag/evaluation/ProbabilityMetrics.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/ProbabilityMetrics.py#L495-L502
def crps_climo(self): """ Calculate the climatological CRPS. """ o_bar = self.errors["O"].values / float(self.num_forecasts) crps_c = np.sum(self.num_forecasts * (o_bar ** 2) - o_bar * self.errors["O"].values * 2.0 + self.errors["O_2"].values) / float(self.thresholds.size * self.num_forecasts) return crps_c
[ "def", "crps_climo", "(", "self", ")", ":", "o_bar", "=", "self", ".", "errors", "[", "\"O\"", "]", ".", "values", "/", "float", "(", "self", ".", "num_forecasts", ")", "crps_c", "=", "np", ".", "sum", "(", "self", ".", "num_forecasts", "*", "(", "o_bar", "**", "2", ")", "-", "o_bar", "*", "self", ".", "errors", "[", "\"O\"", "]", ".", "values", "*", "2.0", "+", "self", ".", "errors", "[", "\"O_2\"", "]", ".", "values", ")", "/", "float", "(", "self", ".", "thresholds", ".", "size", "*", "self", ".", "num_forecasts", ")", "return", "crps_c" ]
Calculate the climatological CRPS.
[ "Calculate", "the", "climatological", "CRPS", "." ]
python
train
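To make the accumulator arithmetic concrete, here is a hedged NumPy reading with invented numbers; O holds per-threshold sums of observations over num_forecasts cases and O_2 the corresponding sums of squares, mirroring the errors table used by the method.

import numpy as np

n = 100                            # num_forecasts (invented)
O = np.array([60.0, 25.0, 5.0])    # per-threshold observation sums (invented)
O_2 = np.array([40.0, 12.0, 3.0])  # per-threshold sums of squares (invented)
o_bar = O / n
crps_c = np.sum(n * o_bar**2 - 2.0 * o_bar * O + O_2) / (O.size * n)
print(crps_c)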
Metatab/geoid
geoid/core.py
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/core.py#L408-L416
def augment(module_name, base_class): """Call the augment() method for all of the derived classes in the module """ for name, cls in inspect.getmembers(sys.modules[module_name], lambda x : inspect.isclass(x) and issubclass(x, base_class) ): if cls == base_class: continue cls.augment()
[ "def", "augment", "(", "module_name", ",", "base_class", ")", ":", "for", "name", ",", "cls", "in", "inspect", ".", "getmembers", "(", "sys", ".", "modules", "[", "module_name", "]", ",", "lambda", "x", ":", "inspect", ".", "isclass", "(", "x", ")", "and", "issubclass", "(", "x", ",", "base_class", ")", ")", ":", "if", "cls", "==", "base_class", ":", "continue", "cls", ".", "augment", "(", ")" ]
Call the augment() method for all of the derived classes in the module
[ "Call", "the", "augment", "()", "method", "for", "all", "of", "the", "derived", "classes", "in", "the", "module" ]
python
train
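A hedged call sketch; Geoid is assumed to be the base class whose subclasses define augment(), which is how this registration helper is typically used at module import time.

# Run cls.augment() on every subclass of the (assumed) Geoid base class
# defined in the calling module.
augment(__name__, Geoid)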
SheffieldML/GPyOpt
GPyOpt/acquisitions/LP.py
https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/acquisitions/LP.py#L40-L46
def update_batches(self, X_batch, L, Min):
    """
    Updates the batches internally and pre-computes the hammer function
    values (r_x0, s_x0) used by the local penalization.
    """
    self.X_batch = X_batch
    if X_batch is not None:
        self.r_x0, self.s_x0 = self._hammer_function_precompute(X_batch, L, Min, self.model)
[ "def", "update_batches", "(", "self", ",", "X_batch", ",", "L", ",", "Min", ")", ":", "self", ".", "X_batch", "=", "X_batch", "if", "X_batch", "is", "not", "None", ":", "self", ".", "r_x0", ",", "self", ".", "s_x0", "=", "self", ".", "_hammer_function_precompute", "(", "X_batch", ",", "L", ",", "Min", ",", "self", ".", "model", ")" ]
Updates the batches internally and pre-computes the hammer function values (r_x0, s_x0) used by the local penalization.
[ "Updates", "the", "batches", "internally", "and", "pre", "-", "computes", "the" ]
python
train
astropy/regions
regions/io/core.py
https://github.com/astropy/regions/blob/452d962c417e4ff20d1268f99535c6ff89c83437/regions/io/core.py#L594-L628
def to_region(self): """ Converts to region, ``regions.Region`` object """ coords = self.convert_coords() log.debug(coords) viz_keywords = ['color', 'dash', 'dashlist', 'width', 'font', 'symsize', 'symbol', 'symsize', 'fontsize', 'fontstyle', 'usetex', 'labelpos', 'labeloff', 'linewidth', 'linestyle', 'point', 'textangle', 'fontweight'] if isinstance(coords[0], SkyCoord): reg = self.shape_to_sky_region[self.region_type](*coords) elif isinstance(coords[0], PixCoord): reg = self.shape_to_pixel_region[self.region_type](*coords) else: self._raise_error("No central coordinate") reg.visual = RegionVisual() reg.meta = RegionMeta() # both 'text' and 'label' should be set to the same value, where we # default to the 'text' value since that is the one used by ds9 regions label = self.meta.get('text', self.meta.get('label', "")) if label != '': reg.meta['label'] = label for key in self.meta: if key in viz_keywords: reg.visual[key] = self.meta[key] else: reg.meta[key] = self.meta[key] reg.meta['include'] = self.include return reg
[ "def", "to_region", "(", "self", ")", ":", "coords", "=", "self", ".", "convert_coords", "(", ")", "log", ".", "debug", "(", "coords", ")", "viz_keywords", "=", "[", "'color'", ",", "'dash'", ",", "'dashlist'", ",", "'width'", ",", "'font'", ",", "'symsize'", ",", "'symbol'", ",", "'symsize'", ",", "'fontsize'", ",", "'fontstyle'", ",", "'usetex'", ",", "'labelpos'", ",", "'labeloff'", ",", "'linewidth'", ",", "'linestyle'", ",", "'point'", ",", "'textangle'", ",", "'fontweight'", "]", "if", "isinstance", "(", "coords", "[", "0", "]", ",", "SkyCoord", ")", ":", "reg", "=", "self", ".", "shape_to_sky_region", "[", "self", ".", "region_type", "]", "(", "*", "coords", ")", "elif", "isinstance", "(", "coords", "[", "0", "]", ",", "PixCoord", ")", ":", "reg", "=", "self", ".", "shape_to_pixel_region", "[", "self", ".", "region_type", "]", "(", "*", "coords", ")", "else", ":", "self", ".", "_raise_error", "(", "\"No central coordinate\"", ")", "reg", ".", "visual", "=", "RegionVisual", "(", ")", "reg", ".", "meta", "=", "RegionMeta", "(", ")", "# both 'text' and 'label' should be set to the same value, where we", "# default to the 'text' value since that is the one used by ds9 regions", "label", "=", "self", ".", "meta", ".", "get", "(", "'text'", ",", "self", ".", "meta", ".", "get", "(", "'label'", ",", "\"\"", ")", ")", "if", "label", "!=", "''", ":", "reg", ".", "meta", "[", "'label'", "]", "=", "label", "for", "key", "in", "self", ".", "meta", ":", "if", "key", "in", "viz_keywords", ":", "reg", ".", "visual", "[", "key", "]", "=", "self", ".", "meta", "[", "key", "]", "else", ":", "reg", ".", "meta", "[", "key", "]", "=", "self", ".", "meta", "[", "key", "]", "reg", ".", "meta", "[", "'include'", "]", "=", "self", ".", "include", "return", "reg" ]
Converts to region, ``regions.Region`` object
[ "Converts", "to", "region", "regions", ".", "Region", "object" ]
python
train
mitsei/dlkit
dlkit/handcar/learning/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/sessions.py#L440-L474
def get_objectives_by_query(self, objective_query=None): """Gets a list of Objectives matching the given objective query. arg: objectiveQuery (osid.learning.ObjectiveQuery): the objective query return: (osid.learning.ObjectiveList) - the returned ObjectiveList raise: NullArgument - objectiveQuery is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - objectiveQuery is not of this service compliance: mandatory - This method must be implemented. """ if objective_query is None: raise NullArgument() if 'ancestorObjectiveId' in objective_query._query_terms: url_path = construct_url('objectives', bank_id=self._objective_bank_id, obj_id=objective_query._query_terms['ancestorObjectiveId'].split('=')[1]) url_path += '/children' elif 'descendantObjectiveId' in objective_query._query_terms: url_path = construct_url('objectives', bank_id=self._objective_bank_id, obj_id=objective_query._query_terms['descendantObjectiveId'].split('=')[1]) url_path += '/parents' else: url_path = construct_url('objectives', obj_id=None) for term in objective_query._query_terms: if term not in ['ancestorObjectiveId', 'descendantObjectiveId']: url_path += '&{0}'.format(objective_query._query_terms[term]) url_path = url_path.replace('&', '?', 1) return objects.ObjectiveList(self._get_request(url_path))
[ "def", "get_objectives_by_query", "(", "self", ",", "objective_query", "=", "None", ")", ":", "if", "objective_query", "is", "None", ":", "raise", "NullArgument", "(", ")", "if", "'ancestorObjectiveId'", "in", "objective_query", ".", "_query_terms", ":", "url_path", "=", "construct_url", "(", "'objectives'", ",", "bank_id", "=", "self", ".", "_objective_bank_id", ",", "obj_id", "=", "objective_query", ".", "_query_terms", "[", "'ancestorObjectiveId'", "]", ".", "split", "(", "'='", ")", "[", "1", "]", ")", "url_path", "+=", "'/children'", "elif", "'descendantObjectiveId'", "in", "objective_query", ".", "_query_terms", ":", "url_path", "=", "construct_url", "(", "'objectives'", ",", "bank_id", "=", "self", ".", "_objective_bank_id", ",", "obj_id", "=", "objective_query", ".", "_query_terms", "[", "'descendantObjectiveId'", "]", ".", "split", "(", "'='", ")", "[", "1", "]", ")", "url_path", "+=", "'/parents'", "else", ":", "url_path", "=", "construct_url", "(", "'objectives'", ",", "obj_id", "=", "None", ")", "for", "term", "in", "objective_query", ".", "_query_terms", ":", "if", "term", "not", "in", "[", "'ancestorObjectiveId'", ",", "'descendantObjectiveId'", "]", ":", "url_path", "+=", "'&{0}'", ".", "format", "(", "objective_query", ".", "_query_terms", "[", "term", "]", ")", "url_path", "=", "url_path", ".", "replace", "(", "'&'", ",", "'?'", ",", "1", ")", "return", "objects", ".", "ObjectiveList", "(", "self", ".", "_get_request", "(", "url_path", ")", ")" ]
Gets a list of Objectives matching the given objective query. arg: objectiveQuery (osid.learning.ObjectiveQuery): the objective query return: (osid.learning.ObjectiveList) - the returned ObjectiveList raise: NullArgument - objectiveQuery is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - objectiveQuery is not of this service compliance: mandatory - This method must be implemented.
[ "Gets", "a", "list", "of", "Objectives", "matching", "the", "given", "objective", "query", "." ]
python
train
Toilal/rebulk
rebulk/match.py
https://github.com/Toilal/rebulk/blob/7511a4671f2fd9493e3df1e5177b7656789069e8/rebulk/match.py#L104-L124
def _add_match(self, match): """ Add a match :param match: :type match: Match """ if self.__name_dict is not None: if match.name: _BaseMatches._base_add(self._name_dict[match.name], (match)) if self.__tag_dict is not None: for tag in match.tags: _BaseMatches._base_add(self._tag_dict[tag], match) if self.__start_dict is not None: _BaseMatches._base_add(self._start_dict[match.start], match) if self.__end_dict is not None: _BaseMatches._base_add(self._end_dict[match.end], match) if self.__index_dict is not None: for index in range(*match.span): _BaseMatches._base_add(self._index_dict[index], match) if match.end > self._max_end: self._max_end = match.end
[ "def", "_add_match", "(", "self", ",", "match", ")", ":", "if", "self", ".", "__name_dict", "is", "not", "None", ":", "if", "match", ".", "name", ":", "_BaseMatches", ".", "_base_add", "(", "self", ".", "_name_dict", "[", "match", ".", "name", "]", ",", "(", "match", ")", ")", "if", "self", ".", "__tag_dict", "is", "not", "None", ":", "for", "tag", "in", "match", ".", "tags", ":", "_BaseMatches", ".", "_base_add", "(", "self", ".", "_tag_dict", "[", "tag", "]", ",", "match", ")", "if", "self", ".", "__start_dict", "is", "not", "None", ":", "_BaseMatches", ".", "_base_add", "(", "self", ".", "_start_dict", "[", "match", ".", "start", "]", ",", "match", ")", "if", "self", ".", "__end_dict", "is", "not", "None", ":", "_BaseMatches", ".", "_base_add", "(", "self", ".", "_end_dict", "[", "match", ".", "end", "]", ",", "match", ")", "if", "self", ".", "__index_dict", "is", "not", "None", ":", "for", "index", "in", "range", "(", "*", "match", ".", "span", ")", ":", "_BaseMatches", ".", "_base_add", "(", "self", ".", "_index_dict", "[", "index", "]", ",", "match", ")", "if", "match", ".", "end", ">", "self", ".", "_max_end", ":", "self", ".", "_max_end", "=", "match", ".", "end" ]
Add a match :param match: :type match: Match
[ "Add", "a", "match", ":", "param", "match", ":", ":", "type", "match", ":", "Match" ]
python
train
pygobject/pgi
pgi/overrides/Gtk.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/Gtk.py#L1502-L1512
def set_value(self, treeiter, column, value): """ {{ all }} `value` can also be a Python value and will be converted to a :obj:`GObject.Value` using the corresponding column type (See :obj:`Gtk.ListStore.set_column_types`\\()). """ value = self._convert_value(column, value) Gtk.ListStore.set_value(self, treeiter, column, value)
[ "def", "set_value", "(", "self", ",", "treeiter", ",", "column", ",", "value", ")", ":", "value", "=", "self", ".", "_convert_value", "(", "column", ",", "value", ")", "Gtk", ".", "ListStore", ".", "set_value", "(", "self", ",", "treeiter", ",", "column", ",", "value", ")" ]
{{ all }} `value` can also be a Python value and will be converted to a :obj:`GObject.Value` using the corresponding column type (See :obj:`Gtk.ListStore.set_column_types`\\()).
[ "{{", "all", "}}" ]
python
train
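A minimal usage sketch of this ListStore override; it uses the standard gi.repository import, whose namespace pgi mirrors.

from gi.repository import Gtk  # pgi exposes the same namespace

store = Gtk.ListStore(str, int)
it = store.append(['banana', 4])
store.set_value(it, 1, 7)  # the plain int is converted to a GObject.Value internally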
rigetti/pyquil
pyquil/api/_benchmark.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_benchmark.py#L45-L79
def apply_clifford_to_pauli(self, clifford, pauli_in): r""" Given a circuit that consists only of elements of the Clifford group, return its action on a PauliTerm. In particular, for Clifford C, and Pauli P, this returns the PauliTerm representing CPC^{\dagger}. :param Program clifford: A Program that consists only of Clifford operations. :param PauliTerm pauli_in: A PauliTerm to be acted on by clifford via conjugation. :return: A PauliTerm corresponding to clifford * pauli_in * clifford^{\dagger} """ # do nothing if `pauli_in` is the identity if is_identity(pauli_in): return pauli_in indices_and_terms = list(zip(*list(pauli_in.operations_as_set()))) payload = ConjugateByCliffordRequest( clifford=clifford.out(), pauli=rpcq.messages.PauliTerm( indices=list(indices_and_terms[0]), symbols=list(indices_and_terms[1]))) response: ConjugateByCliffordResponse = self.client.call( 'conjugate_pauli_by_clifford', payload) phase_factor, paulis = response.phase, response.pauli pauli_out = PauliTerm("I", 0, 1.j ** phase_factor) clifford_qubits = clifford.get_qubits() pauli_qubits = pauli_in.get_qubits() all_qubits = sorted(set(pauli_qubits).union(set(clifford_qubits))) # The returned pauli will have specified its value on all_qubits, sorted by index. # This is maximal set of qubits that can be affected by this conjugation. for i, pauli in enumerate(paulis): pauli_out *= PauliTerm(pauli, all_qubits[i]) return pauli_out * pauli_in.coefficient
[ "def", "apply_clifford_to_pauli", "(", "self", ",", "clifford", ",", "pauli_in", ")", ":", "# do nothing if `pauli_in` is the identity", "if", "is_identity", "(", "pauli_in", ")", ":", "return", "pauli_in", "indices_and_terms", "=", "list", "(", "zip", "(", "*", "list", "(", "pauli_in", ".", "operations_as_set", "(", ")", ")", ")", ")", "payload", "=", "ConjugateByCliffordRequest", "(", "clifford", "=", "clifford", ".", "out", "(", ")", ",", "pauli", "=", "rpcq", ".", "messages", ".", "PauliTerm", "(", "indices", "=", "list", "(", "indices_and_terms", "[", "0", "]", ")", ",", "symbols", "=", "list", "(", "indices_and_terms", "[", "1", "]", ")", ")", ")", "response", ":", "ConjugateByCliffordResponse", "=", "self", ".", "client", ".", "call", "(", "'conjugate_pauli_by_clifford'", ",", "payload", ")", "phase_factor", ",", "paulis", "=", "response", ".", "phase", ",", "response", ".", "pauli", "pauli_out", "=", "PauliTerm", "(", "\"I\"", ",", "0", ",", "1.j", "**", "phase_factor", ")", "clifford_qubits", "=", "clifford", ".", "get_qubits", "(", ")", "pauli_qubits", "=", "pauli_in", ".", "get_qubits", "(", ")", "all_qubits", "=", "sorted", "(", "set", "(", "pauli_qubits", ")", ".", "union", "(", "set", "(", "clifford_qubits", ")", ")", ")", "# The returned pauli will have specified its value on all_qubits, sorted by index.", "# This is maximal set of qubits that can be affected by this conjugation.", "for", "i", ",", "pauli", "in", "enumerate", "(", "paulis", ")", ":", "pauli_out", "*=", "PauliTerm", "(", "pauli", ",", "all_qubits", "[", "i", "]", ")", "return", "pauli_out", "*", "pauli_in", ".", "coefficient" ]
r""" Given a circuit that consists only of elements of the Clifford group, return its action on a PauliTerm. In particular, for Clifford C, and Pauli P, this returns the PauliTerm representing CPC^{\dagger}. :param Program clifford: A Program that consists only of Clifford operations. :param PauliTerm pauli_in: A PauliTerm to be acted on by clifford via conjugation. :return: A PauliTerm corresponding to clifford * pauli_in * clifford^{\dagger}
[ "r", "Given", "a", "circuit", "that", "consists", "only", "of", "elements", "of", "the", "Clifford", "group", "return", "its", "action", "on", "a", "PauliTerm", "." ]
python
train
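A hedged usage sketch: conjugating X by a Hadamard should yield Z, since H X H† = Z. get_benchmarker() assumes a reachable benchmarking server, as in pyquil 2.x.

from pyquil import Program
from pyquil.api import get_benchmarker
from pyquil.gates import H
from pyquil.paulis import PauliTerm

bm = get_benchmarker()
pauli_out = bm.apply_clifford_to_pauli(Program(H(0)), PauliTerm('X', 0))
print(pauli_out)  # expected: (1+0j)*Z0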
synw/dataswim
dataswim/messages.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/messages.py#L26-L31
def status(self, *msg): """ Prints a status message """ label = colors.yellow("STATUS") self._msg(label, *msg)
[ "def", "status", "(", "self", ",", "*", "msg", ")", ":", "label", "=", "colors", ".", "yellow", "(", "\"STATUS\"", ")", "self", ".", "_msg", "(", "label", ",", "*", "msg", ")" ]
Prints a status message
[ "Prints", "a", "status", "message" ]
python
train
androguard/androguard
androguard/core/bytecodes/dvm.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/dvm.py#L2998-L3033
def get_short_string(self): """ Return a shorter formatted String which encodes this method. The returned name has the form: <classname> <methodname> ([arguments ...])<returntype> * All Class names are condensed to the actual name (no package). * Access flags are not returned. * <init> and <clinit> are NOT replaced by the classname! This name might not be unique! :return: str """ def _fmt_classname(cls): arr = "" # Test for arrays while cls.startswith("["): arr += "[" cls = cls[1:] # is a object type if cls.startswith("L"): cls = cls[1:-1] # only return last element if "/" in cls: cls = cls.rsplit("/", 1)[1] return arr + cls clsname = _fmt_classname(self.get_class_name()) param, ret = self.get_descriptor()[1:].split(")") params = map(_fmt_classname, param.split(" ")) desc = "({}){}".format(" ".join(params), _fmt_classname(ret)) return "{cls} {meth} {desc}".format(cls=clsname, meth=self.get_name(), desc=desc)
[ "def", "get_short_string", "(", "self", ")", ":", "def", "_fmt_classname", "(", "cls", ")", ":", "arr", "=", "\"\"", "# Test for arrays", "while", "cls", ".", "startswith", "(", "\"[\"", ")", ":", "arr", "+=", "\"[\"", "cls", "=", "cls", "[", "1", ":", "]", "# is a object type", "if", "cls", ".", "startswith", "(", "\"L\"", ")", ":", "cls", "=", "cls", "[", "1", ":", "-", "1", "]", "# only return last element", "if", "\"/\"", "in", "cls", ":", "cls", "=", "cls", ".", "rsplit", "(", "\"/\"", ",", "1", ")", "[", "1", "]", "return", "arr", "+", "cls", "clsname", "=", "_fmt_classname", "(", "self", ".", "get_class_name", "(", ")", ")", "param", ",", "ret", "=", "self", ".", "get_descriptor", "(", ")", "[", "1", ":", "]", ".", "split", "(", "\")\"", ")", "params", "=", "map", "(", "_fmt_classname", ",", "param", ".", "split", "(", "\" \"", ")", ")", "desc", "=", "\"({}){}\"", ".", "format", "(", "\" \"", ".", "join", "(", "params", ")", ",", "_fmt_classname", "(", "ret", ")", ")", "return", "\"{cls} {meth} {desc}\"", ".", "format", "(", "cls", "=", "clsname", ",", "meth", "=", "self", ".", "get_name", "(", ")", ",", "desc", "=", "desc", ")" ]
Return a shorter formatted String which encodes this method. The returned name has the form: <classname> <methodname> ([arguments ...])<returntype> * All Class names are condensed to the actual name (no package). * Access flags are not returned. * <init> and <clinit> are NOT replaced by the classname! This name might not be unique! :return: str
[ "Return", "a", "shorter", "formatted", "String", "which", "encodes", "this", "method", ".", "The", "returned", "name", "has", "the", "form", ":", "<classname", ">", "<methodname", ">", "(", "[", "arguments", "...", "]", ")", "<returntype", ">" ]
python
train
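To illustrate the condensation rules, here is a standalone re-implementation of the inner _fmt_classname helper with made-up descriptors; it is a sketch for illustration, not androguard API.

def fmt_classname(cls):
    # mirror of the nested helper in get_short_string above
    arr = ''
    while cls.startswith('['):   # unwrap array markers
        arr += '['
        cls = cls[1:]
    if cls.startswith('L'):      # object type: strip 'L' and trailing ';'
        cls = cls[1:-1]
    if '/' in cls:               # keep only the last path element
        cls = cls.rsplit('/', 1)[1]
    return arr + cls

print(fmt_classname('Lcom/example/app/MainActivity;'))  # MainActivity
print(fmt_classname('[Ljava/lang/String;'))             # [String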
elsampsa/valkka-live
valkka/mvision/alpr/base.py
https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/mvision/alpr/base.py#L75-L102
def init(self):
    """Init alpr

    The LicensePlateDetector object gets instantiated in the multiprocess, so the library is imported in the multiprocess (i.e. "other side of the fork") as well
    """
    # some modules might need to be imported "on the other side of the fork"
    # .. but then, when importing this module, the import is not tested
    #
    # from openalpr import Alpr
    from valkka.mvision.alpr.openalpr_fix import Alpr
    self.movement = MovementDetector()
    self.alpr = Alpr(self.country, self.conf_file, self.runtime_data)
    if not self.alpr.is_loaded():
        self.alpr = None
        return
    self.alpr.set_top_n(self.top_n)
    self.reset()
    """
    # test in ipython:
    from valkka.mvision.alpr.openalpr_fix import Alpr
    country="eu"
    conf_file="/usr/share/openalpr/config/openalpr.defaults.conf"
    runtime_data="/usr/share/openalpr/runtime_data"
    a = Alpr(country, conf_file, runtime_data)
    a.is_loaded()
    """
[ "def", "init", "(", "self", ")", ":", "# some modules might need to be imported \"on the other side of the fork\"", "# .. but the, when importing this module, the import is not tested", "#", "# ", "# from openalpr import Alpr", "from", "valkka", ".", "mvision", ".", "alpr", ".", "openalpr_fix", "import", "Alpr", "self", ".", "movement", "=", "MovementDetector", "(", ")", "self", ".", "alpr", "=", "Alpr", "(", "self", ".", "country", ",", "self", ".", "conf_file", ",", "self", ".", "runtime_data", ")", "if", "not", "self", ".", "alpr", ".", "is_loaded", "(", ")", ":", "self", ".", "alpr", "=", "None", "return", "self", ".", "alpr", ".", "set_top_n", "(", "self", ".", "top_n", ")", "self", ".", "reset", "(", ")", "\"\"\"\n # test in ipython:\n from valkka.mvision.alpr.openalpr_fix import Alpr\n country=\"eu\"\n conf_file=\"/usr/share/openalpr/config/openalpr.defaults.conf\"\n runtime_data=\"/usr/share/openalpr/runtime_data\"\n a = Alpr(country, conf_file, runtime_data)\n a.is_loaded() \n \"\"\"" ]
Init alpr The LicensePlateDetector object gets instantiated in the multiprocess, so the library is imported in the multiprocess (i.e. "other side of the fork") as well
[ "Init", "alpr", "The", "LicensePlateDetector", "object", "gets", "instantiated", "in", "the", "multiprocess", "so", "the", "library", "is", "imported", "in", "the", "multiprocess", "(", "i", ".", "e", ".", "other", "side", "of", "the", "fork", ")", "as", "well" ]
python
train
BD2KGenomics/protect
src/protect/pipeline/ProTECT.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/pipeline/ProTECT.py#L436-L449
def parse_config_file(job, config_file, max_cores=None): """ Parse the config file and spawn a ProTECT job for every input sample. :param str config_file: Path to the input config file :param int max_cores: The maximum cores to use for any single high-compute job. """ sample_set, univ_options, processed_tool_inputs = _parse_config_file(job, config_file, max_cores) # Start a job for each sample in the sample set for patient_id in sample_set.keys(): job.addFollowOnJobFn(launch_protect, sample_set[patient_id], univ_options, processed_tool_inputs) return None
[ "def", "parse_config_file", "(", "job", ",", "config_file", ",", "max_cores", "=", "None", ")", ":", "sample_set", ",", "univ_options", ",", "processed_tool_inputs", "=", "_parse_config_file", "(", "job", ",", "config_file", ",", "max_cores", ")", "# Start a job for each sample in the sample set", "for", "patient_id", "in", "sample_set", ".", "keys", "(", ")", ":", "job", ".", "addFollowOnJobFn", "(", "launch_protect", ",", "sample_set", "[", "patient_id", "]", ",", "univ_options", ",", "processed_tool_inputs", ")", "return", "None" ]
Parse the config file and spawn a ProTECT job for every input sample. :param str config_file: Path to the input config file :param int max_cores: The maximum cores to use for any single high-compute job.
[ "Parse", "the", "config", "file", "and", "spawn", "a", "ProTECT", "job", "for", "every", "input", "sample", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/amulet/utils.py#L638-L650
def validate_list_of_identical_dicts(self, list_of_dicts): """Check that all dicts within a list are identical.""" hashes = [] for _dict in list_of_dicts: hashes.append(hash(frozenset(_dict.items()))) self.log.debug('Hashes: {}'.format(hashes)) if len(set(hashes)) == 1: self.log.debug('Dicts within list are identical') else: return 'Dicts within list are not identical' return None
[ "def", "validate_list_of_identical_dicts", "(", "self", ",", "list_of_dicts", ")", ":", "hashes", "=", "[", "]", "for", "_dict", "in", "list_of_dicts", ":", "hashes", ".", "append", "(", "hash", "(", "frozenset", "(", "_dict", ".", "items", "(", ")", ")", ")", ")", "self", ".", "log", ".", "debug", "(", "'Hashes: {}'", ".", "format", "(", "hashes", ")", ")", "if", "len", "(", "set", "(", "hashes", ")", ")", "==", "1", ":", "self", ".", "log", ".", "debug", "(", "'Dicts within list are identical'", ")", "else", ":", "return", "'Dicts within list are not identical'", "return", "None" ]
Check that all dicts within a list are identical.
[ "Check", "that", "all", "dicts", "within", "a", "list", "are", "identical", "." ]
python
train
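The hash(frozenset(d.items())) trick above can be checked in isolation; note it requires the dict values to be hashable.

dicts = [{'a': 1, 'b': 2}, {'b': 2, 'a': 1}, {'a': 1, 'b': 2}]
hashes = [hash(frozenset(d.items())) for d in dicts]
assert len(set(hashes)) == 1  # insertion order does not affect the hash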
RI-imaging/qpformat
qpformat/file_formats/series_hdf5_qpimage.py
https://github.com/RI-imaging/qpformat/blob/364e29d7d9e8b9f1d7a4a25c753d1baf9d73d5eb/qpformat/file_formats/series_hdf5_qpimage.py#L49-L60
def get_qpimage_raw(self, idx): """Return QPImage without background correction""" with self._qpseries() as qps: qpi = qps.get_qpimage(index=idx).copy() # Remove previously performed background correction qpi.set_bg_data(None) # Force meta data for key in self.meta_data: qpi[key] = self.meta_data[key] # set identifier qpi["identifier"] = self.get_identifier(idx) return qpi
[ "def", "get_qpimage_raw", "(", "self", ",", "idx", ")", ":", "with", "self", ".", "_qpseries", "(", ")", "as", "qps", ":", "qpi", "=", "qps", ".", "get_qpimage", "(", "index", "=", "idx", ")", ".", "copy", "(", ")", "# Remove previously performed background correction", "qpi", ".", "set_bg_data", "(", "None", ")", "# Force meta data", "for", "key", "in", "self", ".", "meta_data", ":", "qpi", "[", "key", "]", "=", "self", ".", "meta_data", "[", "key", "]", "# set identifier", "qpi", "[", "\"identifier\"", "]", "=", "self", ".", "get_identifier", "(", "idx", ")", "return", "qpi" ]
Return QPImage without background correction
[ "Return", "QPImage", "without", "background", "correction" ]
python
train
limix/glimix-core
glimix_core/_util/random.py
https://github.com/limix/glimix-core/blob/cddd0994591d100499cc41c1f480ddd575e7a980/glimix_core/_util/random.py#L1-L23
def multivariate_normal(random, mean, cov): """ Draw random samples from a multivariate normal distribution. Parameters ---------- random : np.random.RandomState instance Random state. mean : array_like Mean of the n-dimensional distribution. cov : array_like Covariance matrix of the distribution. It must be symmetric and positive-definite for proper sampling. Returns ------- out : ndarray The drawn sample. """ from numpy.linalg import cholesky L = cholesky(cov) return L @ random.randn(L.shape[0]) + mean
[ "def", "multivariate_normal", "(", "random", ",", "mean", ",", "cov", ")", ":", "from", "numpy", ".", "linalg", "import", "cholesky", "L", "=", "cholesky", "(", "cov", ")", "return", "L", "@", "random", ".", "randn", "(", "L", ".", "shape", "[", "0", "]", ")", "+", "mean" ]
Draw random samples from a multivariate normal distribution. Parameters ---------- random : np.random.RandomState instance Random state. mean : array_like Mean of the n-dimensional distribution. cov : array_like Covariance matrix of the distribution. It must be symmetric and positive-definite for proper sampling. Returns ------- out : ndarray The drawn sample.
[ "Draw", "random", "samples", "from", "a", "multivariate", "normal", "distribution", "." ]
python
valid
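A usage sketch for multivariate_normal above, assuming glimix-core is installed (the import path comes from the record's own path field); everything else is standard NumPy:

import numpy as np
from glimix_core._util.random import multivariate_normal

random = np.random.RandomState(0)      # seeded so the draw is reproducible
mean = np.array([0.0, 1.0])
cov = np.array([[1.0, 0.2],
                [0.2, 1.0]])           # symmetric positive-definite, as required
sample = multivariate_normal(random, mean, cov)
print(sample.shape)                    # (2,) -- L @ z + mean has covariance L @ L.T = cov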
google-research/batch-ppo
agents/tools/in_graph_batch_env.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/tools/in_graph_batch_env.py#L104-L125
def reset(self, indices=None): """Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations. """ if indices is None: indices = tf.range(len(self._batch_env)) observ_dtype = self._parse_dtype(self._batch_env.observation_space) observ = tf.py_func( self._batch_env.reset, [indices], observ_dtype, name='reset') observ = tf.check_numerics(observ, 'observ') reward = tf.zeros_like(indices, tf.float32) done = tf.zeros_like(indices, tf.bool) with tf.control_dependencies([ tf.scatter_update(self._observ, indices, observ), tf.scatter_update(self._reward, indices, reward), tf.scatter_update(self._done, indices, done)]): return tf.identity(observ)
[ "def", "reset", "(", "self", ",", "indices", "=", "None", ")", ":", "if", "indices", "is", "None", ":", "indices", "=", "tf", ".", "range", "(", "len", "(", "self", ".", "_batch_env", ")", ")", "observ_dtype", "=", "self", ".", "_parse_dtype", "(", "self", ".", "_batch_env", ".", "observation_space", ")", "observ", "=", "tf", ".", "py_func", "(", "self", ".", "_batch_env", ".", "reset", ",", "[", "indices", "]", ",", "observ_dtype", ",", "name", "=", "'reset'", ")", "observ", "=", "tf", ".", "check_numerics", "(", "observ", ",", "'observ'", ")", "reward", "=", "tf", ".", "zeros_like", "(", "indices", ",", "tf", ".", "float32", ")", "done", "=", "tf", ".", "zeros_like", "(", "indices", ",", "tf", ".", "bool", ")", "with", "tf", ".", "control_dependencies", "(", "[", "tf", ".", "scatter_update", "(", "self", ".", "_observ", ",", "indices", ",", "observ", ")", ",", "tf", ".", "scatter_update", "(", "self", ".", "_reward", ",", "indices", ",", "reward", ")", ",", "tf", ".", "scatter_update", "(", "self", ".", "_done", ",", "indices", ",", "done", ")", "]", ")", ":", "return", "tf", ".", "identity", "(", "observ", ")" ]
Reset the batch of environments. Args: indices: The batch indices of the environments to reset; defaults to all. Returns: Batch tensor of the new observations.
[ "Reset", "the", "batch", "of", "environments", "." ]
python
train
codeinn/vcs
vcs/backends/git/inmemory.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/git/inmemory.py#L161-L199
def _get_missing_trees(self, path, root_tree): """ Creates missing ``Tree`` objects for the given path. :param path: path given as a string. It may be a path to a file node (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must end with slash (i.e. ``foo/bar/``). :param root_tree: ``dulwich.objects.Tree`` object from which we start traversing (should be commit's root tree) """ dirpath = posixpath.split(path)[0] dirs = dirpath.split('/') if not dirs or dirs == ['']: return [] def get_tree_for_dir(tree, dirname): for name, mode, id in tree.iteritems(): if name == dirname: obj = self.repository._repo[id] if isinstance(obj, objects.Tree): return obj else: raise RepositoryError("Cannot create directory %s " "at tree %s as path is occupied and is not a " "Tree" % (dirname, tree)) return None trees = [] parent = root_tree for dirname in dirs: tree = get_tree_for_dir(parent, dirname) if tree is None: tree = objects.Tree() dirmode = 040000 parent.add(dirmode, dirname, tree.id) parent = tree # Always append tree trees.append(tree) return trees
[ "def", "_get_missing_trees", "(", "self", ",", "path", ",", "root_tree", ")", ":", "dirpath", "=", "posixpath", ".", "split", "(", "path", ")", "[", "0", "]", "dirs", "=", "dirpath", ".", "split", "(", "'/'", ")", "if", "not", "dirs", "or", "dirs", "==", "[", "''", "]", ":", "return", "[", "]", "def", "get_tree_for_dir", "(", "tree", ",", "dirname", ")", ":", "for", "name", ",", "mode", ",", "id", "in", "tree", ".", "iteritems", "(", ")", ":", "if", "name", "==", "dirname", ":", "obj", "=", "self", ".", "repository", ".", "_repo", "[", "id", "]", "if", "isinstance", "(", "obj", ",", "objects", ".", "Tree", ")", ":", "return", "obj", "else", ":", "raise", "RepositoryError", "(", "\"Cannot create directory %s \"", "\"at tree %s as path is occupied and is not a \"", "\"Tree\"", "%", "(", "dirname", ",", "tree", ")", ")", "return", "None", "trees", "=", "[", "]", "parent", "=", "root_tree", "for", "dirname", "in", "dirs", ":", "tree", "=", "get_tree_for_dir", "(", "parent", ",", "dirname", ")", "if", "tree", "is", "None", ":", "tree", "=", "objects", ".", "Tree", "(", ")", "dirmode", "=", "040000", "parent", ".", "add", "(", "dirmode", ",", "dirname", ",", "tree", ".", "id", ")", "parent", "=", "tree", "# Always append tree", "trees", ".", "append", "(", "tree", ")", "return", "trees" ]
Creates missing ``Tree`` objects for the given path. :param path: path given as a string. It may be a path to a file node (i.e. ``foo/bar/baz.txt``) or directory path - in that case it must end with slash (i.e. ``foo/bar/``). :param root_tree: ``dulwich.objects.Tree`` object from which we start traversing (should be commit's root tree)
[ "Creates", "missing", "Tree", "objects", "for", "the", "given", "path", "." ]
python
train
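The part of _get_missing_trees that needs no dulwich is how a node path becomes the list of directories that may need Tree objects; a standalone sketch of just that step:

import posixpath

for path in ('foo/bar/baz.txt', 'foo/bar/', 'baz.txt'):
    dirpath = posixpath.split(path)[0]   # drop the final path component
    dirs = dirpath.split('/')
    if not dirs or dirs == ['']:
        dirs = []                        # top-level node: no trees to create
    print(path, '->', dirs)
# foo/bar/baz.txt -> ['foo', 'bar']
# foo/bar/        -> ['foo', 'bar']
# baz.txt         -> []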
bwohlberg/sporco
sporco/linalg.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L123-L158
def pyfftw_rfftn_empty_aligned(shape, axes, dtype, order='C', n=None): """ Construct an empty byte-aligned array for efficient use by :mod:`pyfftw` functions :func:`pyfftw.interfaces.numpy_fft.rfftn` and :func:`pyfftw.interfaces.numpy_fft.irfftn`. The shape of the empty array is appropriate for the output of :func:`pyfftw.interfaces.numpy_fft.rfftn` applied to an array of the shape specified by parameter `shape`, and for the input of the corresponding :func:`pyfftw.interfaces.numpy_fft.irfftn` call that reverses this operation. Parameters ---------- shape : sequence of ints Output array shape axes : sequence of ints Axes on which the FFT will be computed dtype : dtype Real dtype from which the complex dtype of the output array is derived order : {'C', 'F'}, optional (default 'C') Specify whether arrays should be stored in row-major (C-style) or column-major (Fortran-style) order n : int, optional (default None) Output array should be aligned to n-byte boundary Returns ------- a : ndarray Empty array with required byte-alignment """ ashp = list(shape) raxis = axes[-1] ashp[raxis] = ashp[raxis] // 2 + 1 cdtype = complex_dtype(dtype) return pyfftw.empty_aligned(ashp, cdtype, order, n)
[ "def", "pyfftw_rfftn_empty_aligned", "(", "shape", ",", "axes", ",", "dtype", ",", "order", "=", "'C'", ",", "n", "=", "None", ")", ":", "ashp", "=", "list", "(", "shape", ")", "raxis", "=", "axes", "[", "-", "1", "]", "ashp", "[", "raxis", "]", "=", "ashp", "[", "raxis", "]", "//", "2", "+", "1", "cdtype", "=", "complex_dtype", "(", "dtype", ")", "return", "pyfftw", ".", "empty_aligned", "(", "ashp", ",", "cdtype", ",", "order", ",", "n", ")" ]
Construct an empty byte-aligned array for efficient use by :mod:`pyfftw` functions :func:`pyfftw.interfaces.numpy_fft.rfftn` and :func:`pyfftw.interfaces.numpy_fft.irfftn`. The shape of the empty array is appropriate for the output of :func:`pyfftw.interfaces.numpy_fft.rfftn` applied to an array of the shape specified by parameter `shape`, and for the input of the corresponding :func:`pyfftw.interfaces.numpy_fft.irfftn` call that reverses this operation. Parameters ---------- shape : sequence of ints Output array shape axes : sequence of ints Axes on which the FFT will be computed dtype : dtype Real dtype from which the complex dtype of the output array is derived order : {'C', 'F'}, optional (default 'C') Specify whether arrays should be stored in row-major (C-style) or column-major (Fortran-style) order n : int, optional (default None) Output array should be aligned to n-byte boundary Returns ------- a : ndarray Empty array with required byte-alignment
[ "Construct", "an", "empty", "byte", "-", "aligned", "array", "for", "efficient", "use", "by", ":", "mod", ":", "pyfftw", "functions", ":", "func", ":", "pyfftw", ".", "interfaces", ".", "numpy_fft", ".", "rfftn", "and", ":", "func", ":", "pyfftw", ".", "interfaces", ".", "numpy_fft", ".", "irfftn", ".", "The", "shape", "of", "the", "empty", "array", "is", "appropriate", "for", "the", "output", "of", ":", "func", ":", "pyfftw", ".", "interfaces", ".", "numpy_fft", ".", "rfftn", "applied", "to", "an", "array", "of", "the", "shape", "specified", "by", "parameter", "shape", "and", "for", "the", "input", "of", "the", "corresponding", ":", "func", ":", "pyfftw", ".", "interfaces", ".", "numpy_fft", ".", "irfftn", "call", "that", "reverses", "this", "operation", "." ]
python
train
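The shape arithmetic above follows the real-FFT convention that the last transformed axis keeps only N // 2 + 1 bins; numpy's rfftn (a stand-in here, since pyfftw may not be installed) confirms the computed shape:

import numpy as np

shape, axes = (8, 16), (0, 1)
ashp = list(shape)
ashp[axes[-1]] = ashp[axes[-1]] // 2 + 1         # same computation as the helper
out = np.fft.rfftn(np.zeros(shape), axes=axes)
print(out.shape, tuple(ashp))                    # (8, 9) (8, 9)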
boriel/zxbasic
asmparse.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L329-L337
def define(self, value, lineno, namespace=None): """ Defines label value. It can be anything. Even an AST """ if self.defined: error(lineno, "label '%s' already defined at line %i" % (self.name, self.lineno)) self.value = value self.lineno = lineno self.namespace = NAMESPACE if namespace is None else namespace
[ "def", "define", "(", "self", ",", "value", ",", "lineno", ",", "namespace", "=", "None", ")", ":", "if", "self", ".", "defined", ":", "error", "(", "lineno", ",", "\"label '%s' already defined at line %i\"", "%", "(", "self", ".", "name", ",", "self", ".", "lineno", ")", ")", "self", ".", "value", "=", "value", "self", ".", "lineno", "=", "lineno", "self", ".", "namespace", "=", "NAMESPACE", "if", "namespace", "is", "None", "else", "namespace" ]
Defines label value. It can be anything. Even an AST
[ "Defines", "label", "value", ".", "It", "can", "be", "anything", ".", "Even", "an", "AST" ]
python
train
Koed00/django-q
django_q/tasks.py
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L222-L246
def fetch_cached(task_id, wait=0, broker=None): """ Return the processed task from the cache backend """ if not broker: broker = get_broker() start = time() while True: r = broker.cache.get('{}:{}'.format(broker.list_key, task_id)) if r: task = SignedPackage.loads(r) t = Task(id=task['id'], name=task['name'], func=task['func'], hook=task.get('hook'), args=task['args'], kwargs=task['kwargs'], started=task['started'], stopped=task['stopped'], result=task['result'], success=task['success']) return t if (time() - start) * 1000 >= wait >= 0: break sleep(0.01)
[ "def", "fetch_cached", "(", "task_id", ",", "wait", "=", "0", ",", "broker", "=", "None", ")", ":", "if", "not", "broker", ":", "broker", "=", "get_broker", "(", ")", "start", "=", "time", "(", ")", "while", "True", ":", "r", "=", "broker", ".", "cache", ".", "get", "(", "'{}:{}'", ".", "format", "(", "broker", ".", "list_key", ",", "task_id", ")", ")", "if", "r", ":", "task", "=", "SignedPackage", ".", "loads", "(", "r", ")", "t", "=", "Task", "(", "id", "=", "task", "[", "'id'", "]", ",", "name", "=", "task", "[", "'name'", "]", ",", "func", "=", "task", "[", "'func'", "]", ",", "hook", "=", "task", ".", "get", "(", "'hook'", ")", ",", "args", "=", "task", "[", "'args'", "]", ",", "kwargs", "=", "task", "[", "'kwargs'", "]", ",", "started", "=", "task", "[", "'started'", "]", ",", "stopped", "=", "task", "[", "'stopped'", "]", ",", "result", "=", "task", "[", "'result'", "]", ",", "success", "=", "task", "[", "'success'", "]", ")", "return", "t", "if", "(", "time", "(", ")", "-", "start", ")", "*", "1000", ">=", "wait", ">=", "0", ":", "break", "sleep", "(", "0.01", ")" ]
Return the processed task from the cache backend
[ "Return", "the", "processed", "task", "from", "the", "cache", "backend" ]
python
train
chrisjrn/registrasion
registrasion/views.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/views.py#L629-L672
def checkout(request, user_id=None): ''' Runs the checkout process for the current cart. If the query string contains ``fix_errors=true``, Registrasion will attempt to fix errors preventing the system from checking out, including by cancelling expired discounts and vouchers, and removing any unavailable products. Arguments: user_id (castable to int): If the requesting user is staff, then the user ID can be used to run checkout for another user. Returns: render or redirect: If the invoice is generated successfully, or there's already a valid invoice for the current cart, redirect to ``invoice``. If there are errors when generating the invoice, render ``registrasion/checkout_errors.html`` with the following data:: { "error_list", [str, ...] # The errors to display. } ''' if user_id is not None: if request.user.is_staff: user = User.objects.get(id=int(user_id)) else: raise Http404() else: user = request.user current_cart = CartController.for_user(user) if "fix_errors" in request.GET and request.GET["fix_errors"] == "true": current_cart.fix_simple_errors() try: current_invoice = InvoiceController.for_cart(current_cart.cart) except ValidationError as ve: return _checkout_errors(request, ve) return redirect("invoice", current_invoice.invoice.id)
[ "def", "checkout", "(", "request", ",", "user_id", "=", "None", ")", ":", "if", "user_id", "is", "not", "None", ":", "if", "request", ".", "user", ".", "is_staff", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "id", "=", "int", "(", "user_id", ")", ")", "else", ":", "raise", "Http404", "(", ")", "else", ":", "user", "=", "request", ".", "user", "current_cart", "=", "CartController", ".", "for_user", "(", "user", ")", "if", "\"fix_errors\"", "in", "request", ".", "GET", "and", "request", ".", "GET", "[", "\"fix_errors\"", "]", "==", "\"true\"", ":", "current_cart", ".", "fix_simple_errors", "(", ")", "try", ":", "current_invoice", "=", "InvoiceController", ".", "for_cart", "(", "current_cart", ".", "cart", ")", "except", "ValidationError", "as", "ve", ":", "return", "_checkout_errors", "(", "request", ",", "ve", ")", "return", "redirect", "(", "\"invoice\"", ",", "current_invoice", ".", "invoice", ".", "id", ")" ]
Runs the checkout process for the current cart. If the query string contains ``fix_errors=true``, Registrasion will attempt to fix errors preventing the system from checking out, including by cancelling expired discounts and vouchers, and removing any unavailable products. Arguments: user_id (castable to int): If the requesting user is staff, then the user ID can be used to run checkout for another user. Returns: render or redirect: If the invoice is generated successfully, or there's already a valid invoice for the current cart, redirect to ``invoice``. If there are errors when generating the invoice, render ``registrasion/checkout_errors.html`` with the following data:: { "error_list", [str, ...] # The errors to display. }
[ "Runs", "the", "checkout", "process", "for", "the", "current", "cart", "." ]
python
test
pachyderm/python-pachyderm
src/python_pachyderm/pfs_client.py
https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L60-L69
def inspect_repo(self, repo_name): """ Returns info about a specific Repo. Params: * repo_name: Name of the repo. """ req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name)) res = self.stub.InspectRepo(req, metadata=self.metadata) return res
[ "def", "inspect_repo", "(", "self", ",", "repo_name", ")", ":", "req", "=", "proto", ".", "InspectRepoRequest", "(", "repo", "=", "proto", ".", "Repo", "(", "name", "=", "repo_name", ")", ")", "res", "=", "self", ".", "stub", ".", "InspectRepo", "(", "req", ",", "metadata", "=", "self", ".", "metadata", ")", "return", "res" ]
Returns info about a specific Repo. Params: * repo_name: Name of the repo.
[ "Returns", "info", "about", "a", "specific", "Repo", ".", "Params", ":", "*", "repo_name", ":", "Name", "of", "the", "repo", "." ]
python
train
angr/angr
angr/sim_type.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_type.py#L1006-L1015
def do_preprocess(defn): """ Run a string through the C preprocessor that ships with pycparser but is weirdly inaccessible? """ from pycparser.ply import lex, cpp lexer = lex.lex(cpp) p = cpp.Preprocessor(lexer) # p.add_path(dir) will add dir to the include search path p.parse(defn) return ''.join(tok.value for tok in p.parser if tok.type not in p.ignore)
[ "def", "do_preprocess", "(", "defn", ")", ":", "from", "pycparser", ".", "ply", "import", "lex", ",", "cpp", "lexer", "=", "lex", ".", "lex", "(", "cpp", ")", "p", "=", "cpp", ".", "Preprocessor", "(", "lexer", ")", "# p.add_path(dir) will add dir to the include search path", "p", ".", "parse", "(", "defn", ")", "return", "''", ".", "join", "(", "tok", ".", "value", "for", "tok", "in", "p", ".", "parser", "if", "tok", ".", "type", "not", "in", "p", ".", "ignore", ")" ]
Run a string through the C preprocessor that ships with pycparser but is weirdly inaccessible?
[ "Run", "a", "string", "through", "the", "C", "preprocessor", "that", "ships", "with", "pycparser", "but", "is", "weirdly", "inaccessible?" ]
python
train
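A hypothetical call to do_preprocess, assuming the function above is in scope and pycparser is installed; the macro below is illustrative and not from the angr sources:

defn = '''
#define WORD int
WORD add(WORD a, WORD b);
'''
# The preprocessor expands the WORD macro, so the output declares
# 'int add(int a, int b);' (modulo whitespace).
print(do_preprocess(defn))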
matllubos/django-is-core
is_core/utils/__init__.py
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/utils/__init__.py#L301-L310
def get_obj_url(request, obj): """ Returns object URL if the currently logged-in user has permissions to see the object """ if (is_callable(getattr(obj, 'get_absolute_url', None)) and (not hasattr(obj, 'can_see_edit_link') or (is_callable(getattr(obj, 'can_see_edit_link', None)) and obj.can_see_edit_link(request)))): return call_method_with_unknown_input(obj.get_absolute_url, request=request) else: return get_url_from_model_core(request, obj)
[ "def", "get_obj_url", "(", "request", ",", "obj", ")", ":", "if", "(", "is_callable", "(", "getattr", "(", "obj", ",", "'get_absolute_url'", ",", "None", ")", ")", "and", "(", "not", "hasattr", "(", "obj", ",", "'can_see_edit_link'", ")", "or", "(", "is_callable", "(", "getattr", "(", "obj", ",", "'can_see_edit_link'", ",", "None", ")", ")", "and", "obj", ".", "can_see_edit_link", "(", "request", ")", ")", ")", ")", ":", "return", "call_method_with_unknown_input", "(", "obj", ".", "get_absolute_url", ",", "request", "=", "request", ")", "else", ":", "return", "get_url_from_model_core", "(", "request", ",", "obj", ")" ]
Returns object URL if the currently logged-in user has permissions to see the object
[ "Returns", "object", "URL", "if", "the", "currently", "logged", "-", "in", "user", "has", "permissions", "to", "see", "the", "object" ]
python
train
Yelp/uwsgi_metrics
uwsgi_metrics/metrics.py
https://github.com/Yelp/uwsgi_metrics/blob/534966fd461ff711aecd1e3d4caaafdc23ac33f0/uwsgi_metrics/metrics.py#L208-L216
def counter(module, name, count=1): """ Record an event's occurrence in a counter: :: counter(__name__, 'my_counter') """ counter = get_metric('counters', module, name, Counter()) counter.inc(count)
[ "def", "counter", "(", "module", ",", "name", ",", "count", "=", "1", ")", ":", "counter", "=", "get_metric", "(", "'counters'", ",", "module", ",", "name", ",", "Counter", "(", ")", ")", "counter", ".", "inc", "(", "count", ")" ]
Record an event's occurrence in a counter: :: counter(__name__, 'my_counter')
[ "Record", "an", "event", "s", "occurrence", "in", "a", "counter", ":", "::" ]
python
train
tensorpack/tensorpack
tensorpack/tfutils/dependency.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/tfutils/dependency.py#L16-L38
def dependency_of_targets(targets, op): """ Check that op is in the subgraph induced by the dependencies of targets. The result is memoized. This is useful if some SessionRunHooks should be run only together with certain ops. Args: targets: a tuple of ops or tensors. The targets to find dependencies of. op (tf.Operation or tf.Tensor): Returns: bool: True if any one of `targets` depend on `op`. """ # TODO tensorarray? sparsetensor? if isinstance(op, tf.Tensor): op = op.op assert isinstance(op, tf.Operation), op from tensorflow.contrib.graph_editor import get_backward_walk_ops # alternative implementation can use graph_util.extract_sub_graph dependent_ops = get_backward_walk_ops(targets, control_inputs=True) return op in dependent_ops
[ "def", "dependency_of_targets", "(", "targets", ",", "op", ")", ":", "# TODO tensorarray? sparsetensor?", "if", "isinstance", "(", "op", ",", "tf", ".", "Tensor", ")", ":", "op", "=", "op", ".", "op", "assert", "isinstance", "(", "op", ",", "tf", ".", "Operation", ")", ",", "op", "from", "tensorflow", ".", "contrib", ".", "graph_editor", "import", "get_backward_walk_ops", "# alternative implementation can use graph_util.extract_sub_graph", "dependent_ops", "=", "get_backward_walk_ops", "(", "targets", ",", "control_inputs", "=", "True", ")", "return", "op", "in", "dependent_ops" ]
Check that op is in the subgraph induced by the dependencies of targets. The result is memoized. This is useful if some SessionRunHooks should be run only together with certain ops. Args: targets: a tuple of ops or tensors. The targets to find dependencies of. op (tf.Operation or tf.Tensor): Returns: bool: True if any one of `targets` depend on `op`.
[ "Check", "that", "op", "is", "in", "the", "subgraph", "induced", "by", "the", "dependencies", "of", "targets", ".", "The", "result", "is", "memoized", "." ]
python
train
wmayner/pyphi
pyphi/compute/parallel.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/compute/parallel.py#L24-L46
def get_num_processes(): """Return the number of processes to use in parallel.""" cpu_count = multiprocessing.cpu_count() if config.NUMBER_OF_CORES == 0: raise ValueError( 'Invalid NUMBER_OF_CORES; value may not be 0.') if config.NUMBER_OF_CORES > cpu_count: log.info('Requesting %s cores; only %s available', config.NUMBER_OF_CORES, cpu_count) return cpu_count if config.NUMBER_OF_CORES < 0: num = cpu_count + config.NUMBER_OF_CORES + 1 if num <= 0: raise ValueError( 'Invalid NUMBER_OF_CORES; negative value is too negative: ' 'requesting {} cores, {} available.'.format(num, cpu_count)) return num return config.NUMBER_OF_CORES
[ "def", "get_num_processes", "(", ")", ":", "cpu_count", "=", "multiprocessing", ".", "cpu_count", "(", ")", "if", "config", ".", "NUMBER_OF_CORES", "==", "0", ":", "raise", "ValueError", "(", "'Invalid NUMBER_OF_CORES; value may not be 0.'", ")", "if", "config", ".", "NUMBER_OF_CORES", ">", "cpu_count", ":", "log", ".", "info", "(", "'Requesting %s cores; only %s available'", ",", "config", ".", "NUMBER_OF_CORES", ",", "cpu_count", ")", "return", "cpu_count", "if", "config", ".", "NUMBER_OF_CORES", "<", "0", ":", "num", "=", "cpu_count", "+", "config", ".", "NUMBER_OF_CORES", "+", "1", "if", "num", "<=", "0", ":", "raise", "ValueError", "(", "'Invalid NUMBER_OF_CORES; negative value is too negative: '", "'requesting {} cores, {} available.'", ".", "format", "(", "num", ",", "cpu_count", ")", ")", "return", "num", "return", "config", ".", "NUMBER_OF_CORES" ]
Return the number of processes to use in parallel.
[ "Return", "the", "number", "of", "processes", "to", "use", "in", "parallel", "." ]
python
train
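A standalone sketch of the core-resolution rule in get_num_processes above, with the config and logging stripped out (the function name here is illustrative):

def resolve_cores(requested, cpu_count):
    # 0 is invalid, oversubscription is clamped, and negative values mean
    # "all cores minus a few": -1 -> cpu_count, -2 -> cpu_count - 1, ...
    if requested == 0:
        raise ValueError('value may not be 0')
    if requested > cpu_count:
        return cpu_count
    if requested < 0:
        num = cpu_count + requested + 1
        if num <= 0:
            raise ValueError('negative value is too negative')
        return num
    return requested

print(resolve_cores(-1, 8), resolve_cores(-2, 8), resolve_cores(16, 8))  # 8 7 8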
saltstack/salt
salt/utils/gitfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/gitfs.py#L2146-L2269
def init_remotes(self, remotes, per_remote_overrides=(), per_remote_only=PER_REMOTE_ONLY, global_only=GLOBAL_ONLY): ''' Initialize remotes ''' # The global versions of the auth params (gitfs_user, # gitfs_password, etc.) default to empty strings. If any of them # are defined and the provider is not one that supports auth, then # error out and do not proceed. override_params = copy.deepcopy(per_remote_overrides) global_auth_params = [ '{0}_{1}'.format(self.role, x) for x in AUTH_PARAMS if self.opts['{0}_{1}'.format(self.role, x)] ] if self.provider in AUTH_PROVIDERS: override_params += AUTH_PARAMS elif global_auth_params: msg = ( '{0} authentication was configured, but the \'{1}\' ' '{0}_provider does not support authentication. The ' 'providers for which authentication is supported in {0} ' 'are: {2}.'.format( self.role, self.provider, ', '.join(AUTH_PROVIDERS) ) ) if self.role == 'gitfs': msg += ( ' See the GitFS Walkthrough in the Salt documentation ' 'for further information.' ) log.critical(msg) failhard(self.role) per_remote_defaults = {} global_values = set(override_params) global_values.update(set(global_only)) for param in global_values: key = '{0}_{1}'.format(self.role, param) if key not in self.opts: log.critical( 'Key \'%s\' not present in global configuration. This is ' 'a bug, please report it.', key ) failhard(self.role) per_remote_defaults[param] = enforce_types(key, self.opts[key]) self.remotes = [] for remote in remotes: repo_obj = self.git_providers[self.provider]( self.opts, remote, per_remote_defaults, per_remote_only, override_params, self.cache_root, self.role ) if hasattr(repo_obj, 'repo'): # Sanity check and assign the credential parameter repo_obj.verify_auth() repo_obj.setup_callbacks() if self.opts['__role'] == 'minion' and repo_obj.new: # Perform initial fetch on masterless minion repo_obj.fetch() # Reverse map to be used when running envs() to detect the # available envs. repo_obj.saltenv_revmap = {} for saltenv, saltenv_conf in six.iteritems(repo_obj.saltenv): if 'ref' in saltenv_conf: ref = saltenv_conf['ref'] repo_obj.saltenv_revmap.setdefault( ref, []).append(saltenv) if saltenv == 'base': # Remove redundant 'ref' config for base saltenv repo_obj.saltenv[saltenv].pop('ref') if ref != repo_obj.base: log.warning( 'The \'base\' environment has been ' 'defined in the \'saltenv\' param for %s ' 'remote %s and will override the ' 'branch/tag specified by %s_base (or a ' 'per-remote \'base\' parameter).', self.role, repo_obj.id, self.role ) # Rewrite 'base' config param repo_obj.base = ref # Build list of all envs defined by ref mappings in the # per-remote 'saltenv' param. We won't add any matching envs # from the global saltenv map to the revmap. all_envs = [] for env_names in six.itervalues(repo_obj.saltenv_revmap): all_envs.extend(env_names) # Add the global saltenv map to the reverse map, skipping envs # explicitly mapped in the per-remote 'saltenv' param. for key, conf in six.iteritems(repo_obj.global_saltenv): if key not in all_envs and 'ref' in conf: repo_obj.saltenv_revmap.setdefault( conf['ref'], []).append(key) self.remotes.append(repo_obj) # Don't allow collisions in cachedir naming cachedir_map = {} for repo in self.remotes: cachedir_map.setdefault(repo.cachedir, []).append(repo.id) collisions = [x for x in cachedir_map if len(cachedir_map[x]) > 1] if collisions: for dirname in collisions: log.critical( 'The following %s remotes have conflicting cachedirs: ' '%s. Resolve this using a per-remote parameter called ' '\'name\'.', self.role, ', '.join(cachedir_map[dirname]) ) failhard(self.role) if any(x.new for x in self.remotes): self.write_remote_map()
[ "def", "init_remotes", "(", "self", ",", "remotes", ",", "per_remote_overrides", "=", "(", ")", ",", "per_remote_only", "=", "PER_REMOTE_ONLY", ",", "global_only", "=", "GLOBAL_ONLY", ")", ":", "# The global versions of the auth params (gitfs_user,", "# gitfs_password, etc.) default to empty strings. If any of them", "# are defined and the provider is not one that supports auth, then", "# error out and do not proceed.", "override_params", "=", "copy", ".", "deepcopy", "(", "per_remote_overrides", ")", "global_auth_params", "=", "[", "'{0}_{1}'", ".", "format", "(", "self", ".", "role", ",", "x", ")", "for", "x", "in", "AUTH_PARAMS", "if", "self", ".", "opts", "[", "'{0}_{1}'", ".", "format", "(", "self", ".", "role", ",", "x", ")", "]", "]", "if", "self", ".", "provider", "in", "AUTH_PROVIDERS", ":", "override_params", "+=", "AUTH_PARAMS", "elif", "global_auth_params", ":", "msg", "=", "(", "'{0} authentication was configured, but the \\'{1}\\' '", "'{0}_provider does not support authentication. The '", "'providers for which authentication is supported in {0} '", "'are: {2}.'", ".", "format", "(", "self", ".", "role", ",", "self", ".", "provider", ",", "', '", ".", "join", "(", "AUTH_PROVIDERS", ")", ")", ")", "if", "self", ".", "role", "==", "'gitfs'", ":", "msg", "+=", "(", "' See the GitFS Walkthrough in the Salt documentation '", "'for further information.'", ")", "log", ".", "critical", "(", "msg", ")", "failhard", "(", "self", ".", "role", ")", "per_remote_defaults", "=", "{", "}", "global_values", "=", "set", "(", "override_params", ")", "global_values", ".", "update", "(", "set", "(", "global_only", ")", ")", "for", "param", "in", "global_values", ":", "key", "=", "'{0}_{1}'", ".", "format", "(", "self", ".", "role", ",", "param", ")", "if", "key", "not", "in", "self", ".", "opts", ":", "log", ".", "critical", "(", "'Key \\'%s\\' not present in global configuration. This is '", "'a bug, please report it.'", ",", "key", ")", "failhard", "(", "self", ".", "role", ")", "per_remote_defaults", "[", "param", "]", "=", "enforce_types", "(", "key", ",", "self", ".", "opts", "[", "key", "]", ")", "self", ".", "remotes", "=", "[", "]", "for", "remote", "in", "remotes", ":", "repo_obj", "=", "self", ".", "git_providers", "[", "self", ".", "provider", "]", "(", "self", ".", "opts", ",", "remote", ",", "per_remote_defaults", ",", "per_remote_only", ",", "override_params", ",", "self", ".", "cache_root", ",", "self", ".", "role", ")", "if", "hasattr", "(", "repo_obj", ",", "'repo'", ")", ":", "# Sanity check and assign the credential parameter", "repo_obj", ".", "verify_auth", "(", ")", "repo_obj", ".", "setup_callbacks", "(", ")", "if", "self", ".", "opts", "[", "'__role'", "]", "==", "'minion'", "and", "repo_obj", ".", "new", ":", "# Perform initial fetch on masterless minion", "repo_obj", ".", "fetch", "(", ")", "# Reverse map to be used when running envs() to detect the", "# available envs.", "repo_obj", ".", "saltenv_revmap", "=", "{", "}", "for", "saltenv", ",", "saltenv_conf", "in", "six", ".", "iteritems", "(", "repo_obj", ".", "saltenv", ")", ":", "if", "'ref'", "in", "saltenv_conf", ":", "ref", "=", "saltenv_conf", "[", "'ref'", "]", "repo_obj", ".", "saltenv_revmap", ".", "setdefault", "(", "ref", ",", "[", "]", ")", ".", "append", "(", "saltenv", ")", "if", "saltenv", "==", "'base'", ":", "# Remove redundant 'ref' config for base saltenv", "repo_obj", ".", "saltenv", "[", "saltenv", "]", ".", "pop", "(", "'ref'", ")", "if", "ref", "!=", "repo_obj", ".", "base", ":", "log", ".", "warning", "(", "'The \\'base\\' environment has been '", "'defined in the \\'saltenv\\' param for %s '", "'remote %s and will override the '", "'branch/tag specified by %s_base (or a '", "'per-remote \\'base\\' parameter).'", ",", "self", ".", "role", ",", "repo_obj", ".", "id", ",", "self", ".", "role", ")", "# Rewrite 'base' config param", "repo_obj", ".", "base", "=", "ref", "# Build list of all envs defined by ref mappings in the", "# per-remote 'saltenv' param. We won't add any matching envs", "# from the global saltenv map to the revmap.", "all_envs", "=", "[", "]", "for", "env_names", "in", "six", ".", "itervalues", "(", "repo_obj", ".", "saltenv_revmap", ")", ":", "all_envs", ".", "extend", "(", "env_names", ")", "# Add the global saltenv map to the reverse map, skipping envs", "# explicitly mapped in the per-remote 'saltenv' param.", "for", "key", ",", "conf", "in", "six", ".", "iteritems", "(", "repo_obj", ".", "global_saltenv", ")", ":", "if", "key", "not", "in", "all_envs", "and", "'ref'", "in", "conf", ":", "repo_obj", ".", "saltenv_revmap", ".", "setdefault", "(", "conf", "[", "'ref'", "]", ",", "[", "]", ")", ".", "append", "(", "key", ")", "self", ".", "remotes", ".", "append", "(", "repo_obj", ")", "# Don't allow collisions in cachedir naming", "cachedir_map", "=", "{", "}", "for", "repo", "in", "self", ".", "remotes", ":", "cachedir_map", ".", "setdefault", "(", "repo", ".", "cachedir", ",", "[", "]", ")", ".", "append", "(", "repo", ".", "id", ")", "collisions", "=", "[", "x", "for", "x", "in", "cachedir_map", "if", "len", "(", "cachedir_map", "[", "x", "]", ")", ">", "1", "]", "if", "collisions", ":", "for", "dirname", "in", "collisions", ":", "log", ".", "critical", "(", "'The following %s remotes have conflicting cachedirs: '", "'%s. Resolve this using a per-remote parameter called '", "'\\'name\\'.'", ",", "self", ".", "role", ",", "', '", ".", "join", "(", "cachedir_map", "[", "dirname", "]", ")", ")", "failhard", "(", "self", ".", "role", ")", "if", "any", "(", "x", ".", "new", "for", "x", "in", "self", ".", "remotes", ")", ":", "self", ".", "write_remote_map", "(", ")" ]
Initialize remotes
[ "Initialize", "remotes" ]
python
train
iskandr/fancyimpute
fancyimpute/iterative_imputer.py
https://github.com/iskandr/fancyimpute/blob/9f0837d387c7303d5c8c925a9989ca77a1a96e3e/fancyimpute/iterative_imputer.py#L817-L863
def _initial_imputation(self, X): """Perform initial imputation for input X. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. Returns ------- Xt : ndarray, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. mask_missing_values : ndarray, shape (n_samples, n_features) Input data's missing indicator matrix, where "n_samples" is the number of samples and "n_features" is the number of features. """ # TODO: change False to "allow-nan" if is_scalar_nan(self.missing_values): force_all_finite = False # "allow-nan" else: force_all_finite = True X = check_array(X, dtype=FLOAT_DTYPES, order="F", force_all_finite=force_all_finite) _check_inputs_dtype(X, self.missing_values) mask_missing_values = _get_mask(X, self.missing_values) if self.initial_imputer_ is None: self.initial_imputer_ = _SimpleImputer( missing_values=self.missing_values, strategy=self.initial_strategy) X_filled = self.initial_imputer_.fit_transform(X) else: X_filled = self.initial_imputer_.transform(X) valid_mask = np.flatnonzero(np.logical_not( np.isnan(self.initial_imputer_.statistics_))) Xt = X[:, valid_mask] mask_missing_values = mask_missing_values[:, valid_mask] return Xt, X_filled, mask_missing_values
[ "def", "_initial_imputation", "(", "self", ",", "X", ")", ":", "# TODO: change False to \"allow-nan\"", "if", "is_scalar_nan", "(", "self", ".", "missing_values", ")", ":", "force_all_finite", "=", "False", "# \"allow-nan\"", "else", ":", "force_all_finite", "=", "True", "X", "=", "check_array", "(", "X", ",", "dtype", "=", "FLOAT_DTYPES", ",", "order", "=", "\"F\"", ",", "force_all_finite", "=", "force_all_finite", ")", "_check_inputs_dtype", "(", "X", ",", "self", ".", "missing_values", ")", "mask_missing_values", "=", "_get_mask", "(", "X", ",", "self", ".", "missing_values", ")", "if", "self", ".", "initial_imputer_", "is", "None", ":", "self", ".", "initial_imputer_", "=", "_SimpleImputer", "(", "missing_values", "=", "self", ".", "missing_values", ",", "strategy", "=", "self", ".", "initial_strategy", ")", "X_filled", "=", "self", ".", "initial_imputer_", ".", "fit_transform", "(", "X", ")", "else", ":", "X_filled", "=", "self", ".", "initial_imputer_", ".", "transform", "(", "X", ")", "valid_mask", "=", "np", ".", "flatnonzero", "(", "np", ".", "logical_not", "(", "np", ".", "isnan", "(", "self", ".", "initial_imputer_", ".", "statistics_", ")", ")", ")", "Xt", "=", "X", "[", ":", ",", "valid_mask", "]", "mask_missing_values", "=", "mask_missing_values", "[", ":", ",", "valid_mask", "]", "return", "Xt", ",", "X_filled", ",", "mask_missing_values" ]
Perform initial imputation for input X. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. Returns ------- Xt : ndarray, shape (n_samples, n_features) Input data, where "n_samples" is the number of samples and "n_features" is the number of features. X_filled : ndarray, shape (n_samples, n_features) Input data with the most recent imputations. mask_missing_values : ndarray, shape (n_samples, n_features) Input data's missing indicator matrix, where "n_samples" is the number of samples and "n_features" is the number of features.
[ "Perform", "initial", "imputation", "for", "input", "X", "." ]
python
train
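The initial pass above delegates to a vendored _SimpleImputer; scikit-learn's public SimpleImputer exposes the same fit_transform interface and serves as a stand-in for a runnable sketch (assuming scikit-learn is installed):

import numpy as np
from sklearn.impute import SimpleImputer

X = np.array([[1.0, 2.0],
              [np.nan, 3.0],
              [7.0, np.nan]])
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
X_filled = imputer.fit_transform(X)
print(X_filled)  # NaNs replaced by the column means: 4.0 and 2.5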
autokey/autokey
lib/autokey/scripting.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/scripting.py#L934-L961
def wait_for_exist(self, title, timeOut=5): """ Wait for window with the given title to be created Usage: C{window.wait_for_exist(title, timeOut=5)} If the window is in existence, returns True. Otherwise, returns False if the window has not been created by the time the timeout has elapsed. @param title: title to match against (as a regular expression) @param timeOut: period (seconds) to wait before giving up @rtype: boolean """ regex = re.compile(title) waited = 0 while waited <= timeOut: retCode, output = self._run_wmctrl(["-l"]) for line in output.split('\n'): if regex.match(line[14:].split(' ', 1)[-1]): return True if timeOut == 0: break # zero length timeout, if not matched go straight to end time.sleep(0.3) waited += 0.3 return False
[ "def", "wait_for_exist", "(", "self", ",", "title", ",", "timeOut", "=", "5", ")", ":", "regex", "=", "re", ".", "compile", "(", "title", ")", "waited", "=", "0", "while", "waited", "<=", "timeOut", ":", "retCode", ",", "output", "=", "self", ".", "_run_wmctrl", "(", "[", "\"-l\"", "]", ")", "for", "line", "in", "output", ".", "split", "(", "'\\n'", ")", ":", "if", "regex", ".", "match", "(", "line", "[", "14", ":", "]", ".", "split", "(", "' '", ",", "1", ")", "[", "-", "1", "]", ")", ":", "return", "True", "if", "timeOut", "==", "0", ":", "break", "# zero length timeout, if not matched go straight to end", "time", ".", "sleep", "(", "0.3", ")", "waited", "+=", "0.3", "return", "False" ]
Wait for window with the given title to be created Usage: C{window.wait_for_exist(title, timeOut=5)} If the window is in existence, returns True. Otherwise, returns False if the window has not been created by the time the timeout has elapsed. @param title: title to match against (as a regular expression) @param timeOut: period (seconds) to wait before giving up @rtype: boolean
[ "Wait", "for", "window", "with", "the", "given", "title", "to", "be", "created", "Usage", ":", "C", "{", "window", ".", "wait_for_exist", "(", "title", "timeOut", "=", "5", ")", "}" ]
python
train
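Stripped of wmctrl, wait_for_exist is a poll-until-timeout loop; a generic sketch of the same pattern, where check is a hypothetical stand-in for the window-title match:

import time

def wait_for(check, timeout=5, interval=0.3):
    waited = 0
    while waited <= timeout:
        if check():
            return True
        if timeout == 0:
            break              # zero timeout: exactly one check, no sleep
        time.sleep(interval)
        waited += interval
    return False

print(wait_for(lambda: True, timeout=0))   # True: single immediate check
print(wait_for(lambda: False, timeout=0))  # False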
boriel/zxbasic
asmparse.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L1380-L1384
def p_expr_addr(p): """ expr : ADDR """ # The current instruction address p[0] = Expr.makenode(Container(MEMORY.org, p.lineno(1)))
[ "def", "p_expr_addr", "(", "p", ")", ":", "# The current instruction address", "p", "[", "0", "]", "=", "Expr", ".", "makenode", "(", "Container", "(", "MEMORY", ".", "org", ",", "p", ".", "lineno", "(", "1", ")", ")", ")" ]
expr : ADDR
[ "expr", ":", "ADDR" ]
python
train
faucamp/python-gsmmodem
gsmmodem/modem.py
https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/modem.py#L568-L573
def smsc(self, smscNumber): """ Set the default SMSC number to use when sending SMS messages """ if smscNumber != self._smscNumber: if self.alive: self.write('AT+CSCA="{0}"'.format(smscNumber)) self._smscNumber = smscNumber
[ "def", "smsc", "(", "self", ",", "smscNumber", ")", ":", "if", "smscNumber", "!=", "self", ".", "_smscNumber", ":", "if", "self", ".", "alive", ":", "self", ".", "write", "(", "'AT+CSCA=\"{0}\"'", ".", "format", "(", "smscNumber", ")", ")", "self", ".", "_smscNumber", "=", "smscNumber" ]
Set the default SMSC number to use when sending SMS messages
[ "Set", "the", "default", "SMSC", "number", "to", "use", "when", "sending", "SMS", "messages" ]
python
train
craffel/mir_eval
mir_eval/beat.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L236-L335
def goto(reference_beats, estimated_beats, goto_threshold=0.35, goto_mu=0.2, goto_sigma=0.2): """Calculate Goto's score, a binary 1 or 0 depending on some specific heuristic criteria Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds goto_threshold : float Threshold of beat error for a beat to be "correct" (Default value = 0.35) goto_mu : float The mean of the beat errors in the continuously correct track must be less than this (Default value = 0.2) goto_sigma : float The std of the beat errors in the continuously correct track must be less than this (Default value = 0.2) Returns ------- goto_score : float Either 1.0 or 0.0 if some specific criteria are met """ validate(reference_beats, estimated_beats) # When estimated beats are empty, no beats are correct; metric is 0 if estimated_beats.size == 0 or reference_beats.size == 0: return 0. # Error for each beat beat_error = np.ones(reference_beats.shape[0]) # Flag for whether the reference and estimated beats are paired paired = np.zeros(reference_beats.shape[0]) # Keep track of Goto's three criteria goto_criteria = 0 for n in range(1, reference_beats.shape[0]-1): # Get previous inter-reference-beat-interval previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1]) # Window start - in the middle of the current beat and the previous window_min = reference_beats[n] - previous_interval # Next inter-reference-beat-interval next_interval = 0.5*(reference_beats[n+1] - reference_beats[n]) # Window end - in the middle of the current beat and the next window_max = reference_beats[n] + next_interval # Get estimated beats in the window beats_in_window = np.logical_and((estimated_beats >= window_min), (estimated_beats < window_max)) # False negative/positive if beats_in_window.sum() == 0 or beats_in_window.sum() > 1: paired[n] = 0 beat_error[n] = 1 else: # Single beat is paired! paired[n] = 1 # Get offset of the estimated beat and the reference beat offset = estimated_beats[beats_in_window] - reference_beats[n] # Scale by previous or next interval if offset < 0: beat_error[n] = offset/previous_interval else: beat_error[n] = offset/next_interval # Get indices of incorrect beats incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold) # All beats are correct (first and last will be 0 so always correct) if incorrect_beats.shape[0] < 3: # Get the track of correct beats track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1] goto_criteria = 1 else: # Get the track of maximal length track_len = np.max(np.diff(incorrect_beats)) track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0] # Is the track length at least 25% of the song? if track_len - 1 > .25*(reference_beats.shape[0] - 2): goto_criteria = 1 start_beat = incorrect_beats[track_start] end_beat = incorrect_beats[track_start + 1] track = beat_error[start_beat:end_beat + 1] # If we have a track if goto_criteria: # Are mean and std of the track less than the required thresholds? if np.mean(np.abs(track)) < goto_mu \ and np.std(track, ddof=1) < goto_sigma: goto_criteria = 3 # If all criteria are met, score is 100%! return 1.0*(goto_criteria == 3)
[ "def", "goto", "(", "reference_beats", ",", "estimated_beats", ",", "goto_threshold", "=", "0.35", ",", "goto_mu", "=", "0.2", ",", "goto_sigma", "=", "0.2", ")", ":", "validate", "(", "reference_beats", ",", "estimated_beats", ")", "# When estimated beats are empty, no beats are correct; metric is 0", "if", "estimated_beats", ".", "size", "==", "0", "or", "reference_beats", ".", "size", "==", "0", ":", "return", "0.", "# Error for each beat", "beat_error", "=", "np", ".", "ones", "(", "reference_beats", ".", "shape", "[", "0", "]", ")", "# Flag for whether the reference and estimated beats are paired", "paired", "=", "np", ".", "zeros", "(", "reference_beats", ".", "shape", "[", "0", "]", ")", "# Keep track of Goto's three criteria", "goto_criteria", "=", "0", "for", "n", "in", "range", "(", "1", ",", "reference_beats", ".", "shape", "[", "0", "]", "-", "1", ")", ":", "# Get previous inter-reference-beat-interval", "previous_interval", "=", "0.5", "*", "(", "reference_beats", "[", "n", "]", "-", "reference_beats", "[", "n", "-", "1", "]", ")", "# Window start - in the middle of the current beat and the previous", "window_min", "=", "reference_beats", "[", "n", "]", "-", "previous_interval", "# Next inter-reference-beat-interval", "next_interval", "=", "0.5", "*", "(", "reference_beats", "[", "n", "+", "1", "]", "-", "reference_beats", "[", "n", "]", ")", "# Window end - in the middle of the current beat and the next", "window_max", "=", "reference_beats", "[", "n", "]", "+", "next_interval", "# Get estimated beats in the window", "beats_in_window", "=", "np", ".", "logical_and", "(", "(", "estimated_beats", ">=", "window_min", ")", ",", "(", "estimated_beats", "<", "window_max", ")", ")", "# False negative/positive", "if", "beats_in_window", ".", "sum", "(", ")", "==", "0", "or", "beats_in_window", ".", "sum", "(", ")", ">", "1", ":", "paired", "[", "n", "]", "=", "0", "beat_error", "[", "n", "]", "=", "1", "else", ":", "# Single beat is paired!", "paired", "[", "n", "]", "=", "1", "# Get offset of the estimated beat and the reference beat", "offset", "=", "estimated_beats", "[", "beats_in_window", "]", "-", "reference_beats", "[", "n", "]", "# Scale by previous or next interval", "if", "offset", "<", "0", ":", "beat_error", "[", "n", "]", "=", "offset", "/", "previous_interval", "else", ":", "beat_error", "[", "n", "]", "=", "offset", "/", "next_interval", "# Get indices of incorrect beats", "incorrect_beats", "=", "np", ".", "flatnonzero", "(", "np", ".", "abs", "(", "beat_error", ")", ">", "goto_threshold", ")", "# All beats are correct (first and last will be 0 so always correct)", "if", "incorrect_beats", ".", "shape", "[", "0", "]", "<", "3", ":", "# Get the track of correct beats", "track", "=", "beat_error", "[", "incorrect_beats", "[", "0", "]", "+", "1", ":", "incorrect_beats", "[", "-", "1", "]", "-", "1", "]", "goto_criteria", "=", "1", "else", ":", "# Get the track of maximal length", "track_len", "=", "np", ".", "max", "(", "np", ".", "diff", "(", "incorrect_beats", ")", ")", "track_start", "=", "np", ".", "flatnonzero", "(", "np", ".", "diff", "(", "incorrect_beats", ")", "==", "track_len", ")", "[", "0", "]", "# Is the track length at least 25% of the song?", "if", "track_len", "-", "1", ">", ".25", "*", "(", "reference_beats", ".", "shape", "[", "0", "]", "-", "2", ")", ":", "goto_criteria", "=", "1", "start_beat", "=", "incorrect_beats", "[", "track_start", "]", "end_beat", "=", "incorrect_beats", "[", "track_start", "+", "1", "]", "track", "=", "beat_error", "[", "start_beat", ":", "end_beat", "+", "1", "]", "# If we have a track", "if", "goto_criteria", ":", "# Are mean and std of the track less than the required thresholds?", "if", "np", ".", "mean", "(", "np", ".", "abs", "(", "track", ")", ")", "<", "goto_mu", "and", "np", ".", "std", "(", "track", ",", "ddof", "=", "1", ")", "<", "goto_sigma", ":", "goto_criteria", "=", "3", "# If all criteria are met, score is 100%!", "return", "1.0", "*", "(", "goto_criteria", "==", "3", ")" ]
Calculate Goto's score, a binary 1 or 0 depending on some specific heuristic criteria Examples -------- >>> reference_beats = mir_eval.io.load_events('reference.txt') >>> reference_beats = mir_eval.beat.trim_beats(reference_beats) >>> estimated_beats = mir_eval.io.load_events('estimated.txt') >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats) >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats) Parameters ---------- reference_beats : np.ndarray reference beat times, in seconds estimated_beats : np.ndarray query beat times, in seconds goto_threshold : float Threshold of beat error for a beat to be "correct" (Default value = 0.35) goto_mu : float The mean of the beat errors in the continuously correct track must be less than this (Default value = 0.2) goto_sigma : float The std of the beat errors in the continuously correct track must be less than this (Default value = 0.2) Returns ------- goto_score : float Either 1.0 or 0.0 if some specific criteria are met
[ "Calculate", "Goto", "s", "score", "a", "binary", "1", "or", "0", "depending", "on", "some", "specific", "heuristic", "criteria" ]
python
train
box/rotunicode
rotunicode/rotunicode.py
https://github.com/box/rotunicode/blob/6149b6bb5bb50d322db248acfdb910dc3cb1bcc2/rotunicode/rotunicode.py#L76-L99
def decode(cls, string, errors='strict'): """Return the decoded version of a string. :param string: The input string to decode. :type string: `basestring` :param errors: The error handling scheme. Only 'strict' is supported. :type errors: `basestring` :return: Tuple of decoded string and number of input bytes consumed. :rtype: `tuple` (`unicode`, `int`) """ if errors != 'strict': raise UnicodeError('Unsupported error handling {0}'.format(errors)) unicode_string = cls._ensure_unicode_string(string) decoded = unicode_string.translate(cls._decoding_table) return decoded, len(string)
[ "def", "decode", "(", "cls", ",", "string", ",", "errors", "=", "'strict'", ")", ":", "if", "errors", "!=", "'strict'", ":", "raise", "UnicodeError", "(", "'Unsupported error handling {0}'", ".", "format", "(", "errors", ")", ")", "unicode_string", "=", "cls", ".", "_ensure_unicode_string", "(", "string", ")", "decoded", "=", "unicode_string", ".", "translate", "(", "cls", ".", "_decoding_table", ")", "return", "decoded", ",", "len", "(", "string", ")" ]
Return the decoded version of a string. :param string: The input string to decode. :type string: `basestring` :param errors: The error handling scheme. Only 'strict' is supported. :type errors: `basestring` :return: Tuple of decoded string and number of input bytes consumed. :rtype: `tuple` (`unicode`, `int`)
[ "Return", "the", "decoded", "version", "of", "a", "string", "." ]
python
train
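The decode above is an ordinary str.translate over a mapping table; a minimal standalone sketch with a two-character table (illustrative only, not the real RotUnicode mapping; Python 3 syntax for brevity):

# str.translate expects a mapping from code points to replacements.
decoding_table = {ord('ĥ'): 'h', ord('î'): 'i'}
string = 'ĥî'
decoded = string.translate(decoding_table)
print((decoded, len(string)))  # ('hi', 2) -- the (output, input length) pair the codec returns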
akfullfo/taskforce
taskforce/task.py
https://github.com/akfullfo/taskforce/blob/bc6dd744bd33546447d085dbd18a350532220193/taskforce/task.py#L2456-L2478
def manage(self): """ Manage the task to handle restarts, reconfiguration, etc. Returns True to request a shorter period before the next call, False if nothing special is needed. """ log = self._params.get('log', self._discard) if self._stopping: log.debug("Task '%s', stopping, retrying stop()", self._name) return self.stop() now = time.time() if self._started and self._limit: if now > self._limit: log.debug("Task '%s', time limit exceeded by %s, stopping", self._name, deltafmt(now - self._limit)) return self.stop() else: log.debug("Task '%s', time limit remaining %s", self._name, deltafmt(self._limit - now)) if self._legion.is_exiting(): log.debug("Not managing '%s', legion is exiting", self._name) return False log.debug("managing '%s'", self._name) return self._start()
[ "def", "manage", "(", "self", ")", ":", "log", "=", "self", ".", "_params", ".", "get", "(", "'log'", ",", "self", ".", "_discard", ")", "if", "self", ".", "_stopping", ":", "log", ".", "debug", "(", "\"Task '%s', stopping, retrying stop()\"", ",", "self", ".", "_name", ")", "return", "self", ".", "stop", "(", ")", "now", "=", "time", ".", "time", "(", ")", "if", "self", ".", "_started", "and", "self", ".", "_limit", ":", "if", "now", ">", "self", ".", "_limit", ":", "log", ".", "debug", "(", "\"Task '%s', time limit exceeded by %s, stopping\"", ",", "self", ".", "_name", ",", "deltafmt", "(", "now", "-", "self", ".", "_limit", ")", ")", "return", "self", ".", "stop", "(", ")", "else", ":", "log", ".", "debug", "(", "\"Task '%s', time limit remaining %s\"", ",", "self", ".", "_name", ",", "deltafmt", "(", "self", ".", "_limit", "-", "now", ")", ")", "if", "self", ".", "_legion", ".", "is_exiting", "(", ")", ":", "log", ".", "debug", "(", "\"Not managing '%s', legion is exiting\"", ",", "self", ".", "_name", ")", "return", "False", "log", ".", "debug", "(", "\"managing '%s'\"", ",", "self", ".", "_name", ")", "return", "self", ".", "_start", "(", ")" ]
Manage the task to handle restarts, reconfiguration, etc. Returns True to request a shorter period before the next call, False if nothing special is needed.
[ "Manage", "the", "task", "to", "handle", "restarts", "reconfiguration", "etc", "." ]
python
train
tanghaibao/jcvi
jcvi/compara/pad.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/pad.py#L100-L160
def pad(args): """ %prog pad blastfile cdtfile --qbed q.pad.bed --sbed s.pad.bed Test and reconstruct candidate PADs. """ from jcvi.formats.cdt import CDT p = OptionParser(pad.__doc__) p.set_beds() p.add_option("--cutoff", default=.3, type="float", help="The clustering cutoff to call similar [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) cutoff = opts.cutoff blastfile, cdtfile = args qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts) cdt = CDT(cdtfile) qparts = list(cdt.iter_partitions(cutoff=cutoff)) sparts = list(cdt.iter_partitions(cutoff=cutoff, gtr=False)) qid, sid = {}, {} for i, part in enumerate(qparts): qid.update(dict((x, i) for x in part)) for i, part in enumerate(sparts): sid.update(dict((x, i) for x in part)) # Without writing files, conversion from PAD to merged PAD is done in memory for q in qbed: q.seqid = qid[q.seqid] for s in sbed: s.seqid = sid[s.seqid] qnames = range(len(qparts)) snames = range(len(sparts)) logmp = make_arrays(blastfile, qbed, sbed, qnames, snames) m, n = logmp.shape pvalue_cutoff = 1e-30 cutoff = - log(pvalue_cutoff) significant = [] for i in xrange(m): for j in xrange(n): score = logmp[i, j] if score < cutoff: continue significant.append((qparts[i], sparts[j], score)) for a, b, score in significant: print("|".join(a), "|".join(b), score) logging.debug("Collected {0} PAR comparisons significant at (P < {1}).".\ format(len(significant), pvalue_cutoff)) return significant
[ "def", "pad", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "cdt", "import", "CDT", "p", "=", "OptionParser", "(", "pad", ".", "__doc__", ")", "p", ".", "set_beds", "(", ")", "p", ".", "add_option", "(", "\"--cutoff\"", ",", "default", "=", ".3", ",", "type", "=", "\"float\"", ",", "help", "=", "\"The clustering cutoff to call similar [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "2", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "cutoff", "=", "opts", ".", "cutoff", "blastfile", ",", "cdtfile", "=", "args", "qbed", ",", "sbed", ",", "qorder", ",", "sorder", ",", "is_self", "=", "check_beds", "(", "blastfile", ",", "p", ",", "opts", ")", "cdt", "=", "CDT", "(", "cdtfile", ")", "qparts", "=", "list", "(", "cdt", ".", "iter_partitions", "(", "cutoff", "=", "cutoff", ")", ")", "sparts", "=", "list", "(", "cdt", ".", "iter_partitions", "(", "cutoff", "=", "cutoff", ",", "gtr", "=", "False", ")", ")", "qid", ",", "sid", "=", "{", "}", ",", "{", "}", "for", "i", ",", "part", "in", "enumerate", "(", "qparts", ")", ":", "qid", ".", "update", "(", "dict", "(", "(", "x", ",", "i", ")", "for", "x", "in", "part", ")", ")", "for", "i", ",", "part", "in", "enumerate", "(", "sparts", ")", ":", "sid", ".", "update", "(", "dict", "(", "(", "x", ",", "i", ")", "for", "x", "in", "part", ")", ")", "# Without writing files, conversion from PAD to merged PAD is done in memory", "for", "q", "in", "qbed", ":", "q", ".", "seqid", "=", "qid", "[", "q", ".", "seqid", "]", "for", "s", "in", "sbed", ":", "s", ".", "seqid", "=", "sid", "[", "s", ".", "seqid", "]", "qnames", "=", "range", "(", "len", "(", "qparts", ")", ")", "snames", "=", "range", "(", "len", "(", "sparts", ")", ")", "logmp", "=", "make_arrays", "(", "blastfile", ",", "qbed", ",", "sbed", ",", "qnames", ",", "snames", ")", "m", ",", "n", "=", "logmp", ".", "shape", "pvalue_cutoff", "=", "1e-30", "cutoff", "=", "-", "log", "(", "pvalue_cutoff", ")", "significant", "=", "[", "]", "for", "i", "in", "xrange", "(", "m", ")", ":", "for", "j", "in", "xrange", "(", "n", ")", ":", "score", "=", "logmp", "[", "i", ",", "j", "]", "if", "score", "<", "cutoff", ":", "continue", "significant", ".", "append", "(", "(", "qparts", "[", "i", "]", ",", "sparts", "[", "j", "]", ",", "score", ")", ")", "for", "a", ",", "b", ",", "score", "in", "significant", ":", "print", "(", "\"|\"", ".", "join", "(", "a", ")", ",", "\"|\"", ".", "join", "(", "b", ")", ",", "score", ")", "logging", ".", "debug", "(", "\"Collected {0} PAR comparisons significant at (P < {1}).\"", ".", "format", "(", "len", "(", "significant", ")", ",", "pvalue_cutoff", ")", ")", "return", "significant" ]
%prog pad blastfile cdtfile --qbed q.pad.bed --sbed s.pad.bed Test and reconstruct candidate PADs.
[ "%prog", "pad", "blastfile", "cdtfile", "--", "qbed", "q", ".", "pad", ".", "bed", "--", "sbed", "s", ".", "pad", ".", "bed" ]
python
train
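A hedged sketch of the significance filter at the heart of pad() above: the p-value cutoff is converted into a -log(p) score threshold, and every partition pair whose score clears it is kept. Everything below is illustrative — the partitions and the score matrix are made up, and only numpy plus the standard library are assumed.

from math import log

import numpy as np

# Hypothetical partitions and a hypothetical -log(p) score matrix, with
# one row per query partition and one column per subject partition, as
# make_arrays() would produce.
qparts = [("q1", "q2"), ("q3",)]
sparts = [("s1",), ("s2", "s3")]
logmp = np.array([[75.2, 10.0],
                  [5.0, 120.4]])

pvalue_cutoff = 1e-30
score_cutoff = -log(pvalue_cutoff)  # ~69.08; scores at or above this pass

significant = [(qparts[i], sparts[j], logmp[i, j])
               for i in range(logmp.shape[0])
               for j in range(logmp.shape[1])
               if logmp[i, j] >= score_cutoff]

for a, b, score in significant:
    print("|".join(a), "|".join(b), score)  # two pairs survive the cutoff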
SuperCowPowers/workbench
workbench/workers/url.py
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/workers/url.py#L14-L19
def execute(self, input_data): ''' Execute the URL worker ''' string_output = input_data['strings']['string_list'] flatten = ' '.join(string_output) urls = self.url_match.findall(flatten) return {'url_list': urls}
[ "def", "execute", "(", "self", ",", "input_data", ")", ":", "string_output", "=", "input_data", "[", "'strings'", "]", "[", "'string_list'", "]", "flatten", "=", "' '", ".", "join", "(", "string_output", ")", "urls", "=", "self", ".", "url_match", ".", "findall", "(", "flatten", ")", "return", "{", "'url_list'", ":", "urls", "}" ]
Execute the URL worker
[ "Execute", "the", "URL", "worker" ]
python
train
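The worker above pulls 'string_list' out of the upstream strings output, flattens it into one string, and runs a URL regex over it. A minimal stand-in follows; the real workbench worker compiles self.url_match elsewhere, so the pattern below is an assumption.

import re

class URLWorker(object):
    # Assumed pattern; the actual workbench worker defines url_match itself.
    url_match = re.compile(r'https?://[^\s"\']+')

    def execute(self, input_data):
        string_output = input_data['strings']['string_list']
        flatten = ' '.join(string_output)
        return {'url_list': self.url_match.findall(flatten)}

worker = URLWorker()
sample = {'strings': {'string_list': ['visit http://example.com today', 'no url here']}}
print(worker.execute(sample))  # {'url_list': ['http://example.com']}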
log2timeline/plaso
plaso/output/shared_elastic.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/shared_elastic.py#L85-L103
def _CreateIndexIfNotExists(self, index_name, mappings): """Creates an Elasticsearch index if it does not exist. Args: index_name (str): name of the index. mappings (dict[str, object]): mappings of the index. Raises: RuntimeError: if the Elasticsearch index cannot be created. """ try: if not self._client.indices.exists(index_name): self._client.indices.create( body={'mappings': mappings}, index=index_name) except elasticsearch.exceptions.ConnectionError as exception: raise RuntimeError( 'Unable to create Elasticsearch index with error: {0!s}'.format( exception))
[ "def", "_CreateIndexIfNotExists", "(", "self", ",", "index_name", ",", "mappings", ")", ":", "try", ":", "if", "not", "self", ".", "_client", ".", "indices", ".", "exists", "(", "index_name", ")", ":", "self", ".", "_client", ".", "indices", ".", "create", "(", "body", "=", "{", "'mappings'", ":", "mappings", "}", ",", "index", "=", "index_name", ")", "except", "elasticsearch", ".", "exceptions", ".", "ConnectionError", "as", "exception", ":", "raise", "RuntimeError", "(", "'Unable to create Elasticsearch index with error: {0!s}'", ".", "format", "(", "exception", ")", ")" ]
Creates an Elasticsearch index if it does not exist. Args: index_name (str): name of the index. mappings (dict[str, object]): mappings of the index. Raises: RuntimeError: if the Elasticsearch index cannot be created.
[ "Creates", "an", "Elasticsearch", "index", "if", "it", "does", "not", "exist", "." ]
python
train
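The method above is a check-then-create guard: the create call is only issued when the index is missing, and connection failures are converted into a RuntimeError. A standalone sketch of the same pattern with the elasticsearch-py client; the host, index name, and mapping are illustrative, and a reachable cluster is assumed.

import elasticsearch

client = elasticsearch.Elasticsearch(['http://localhost:9200'])
index_name = 'plaso-events'  # hypothetical index name
mappings = {'properties': {'timestamp': {'type': 'date'}}}

try:
    # Idempotent: creation is skipped when the index already exists.
    if not client.indices.exists(index_name):
        client.indices.create(body={'mappings': mappings}, index=index_name)
except elasticsearch.exceptions.ConnectionError as exception:
    raise RuntimeError(
        'Unable to create Elasticsearch index with error: {0!s}'.format(exception))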
confluentinc/confluent-kafka-python
tools/download-s3.py
https://github.com/confluentinc/confluent-kafka-python/blob/5a8aeb741609e61eaccafff2a67fa494dd549e8b/tools/download-s3.py#L46-L54
def download(self, dirpath): """ Download artifact from S3 and store in dirpath directory. If the artifact is already downloaded nothing is done. """ if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0: return print('Downloading %s -> %s' % (self.path, self.lpath)) if dry_run: return self.arts.s3_bucket.download_file(self.path, self.lpath)
[ "def", "download", "(", "self", ",", "dirpath", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "lpath", ")", "and", "os", ".", "path", ".", "getsize", "(", "self", ".", "lpath", ")", ">", "0", ":", "return", "print", "(", "'Downloading %s -> %s'", "%", "(", "self", ".", "path", ",", "self", ".", "lpath", ")", ")", "if", "dry_run", ":", "return", "self", ".", "arts", ".", "s3_bucket", ".", "download_file", "(", "self", ".", "path", ",", "self", ".", "lpath", ")" ]
Download artifact from S3 and store in dirpath directory. If the artifact is already downloaded nothing is done.
[ "Download", "artifact", "from", "S3", "and", "store", "in", "dirpath", "directory", ".", "If", "the", "artifact", "is", "already", "downloaded", "nothing", "is", "done", "." ]
python
train
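download() above skips work when a non-empty local copy exists, prints its intent, and defers the actual transfer when dry_run is set. A self-contained sketch of the same guard using boto3 directly; the bucket and key names are hypothetical.

import os

import boto3

def download_if_missing(bucket_name, key, lpath, dry_run=False):
    # Skip when a non-empty local copy is already in place.
    if os.path.isfile(lpath) and os.path.getsize(lpath) > 0:
        return
    print('Downloading %s -> %s' % (key, lpath))
    if dry_run:
        return
    boto3.resource('s3').Bucket(bucket_name).download_file(key, lpath)

download_if_missing('my-artifacts', 'builds/pkg.tar.gz', '/tmp/pkg.tar.gz', dry_run=True)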
mikicz/arca
arca/backend/docker.py
https://github.com/mikicz/arca/blob/e67fdc00be473ecf8ec16d024e1a3f2c47ca882c/arca/backend/docker.py#L181-L249
def get_image_tag(self, requirements_option: RequirementsOptions, requirements_hash: Optional[str], dependencies: Optional[List[str]]) -> str: """ Returns the tag for images with the dependencies and requirements installed. 64-byte hexadecimal strings cannot be used as docker tags, so the prefixes are necessary. Double hashing the dependencies and requirements hash to make the final tag shorter. Prefixes: * Image type: * i – Inherited image * a – Arca base image * Requirements: * r – Does have some kind of requirements * s – Doesn't have requirements * Dependencies: * d – Does have dependencies * e – Doesn't have dependencies Possible outputs: * Inherited images: * `ise` – no requirements * `ide_<hash(requirements)>` – with requirements * From Arca base image: * `<Arca version>_<Python version>_ase` – no requirements and no dependencies * `<Arca version>_<Python version>_asd_<hash(dependencies)>` – only dependencies * `<Arca version>_<Python version>_are_<hash(requirements)>` – only requirements * `<Arca version>_<Python version>_ard_<hash(hash(dependencies) + hash(requirements))>` – both requirements and dependencies """ prefix = "" if self.inherit_image is None: prefix = "{}_{}_".format(arca.__version__, self.get_python_version()) prefix += "i" if self.inherit_image is not None else "a" prefix += "r" if requirements_option != RequirementsOptions.no_requirements else "s" prefix += "d" if dependencies is not None else "e" if self.inherit_image is not None: if requirements_hash: return prefix + "_" + requirements_hash return prefix if dependencies is None: dependencies_hash = "" else: dependencies_hash = self.get_dependencies_hash(dependencies) if requirements_hash and dependencies_hash: return prefix + "_" + hashlib.sha256(bytes(requirements_hash + dependencies_hash, "utf-8")).hexdigest() elif requirements_hash: return f"{prefix}_{requirements_hash}" elif dependencies_hash: return f"{prefix}_{dependencies_hash}" else: return prefix
[ "def", "get_image_tag", "(", "self", ",", "requirements_option", ":", "RequirementsOptions", ",", "requirements_hash", ":", "Optional", "[", "str", "]", ",", "dependencies", ":", "Optional", "[", "List", "[", "str", "]", "]", ")", "->", "str", ":", "prefix", "=", "\"\"", "if", "self", ".", "inherit_image", "is", "None", ":", "prefix", "=", "\"{}_{}_\"", ".", "format", "(", "arca", ".", "__version__", ",", "self", ".", "get_python_version", "(", ")", ")", "prefix", "+=", "\"i\"", "if", "self", ".", "inherit_image", "is", "not", "None", "else", "\"a\"", "prefix", "+=", "\"r\"", "if", "requirements_option", "!=", "RequirementsOptions", ".", "no_requirements", "else", "\"s\"", "prefix", "+=", "\"d\"", "if", "dependencies", "is", "not", "None", "else", "\"e\"", "if", "self", ".", "inherit_image", "is", "not", "None", ":", "if", "requirements_hash", ":", "return", "prefix", "+", "\"_\"", "+", "requirements_hash", "return", "prefix", "if", "dependencies", "is", "None", ":", "dependencies_hash", "=", "\"\"", "else", ":", "dependencies_hash", "=", "self", ".", "get_dependencies_hash", "(", "dependencies", ")", "if", "requirements_hash", "and", "dependencies_hash", ":", "return", "prefix", "+", "\"_\"", "+", "hashlib", ".", "sha256", "(", "bytes", "(", "requirements_hash", "+", "dependencies_hash", ",", "\"utf-8\"", ")", ")", ".", "hexdigest", "(", ")", "elif", "requirements_hash", ":", "return", "f\"{prefix}_{requirements_hash}\"", "elif", "dependencies_hash", ":", "return", "f\"{prefix}_{dependencies_hash}\"", "else", ":", "return", "prefix" ]
Returns the tag for images with the dependencies and requirements installed. 64-byte hexadecimal strings cannot be used as docker tags, so the prefixes are necessary. Double hashing the dependencies and requirements hash to make the final tag shorter. Prefixes: * Image type: * i – Inherited image * a – Arca base image * Requirements: * r – Does have some kind of requirements * s – Doesn't have requirements * Dependencies: * d – Does have dependencies * e – Doesn't have dependencies Possible outputs: * Inherited images: * `ise` – no requirements * `ide_<hash(requirements)>` – with requirements * From Arca base image: * `<Arca version>_<Python version>_ase` – no requirements and no dependencies * `<Arca version>_<Python version>_asd_<hash(dependencies)>` – only dependencies * `<Arca version>_<Python version>_are_<hash(requirements)>` – only requirements * `<Arca version>_<Python version>_ard_<hash(hash(dependencies) + hash(requirements))>` – both requirements and dependencies
[ "Returns", "the", "tag", "for", "images", "with", "the", "dependencies", "and", "requirements", "installed", "." ]
python
train
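The interesting step in get_image_tag() is the double hashing: when both a requirements hash and a dependencies hash are present, their concatenation is hashed again, so the tag suffix stays a single sha256 hex digest no matter how large the inputs grow. A worked sketch with hypothetical inputs and a hypothetical "<arca>_<python>_ard" prefix.

import hashlib

def sha256_hex(text):
    return hashlib.sha256(text.encode('utf-8')).hexdigest()

requirements_hash = sha256_hex('requests==2.31.0\n')  # hypothetical requirements file
dependencies_hash = sha256_hex('libxml2 libxslt')     # hypothetical dependency list

prefix = '1.0_3.6_ard'  # hypothetical Arca/Python versions plus the "ard" flags
tag = prefix + '_' + hashlib.sha256(
    bytes(requirements_hash + dependencies_hash, 'utf-8')).hexdigest()
print(len(tag.split('_')[-1]))  # 64 hex characters, regardless of input sizes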
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L1844-L1868
def close(self): """close(self)""" if self.isClosed: raise ValueError("operation illegal for closed doc") if hasattr(self, '_outline') and self._outline: self._dropOutline(self._outline) self._outline = None self._reset_page_refs() self.metadata = None self.stream = None self.isClosed = True self.openErrCode = 0 self.openErrMsg = '' self.FontInfos = [] for gmap in self.Graftmaps: self.Graftmaps[gmap] = None self.Graftmaps = {} self.ShownPages = {} val = _fitz.Document_close(self) self.thisown = False return val
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "isClosed", ":", "raise", "ValueError", "(", "\"operation illegal for closed doc\"", ")", "if", "hasattr", "(", "self", ",", "'_outline'", ")", "and", "self", ".", "_outline", ":", "self", ".", "_dropOutline", "(", "self", ".", "_outline", ")", "self", ".", "_outline", "=", "None", "self", ".", "_reset_page_refs", "(", ")", "self", ".", "metadata", "=", "None", "self", ".", "stream", "=", "None", "self", ".", "isClosed", "=", "True", "self", ".", "openErrCode", "=", "0", "self", ".", "openErrMsg", "=", "''", "self", ".", "FontInfos", "=", "[", "]", "for", "gmap", "in", "self", ".", "Graftmaps", ":", "self", ".", "Graftmaps", "[", "gmap", "]", "=", "None", "self", ".", "Graftmaps", "=", "{", "}", "self", ".", "ShownPages", "=", "{", "}", "val", "=", "_fitz", ".", "Document_close", "(", "self", ")", "self", ".", "thisown", "=", "False", "return", "val" ]
close(self)
[ "close", "(", "self", ")" ]
python
train
jumpscale7/python-consistent-toml
contoml/__init__.py
https://github.com/jumpscale7/python-consistent-toml/blob/a0149c65313ccb8170aa99a0cc498e76231292b9/contoml/__init__.py#L26-L38
def dumps(value): """ Dumps a data structure to TOML source code. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module. """ from contoml.file.file import TOMLFile if not isinstance(value, TOMLFile): raise RuntimeError("Can only dump a TOMLFile instance loaded by load() or loads()") return value.dumps()
[ "def", "dumps", "(", "value", ")", ":", "from", "contoml", ".", "file", ".", "file", "import", "TOMLFile", "if", "not", "isinstance", "(", "value", ",", "TOMLFile", ")", ":", "raise", "RuntimeError", "(", "\"Can only dump a TOMLFile instance loaded by load() or loads()\"", ")", "return", "value", ".", "dumps", "(", ")" ]
Dumps a data structure to TOML source code. The given value must be either a dict of dict values, a dict, or a TOML file constructed by this module.
[ "Dumps", "a", "data", "structure", "to", "TOML", "source", "code", "." ]
python
train
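Note that despite the docstring's mention of dicts, the code path above only accepts a TOMLFile produced by this module's own loaders. A hypothetical round-trip, assuming contoml.loads() returns such a TOMLFile; the TOML snippet is illustrative.

import contoml

source = '[server]\nhost = "127.0.0.1"\nport = 8080\n'
toml_file = contoml.loads(source)  # parse into a TOMLFile
print(contoml.dumps(toml_file))    # re-emits the TOML source

try:
    contoml.dumps({'server': {'port': 8080}})  # plain dicts are rejected
except RuntimeError as err:
    print(err)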
xapple/fasta
fasta/__init__.py
https://github.com/xapple/fasta/blob/a827c3138812d555203be45187ffae1277dd0d76/fasta/__init__.py#L356-L364
def graphs(self): """Sorry for the black magic. The result is an object whose attributes are all the graphs found in graphs.py initialized with this instance as only argument.""" result = Dummy() for graph in graphs.__all__: cls = getattr(graphs, graph) setattr(result, cls.short_name, cls(self)) return result
[ "def", "graphs", "(", "self", ")", ":", "result", "=", "Dummy", "(", ")", "for", "graph", "in", "graphs", ".", "__all__", ":", "cls", "=", "getattr", "(", "graphs", ",", "graph", ")", "setattr", "(", "result", ",", "cls", ".", "short_name", ",", "cls", "(", "self", ")", ")", "return", "result" ]
Sorry for the black magic. The result is an object whose attributes are all the graphs found in graphs.py initialized with this instance as only argument.
[ "Sorry", "for", "the", "black", "magic", ".", "The", "result", "is", "an", "object", "whose", "attributes", "are", "all", "the", "graphs", "found", "in", "graphs", ".", "py", "initialized", "with", "this", "instance", "as", "only", "argument", "." ]
python
train
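The property above is a "bag of instances" pattern: it walks a module's __all__, instantiates each graph class with self, and hangs each instance off a blank object under the class's short_name. A stand-in sketch of the same pattern; Dummy and the two graph classes below are hypothetical.

class Dummy(object):
    """Blank object used only to hold attributes."""

class LengthDist(object):
    short_name = 'length_dist'
    def __init__(self, parent):
        self.parent = parent

class GCContent(object):
    short_name = 'gc_content'
    def __init__(self, parent):
        self.parent = parent

def collect_graphs(parent, graph_classes=(LengthDist, GCContent)):
    result = Dummy()
    for cls in graph_classes:
        setattr(result, cls.short_name, cls(parent))
    return result

bag = collect_graphs(parent='some FASTA object')
print(bag.length_dist.parent)  # 'some FASTA object'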
townsenddw/jhubctl
jhubctl/clusters/providers/aws/aws.py
https://github.com/townsenddw/jhubctl/blob/c8c20f86a16e9d01dd90e4607d81423417cc773b/jhubctl/clusters/providers/aws/aws.py#L328-L341
def create_vpc(self): """Create a virtual private cloud on Amazon's Web services configured for deploying JupyterHubs. """ self.create_stack( self.vpc_name, 'amazon-eks-vpc.yaml', parameters=define_parameters( VpcBlock="10.42.0.0/16", Subnet01Block="10.42.1.0/24", Subnet02Block="10.42.2.0/24", Subnet03Block="10.42.3.0/24" ) )
[ "def", "create_vpc", "(", "self", ")", ":", "self", ".", "create_stack", "(", "self", ".", "vpc_name", ",", "'amazon-eks-vpc.yaml'", ",", "parameters", "=", "define_parameters", "(", "VpcBlock", "=", "\"10.42.0.0/16\"", ",", "Subnet01Block", "=", "\"10.42.1.0/24\"", ",", "Subnet02Block", "=", "\"10.42.2.0/24\"", ",", "Subnet03Block", "=", "\"10.42.3.0/24\"", ")", ")" ]
Create a virtual private cloud on Amazon's Web services configured for deploying JupyterHubs.
[ "Create", "a", "virtual", "private", "cloud", "on", "Amazon", "s", "Web", "services", "configured", "for", "deploying", "JupyterHubs", "." ]
python
train
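create_vpc() above delegates to a define_parameters helper that is not shown in this record. If the stack is created through boto3's CloudFormation API, that helper plausibly builds the ParameterKey/ParameterValue list create_stack expects — an assumption, since the real jhubctl helper may differ.

def define_parameters(**kwargs):
    # Assumed shape: boto3 CloudFormation parameter dicts.
    return [{'ParameterKey': key, 'ParameterValue': value}
            for key, value in kwargs.items()]

print(define_parameters(VpcBlock='10.42.0.0/16', Subnet01Block='10.42.1.0/24'))
# [{'ParameterKey': 'VpcBlock', 'ParameterValue': '10.42.0.0/16'},
#  {'ParameterKey': 'Subnet01Block', 'ParameterValue': '10.42.1.0/24'}]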
PSPC-SPAC-buyandsell/von_anchor
von_anchor/wallet/search.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/wallet/search.py#L164-L175
async def close(self) -> None: """ Close search. """ LOGGER.debug('StorageRecordSearch.close >>>') if self._handle: await non_secrets.close_wallet_search(self.handle) self._handle = None LOGGER.debug('StorageRecordSearch.close <<<')
[ "async", "def", "close", "(", "self", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'StorageRecordSearch.close >>>'", ")", "if", "self", ".", "_handle", ":", "await", "non_secrets", ".", "close_wallet_search", "(", "self", ".", "handle", ")", "self", ".", "_handle", "=", "None", "LOGGER", ".", "debug", "(", "'StorageRecordSearch.close <<<'", ")" ]
Close search.
[ "Close", "search", "." ]
python
train
LonamiWebs/Telethon
telethon_examples/gui.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/gui.py#L182-L209
async def sign_in(self, event=None): """ Note the `event` argument. This is required since this callback may be called from a ``widget.bind`` (such as ``'<Return>'``), which sends information about the event we don't care about. This callback logs out if authorized, signs in if a code was sent or a bot token is input, or sends the code otherwise. """ self.sign_in_label.configure(text='Working...') self.sign_in_entry.configure(state=tkinter.DISABLED) if await self.cl.is_user_authorized(): await self.cl.log_out() self.destroy() return value = self.sign_in_entry.get().strip() if self.code: self.set_signed_in(await self.cl.sign_in(code=value)) elif ':' in value: self.set_signed_in(await self.cl.sign_in(bot_token=value)) else: self.code = await self.cl.send_code_request(value) self.sign_in_label.configure(text='Code:') self.sign_in_entry.configure(state=tkinter.NORMAL) self.sign_in_entry.delete(0, tkinter.END) self.sign_in_entry.focus() return
[ "async", "def", "sign_in", "(", "self", ",", "event", "=", "None", ")", ":", "self", ".", "sign_in_label", ".", "configure", "(", "text", "=", "'Working...'", ")", "self", ".", "sign_in_entry", ".", "configure", "(", "state", "=", "tkinter", ".", "DISABLED", ")", "if", "await", "self", ".", "cl", ".", "is_user_authorized", "(", ")", ":", "await", "self", ".", "cl", ".", "log_out", "(", ")", "self", ".", "destroy", "(", ")", "return", "value", "=", "self", ".", "sign_in_entry", ".", "get", "(", ")", ".", "strip", "(", ")", "if", "self", ".", "code", ":", "self", ".", "set_signed_in", "(", "await", "self", ".", "cl", ".", "sign_in", "(", "code", "=", "value", ")", ")", "elif", "':'", "in", "value", ":", "self", ".", "set_signed_in", "(", "await", "self", ".", "cl", ".", "sign_in", "(", "bot_token", "=", "value", ")", ")", "else", ":", "self", ".", "code", "=", "await", "self", ".", "cl", ".", "send_code_request", "(", "value", ")", "self", ".", "sign_in_label", ".", "configure", "(", "text", "=", "'Code:'", ")", "self", ".", "sign_in_entry", ".", "configure", "(", "state", "=", "tkinter", ".", "NORMAL", ")", "self", ".", "sign_in_entry", ".", "delete", "(", "0", ",", "tkinter", ".", "END", ")", "self", ".", "sign_in_entry", ".", "focus", "(", ")", "return" ]
Note the `event` argument. This is required since this callback may be called from a ``widget.bind`` (such as ``'<Return>'``), which sends information about the event we don't care about. This callback logs out if authorized, signs in if a code was sent or a bot token is input, or sends the code otherwise.
[ "Note", "the", "event", "argument", ".", "This", "is", "required", "since", "this", "callback", "may", "be", "called", "from", "a", "widget", ".", "bind", "(", "such", "as", "<Return", ">", ")", "which", "sends", "information", "about", "the", "event", "we", "don", "t", "care", "about", "." ]
python
train
RedisJSON/rejson-py
rejson/client.py
https://github.com/RedisJSON/rejson-py/blob/55f0adf3adc40f5a769e28e541dbbf5377b90ec6/rejson/client.py#L89-L98
def setDecoder(self, decoder): """ Sets the client's decoder ``decoder`` should be an instance of a ``json.JSONDecoder`` class """ if not decoder: self._decoder = json.JSONDecoder() else: self._decoder = decoder self._decode = self._decoder.decode
[ "def", "setDecoder", "(", "self", ",", "decoder", ")", ":", "if", "not", "decoder", ":", "self", ".", "_decoder", "=", "json", ".", "JSONDecoder", "(", ")", "else", ":", "self", ".", "_decoder", "=", "decoder", "self", ".", "_decode", "=", "self", ".", "_decoder", ".", "decode" ]
Sets the client's decoder ``decoder`` should be an instance of a ``json.JSONDecoder`` class
[ "Sets", "the", "client", "s", "decoder", "decoder", "should", "be", "an", "instance", "of", "a", "json", ".", "JSONDecoder", "class" ]
python
train
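setDecoder() above swaps the client's JSON decoder and falls back to a stock json.JSONDecoder when given a falsy value. A hypothetical use: decoding floats as Decimal. Host and port are illustrative, and a running Redis with the ReJSON module is assumed.

import json
from decimal import Decimal

from rejson import Client

rj = Client(host='localhost', port=6379)
rj.setDecoder(json.JSONDecoder(parse_float=Decimal))  # floats come back as Decimal
rj.setDecoder(None)                                   # falsy value restores the default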
tmontaigu/pylas
pylas/point/format.py
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/format.py#L102-L105
def num_extra_bytes(self): """ Returns the number of extra bytes """ return sum(np.dtype(extra_dim[1]).itemsize for extra_dim in self.extra_dims)
[ "def", "num_extra_bytes", "(", "self", ")", ":", "return", "sum", "(", "np", ".", "dtype", "(", "extra_dim", "[", "1", "]", ")", ".", "itemsize", "for", "extra_dim", "in", "self", ".", "extra_dims", ")" ]
Returns the number of extra bytes
[ "Returns", "the", "number", "of", "extra", "bytes" ]
python
test
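The property above sums numpy itemsizes, which implies extra_dims holds (name, dtype-string) pairs. A worked example under that assumption; the dimension names and dtypes are hypothetical.

import numpy as np

extra_dims = [('classification_flags', 'u1'), ('reflectance', 'f8')]
num_extra_bytes = sum(np.dtype(extra_dim[1]).itemsize for extra_dim in extra_dims)
print(num_extra_bytes)  # 1 + 8 == 9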
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L2563-L2584
def connect_options_namespaced_service_proxy(self, name, namespace, **kwargs): """ connect OPTIONS requests to proxy of Service This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_options_namespaced_service_proxy(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy. :return: str If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.connect_options_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) else: (data) = self.connect_options_namespaced_service_proxy_with_http_info(name, namespace, **kwargs) return data
[ "def", "connect_options_namespaced_service_proxy", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "connect_options_namespaced_service_proxy_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "connect_options_namespaced_service_proxy_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "return", "data" ]
connect OPTIONS requests to proxy of Service This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_options_namespaced_service_proxy(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ServiceProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy. :return: str If the method is called asynchronously, returns the request thread.
[ "connect", "OPTIONS", "requests", "to", "proxy", "of", "Service", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "connect_options_namespaced_service_proxy", "(", "name", "namespace", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L134-L174
def load_data(self): """ Loads data from MRMS GRIB2 files and handles compression duties if files are compressed. """ data = [] loaded_dates = [] loaded_indices = [] for t, timestamp in enumerate(self.all_dates): date_str = timestamp.date().strftime("%Y%m%d") full_path = self.path_start + date_str + "/" if self.variable in os.listdir(full_path): full_path += self.variable + "/" data_files = sorted(os.listdir(full_path)) file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files]) if timestamp in file_dates: data_file = data_files[np.where(timestamp==file_dates)[0][0]] print(full_path + data_file) if data_file[-2:] == "gz": subprocess.call(["gunzip", full_path + data_file]) file_obj = Nio.open_file(full_path + data_file[:-3]) else: file_obj = Nio.open_file(full_path + data_file) var_name = sorted(file_obj.variables.keys())[0] data.append(file_obj.variables[var_name][:]) if self.lon is None: self.lon = file_obj.variables["lon_0"][:] # Translates longitude values from 0:360 to -180:180 if np.count_nonzero(self.lon > 180) > 0: self.lon -= 360 self.lat = file_obj.variables["lat_0"][:] file_obj.close() if data_file[-2:] == "gz": subprocess.call(["gzip", full_path + data_file[:-3]]) else: subprocess.call(["gzip", full_path + data_file]) loaded_dates.append(timestamp) loaded_indices.append(t) if len(loaded_dates) > 0: self.loaded_dates = pd.DatetimeIndex(loaded_dates) self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999 self.data[loaded_indices] = np.array(data)
[ "def", "load_data", "(", "self", ")", ":", "data", "=", "[", "]", "loaded_dates", "=", "[", "]", "loaded_indices", "=", "[", "]", "for", "t", ",", "timestamp", "in", "enumerate", "(", "self", ".", "all_dates", ")", ":", "date_str", "=", "timestamp", ".", "date", "(", ")", ".", "strftime", "(", "\"%Y%m%d\"", ")", "full_path", "=", "self", ".", "path_start", "+", "date_str", "+", "\"/\"", "if", "self", ".", "variable", "in", "os", ".", "listdir", "(", "full_path", ")", ":", "full_path", "+=", "self", ".", "variable", "+", "\"/\"", "data_files", "=", "sorted", "(", "os", ".", "listdir", "(", "full_path", ")", ")", "file_dates", "=", "pd", ".", "to_datetime", "(", "[", "d", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", "[", "0", ":", "13", "]", "for", "d", "in", "data_files", "]", ")", "if", "timestamp", "in", "file_dates", ":", "data_file", "=", "data_files", "[", "np", ".", "where", "(", "timestamp", "==", "file_dates", ")", "[", "0", "]", "[", "0", "]", "]", "print", "(", "full_path", "+", "data_file", ")", "if", "data_file", "[", "-", "2", ":", "]", "==", "\"gz\"", ":", "subprocess", ".", "call", "(", "[", "\"gunzip\"", ",", "full_path", "+", "data_file", "]", ")", "file_obj", "=", "Nio", ".", "open_file", "(", "full_path", "+", "data_file", "[", ":", "-", "3", "]", ")", "else", ":", "file_obj", "=", "Nio", ".", "open_file", "(", "full_path", "+", "data_file", ")", "var_name", "=", "sorted", "(", "file_obj", ".", "variables", ".", "keys", "(", ")", ")", "[", "0", "]", "data", ".", "append", "(", "file_obj", ".", "variables", "[", "var_name", "]", "[", ":", "]", ")", "if", "self", ".", "lon", "is", "None", ":", "self", ".", "lon", "=", "file_obj", ".", "variables", "[", "\"lon_0\"", "]", "[", ":", "]", "# Translates longitude values from 0:360 to -180:180", "if", "np", ".", "count_nonzero", "(", "self", ".", "lon", ">", "180", ")", ">", "0", ":", "self", ".", "lon", "-=", "360", "self", ".", "lat", "=", "file_obj", ".", "variables", "[", "\"lat_0\"", "]", "[", ":", "]", "file_obj", ".", "close", "(", ")", "if", "data_file", "[", "-", "2", ":", "]", "==", "\"gz\"", ":", "subprocess", ".", "call", "(", "[", "\"gzip\"", ",", "full_path", "+", "data_file", "[", ":", "-", "3", "]", "]", ")", "else", ":", "subprocess", ".", "call", "(", "[", "\"gzip\"", ",", "full_path", "+", "data_file", "]", ")", "loaded_dates", ".", "append", "(", "timestamp", ")", "loaded_indices", ".", "append", "(", "t", ")", "if", "len", "(", "loaded_dates", ")", ">", "0", ":", "self", ".", "loaded_dates", "=", "pd", ".", "DatetimeIndex", "(", "loaded_dates", ")", "self", ".", "data", "=", "np", ".", "ones", "(", "(", "self", ".", "all_dates", ".", "shape", "[", "0", "]", ",", "data", "[", "0", "]", ".", "shape", "[", "0", "]", ",", "data", "[", "0", "]", ".", "shape", "[", "1", "]", ")", ")", "*", "-", "9999", "self", ".", "data", "[", "loaded_indices", "]", "=", "np", ".", "array", "(", "data", ")" ]
Loads data from MRMS GRIB2 files and handles compression duties if files are compressed.
[ "Loads", "data", "from", "MRMS", "GRIB2", "files", "and", "handles", "compression", "duties", "if", "files", "are", "compressed", "." ]
python
train
commandprompt/Simpycity
simpycity/config.py
https://github.com/commandprompt/Simpycity/blob/a2aa90c31f5ae89f19efcb52025e8088d2205337/simpycity/config.py#L19-L28
def dsn(): """ Return a libpq connection string using the variables defined in this file. """ configs = {'host': host, 'port': port, 'dbname': database, 'user': user, 'password': password} return ' '.join(['{0}={1}'.format(_[0], _[1]) for _ in configs.items() if _[1] is not None and _[1] != ''])
[ "def", "dsn", "(", ")", ":", "configs", "=", "{", "'host'", ":", "host", ",", "'port'", ":", "port", ",", "'dbname'", ":", "database", ",", "'user'", ":", "user", ",", "'password'", ":", "password", "}", "return", "' '", ".", "join", "(", "[", "'{0}={1}'", ".", "format", "(", "_", "[", "0", "]", ",", "_", "[", "1", "]", ")", "for", "_", "in", "configs", ".", "items", "(", ")", "if", "_", "[", "1", "]", "is", "not", "None", "and", "_", "[", "1", "]", "!=", "''", "]", ")" ]
Return a libpq connection string using the variables defined in this file.
[ "Return", "a", "libpq", "connection", "string", "using", "the", "variables", "defined", "in", "this", "file", "." ]
python
train
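dsn() above joins key=value pairs while dropping settings that are None or empty, so unset fields simply vanish from the connection string. A worked example with hypothetical settings; note the unset password never appears in the output.

configs = {'host': 'db.example.com', 'port': 5432,
           'dbname': 'app', 'user': 'app', 'password': None}
dsn = ' '.join('{0}={1}'.format(key, value)
               for key, value in configs.items()
               if value is not None and value != '')
print(dsn)  # host=db.example.com port=5432 dbname=app user=app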
msmbuilder/msmbuilder
msmbuilder/msm/core.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/msm/core.py#L140-L153
def _parse_ergodic_cutoff(self): """Get a numeric value from the ergodic_cutoff input, which can be 'on' or 'off'. """ ec_is_str = isinstance(self.ergodic_cutoff, str) if ec_is_str and self.ergodic_cutoff.lower() == 'on': if self.sliding_window: return 1.0 / self.lag_time else: return 1.0 elif ec_is_str and self.ergodic_cutoff.lower() == 'off': return 0.0 else: return self.ergodic_cutoff
[ "def", "_parse_ergodic_cutoff", "(", "self", ")", ":", "ec_is_str", "=", "isinstance", "(", "self", ".", "ergodic_cutoff", ",", "str", ")", "if", "ec_is_str", "and", "self", ".", "ergodic_cutoff", ".", "lower", "(", ")", "==", "'on'", ":", "if", "self", ".", "sliding_window", ":", "return", "1.0", "/", "self", ".", "lag_time", "else", ":", "return", "1.0", "elif", "ec_is_str", "and", "self", ".", "ergodic_cutoff", ".", "lower", "(", ")", "==", "'off'", ":", "return", "0.0", "else", ":", "return", "self", ".", "ergodic_cutoff" ]
Get a numeric value from the ergodic_cutoff input, which can be 'on' or 'off'.
[ "Get", "a", "numeric", "value", "from", "the", "ergodic_cutoff", "input", "which", "can", "be", "on", "or", "off", "." ]
python
train
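The parser above maps 'on' to 1/lag_time under sliding windows (1.0 otherwise), 'off' to 0.0, and passes numeric values through. A standalone mirror of that logic, with the instance attributes turned into hypothetical arguments.

def parse_ergodic_cutoff(ergodic_cutoff, sliding_window=True, lag_time=10):
    ec_is_str = isinstance(ergodic_cutoff, str)
    if ec_is_str and ergodic_cutoff.lower() == 'on':
        return 1.0 / lag_time if sliding_window else 1.0
    if ec_is_str and ergodic_cutoff.lower() == 'off':
        return 0.0
    return ergodic_cutoff  # numeric values pass through unchanged

print(parse_ergodic_cutoff('on'))   # 0.1
print(parse_ergodic_cutoff('off'))  # 0.0
print(parse_ergodic_cutoff(2.5))    # 2.5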
peo3/cgroup-utils
cgutils/cgroup.py
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L917-L935
def scan_cgroups(subsys_name, filters=list()): """ It returns a control group hierarchy which belongs to the subsys_name. When collecting cgroups, filters are applied to the cgroups. See pydoc of apply_filters method of CGroup for more information about the filters. """ status = SubsystemStatus() if subsys_name not in status.get_all(): raise NoSuchSubsystemError("No such subsystem found: " + subsys_name) if subsys_name not in status.get_available(): raise EnvironmentError("Disabled in the kernel: " + subsys_name) if subsys_name not in status.get_enabled(): raise EnvironmentError("Not enabled in the system: " + subsys_name) subsystem = _get_subsystem(subsys_name) mount_point = status.get_path(subsys_name) return _scan_cgroups_recursive(subsystem, mount_point, mount_point, filters)
[ "def", "scan_cgroups", "(", "subsys_name", ",", "filters", "=", "list", "(", ")", ")", ":", "status", "=", "SubsystemStatus", "(", ")", "if", "subsys_name", "not", "in", "status", ".", "get_all", "(", ")", ":", "raise", "NoSuchSubsystemError", "(", "\"No such subsystem found: \"", "+", "subsys_name", ")", "if", "subsys_name", "not", "in", "status", ".", "get_available", "(", ")", ":", "raise", "EnvironmentError", "(", "\"Disabled in the kernel: \"", "+", "subsys_name", ")", "if", "subsys_name", "not", "in", "status", ".", "get_enabled", "(", ")", ":", "raise", "EnvironmentError", "(", "\"Not enabled in the system: \"", "+", "subsys_name", ")", "subsystem", "=", "_get_subsystem", "(", "subsys_name", ")", "mount_point", "=", "status", ".", "get_path", "(", "subsys_name", ")", "return", "_scan_cgroups_recursive", "(", "subsystem", ",", "mount_point", ",", "mount_point", ",", "filters", ")" ]
It returns a control group hierarchy which belongs to the subsys_name. When collecting cgroups, filters are applied to the cgroups. See pydoc of apply_filters method of CGroup for more information about the filters.
[ "It", "returns", "a", "control", "group", "hierarchy", "which", "belong", "to", "the", "subsys_name", ".", "When", "collecting", "cgroups", "filters", "are", "applied", "to", "the", "cgroups", ".", "See", "pydoc", "of", "apply_filters", "method", "of", "CGroup", "for", "more", "information", "about", "the", "filters", "." ]
python
train
nicolargo/glances
glances/plugins/glances_plugin.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_plugin.py#L162-L167
def reset_stats_history(self): """Reset the stats history (dict of GlancesAttribute).""" if self.history_enable(): reset_list = [a['name'] for a in self.get_items_history_list()] logger.debug("Reset history for plugin {} (items: {})".format(self.plugin_name, reset_list)) self.stats_history.reset()
[ "def", "reset_stats_history", "(", "self", ")", ":", "if", "self", ".", "history_enable", "(", ")", ":", "reset_list", "=", "[", "a", "[", "'name'", "]", "for", "a", "in", "self", ".", "get_items_history_list", "(", ")", "]", "logger", ".", "debug", "(", "\"Reset history for plugin {} (items: {})\"", ".", "format", "(", "self", ".", "plugin_name", ",", "reset_list", ")", ")", "self", ".", "stats_history", ".", "reset", "(", ")" ]
Reset the stats history (dict of GlancesAttribute).
[ "Reset", "the", "stats", "history", "(", "dict", "of", "GlancesAttribute", ")", "." ]
python
train
buriburisuri/sugartensor
sugartensor/sg_transform.py
https://github.com/buriburisuri/sugartensor/blob/d2c039954777c7fbe3eb0c2ae40c45c9854deb40/sugartensor/sg_transform.py#L218-L237
def sg_concat(tensor, opt): r"""Concatenates tensors along an axis. See `tf.concat()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: target: A `Tensor`. Must have the same rank as `tensor`, and all dimensions except `opt.axis` must be equal. axis: Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ assert opt.target is not None, 'target is mandatory.' opt += tf.sg_opt(axis=tensor.get_shape().ndims-1) target = opt.target if isinstance(opt.target, (tuple, list)) else [opt.target] return tf.concat([tensor] + target, opt.axis, name=opt.name)
[ "def", "sg_concat", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "target", "is", "not", "None", ",", "'target is mandatory.'", "opt", "+=", "tf", ".", "sg_opt", "(", "axis", "=", "tensor", ".", "get_shape", "(", ")", ".", "ndims", "-", "1", ")", "target", "=", "opt", ".", "target", "if", "isinstance", "(", "opt", ".", "target", ",", "(", "tuple", ",", "list", ")", ")", "else", "[", "opt", ".", "target", "]", "return", "tf", ".", "concat", "(", "[", "tensor", "]", "+", "target", ",", "opt", ".", "axis", ",", "name", "=", "opt", ".", "name", ")" ]
r"""Concatenates tensors along a axis. See `tf.concat()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: target: A `Tensor`. Must have the same rank as `tensor`, and all dimensions except `opt.dim` must be equal. axis : Target axis. Default is the last one. name: If provided, replace current tensor's name. Returns: A `Tensor`.
[ "r", "Concatenates", "tensors", "along", "a", "axis", "." ]
python
train
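Two rules in sg_concat() are worth isolating: the axis defaults to the tensor's last dimension, and a single target is wrapped into a list before concatenation. A sketch of those rules in plain numpy, so it runs without TensorFlow 1.x or sugartensor installed; the function name is a stand-in.

import numpy as np

def concat_like_sg(tensor, target, axis=None):
    if axis is None:
        axis = tensor.ndim - 1  # default: the last axis
    targets = target if isinstance(target, (tuple, list)) else [target]
    return np.concatenate([tensor] + list(targets), axis=axis)

a = np.zeros((2, 3))
b = np.ones((2, 2))
print(concat_like_sg(a, b).shape)       # (2, 5), concatenated on the last axis
print(concat_like_sg(a, [b, b]).shape)  # (2, 7), list targets are all appended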
Telefonica/toolium
toolium/driver_wrappers_pool.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/driver_wrappers_pool.py#L270-L299
def configure_common_directories(cls, tc_config_files): """Configure common config and output folders for all tests :param tc_config_files: test case specific config files """ if cls.config_directory is None: # Get config directory from properties config_directory = cls.get_configured_value('Config_directory', tc_config_files.config_directory, 'conf') prop_filenames = cls.get_configured_value('Config_prop_filenames', tc_config_files.config_properties_filenames, 'properties.cfg') cls.config_directory = cls._find_parent_directory(config_directory, prop_filenames.split(';')[0]) # Get output directory from properties and create it cls.output_directory = cls.get_configured_value('Output_directory', tc_config_files.output_directory, 'output') if not os.path.isabs(cls.output_directory): # If output directory is relative, we use the same path as config directory cls.output_directory = os.path.join(os.path.dirname(cls.config_directory), cls.output_directory) if not os.path.exists(cls.output_directory): os.makedirs(cls.output_directory) # Get visual baseline directory from properties default_baseline = os.path.join(cls.output_directory, 'visualtests', 'baseline') cls.visual_baseline_directory = cls.get_configured_value('Visual_baseline_directory', tc_config_files.visual_baseline_directory, default_baseline) if not os.path.isabs(cls.visual_baseline_directory): # If baseline directory is relative, we use the same path as config directory cls.visual_baseline_directory = os.path.join(os.path.dirname(cls.config_directory), cls.visual_baseline_directory)
[ "def", "configure_common_directories", "(", "cls", ",", "tc_config_files", ")", ":", "if", "cls", ".", "config_directory", "is", "None", ":", "# Get config directory from properties", "config_directory", "=", "cls", ".", "get_configured_value", "(", "'Config_directory'", ",", "tc_config_files", ".", "config_directory", ",", "'conf'", ")", "prop_filenames", "=", "cls", ".", "get_configured_value", "(", "'Config_prop_filenames'", ",", "tc_config_files", ".", "config_properties_filenames", ",", "'properties.cfg'", ")", "cls", ".", "config_directory", "=", "cls", ".", "_find_parent_directory", "(", "config_directory", ",", "prop_filenames", ".", "split", "(", "';'", ")", "[", "0", "]", ")", "# Get output directory from properties and create it", "cls", ".", "output_directory", "=", "cls", ".", "get_configured_value", "(", "'Output_directory'", ",", "tc_config_files", ".", "output_directory", ",", "'output'", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "cls", ".", "output_directory", ")", ":", "# If output directory is relative, we use the same path as config directory", "cls", ".", "output_directory", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "cls", ".", "config_directory", ")", ",", "cls", ".", "output_directory", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "cls", ".", "output_directory", ")", ":", "os", ".", "makedirs", "(", "cls", ".", "output_directory", ")", "# Get visual baseline directory from properties", "default_baseline", "=", "os", ".", "path", ".", "join", "(", "cls", ".", "output_directory", ",", "'visualtests'", ",", "'baseline'", ")", "cls", ".", "visual_baseline_directory", "=", "cls", ".", "get_configured_value", "(", "'Visual_baseline_directory'", ",", "tc_config_files", ".", "visual_baseline_directory", ",", "default_baseline", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "cls", ".", "visual_baseline_directory", ")", ":", "# If baseline directory is relative, we use the same path as config directory", "cls", ".", "visual_baseline_directory", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "cls", ".", "config_directory", ")", ",", "cls", ".", "visual_baseline_directory", ")" ]
Configure common config and output folders for all tests :param tc_config_files: test case specific config files
[ "Configure", "common", "config", "and", "output", "folders", "for", "all", "tests" ]
python
train
twisted/epsilon
epsilon/ampauth.py
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/ampauth.py#L182-L188
def passwordLogin(self, username): """ Generate a new challenge for the given username. """ self.challenge = secureRandom(16) self.username = username return {'challenge': self.challenge}
[ "def", "passwordLogin", "(", "self", ",", "username", ")", ":", "self", ".", "challenge", "=", "secureRandom", "(", "16", ")", "self", ".", "username", "=", "username", "return", "{", "'challenge'", ":", "self", ".", "challenge", "}" ]
Generate a new challenge for the given username.
[ "Generate", "a", "new", "challenge", "for", "the", "given", "username", "." ]
python
train
beregond/super_state_machine
super_state_machine/machines.py
https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L103-L122
def _check_state_value(cls): """Check initial state value - if is proper and translate it. Initial state is required. """ state_value = cls.context.get_config('initial_state', None) state_value = state_value or getattr( cls.context.new_class, cls.context.state_name, None ) if not state_value: raise ValueError( "Empty state is disallowed, yet no initial state is given!" ) state_value = ( cls.context .new_meta['translator'] .translate(state_value) ) cls.context.state_value = state_value
[ "def", "_check_state_value", "(", "cls", ")", ":", "state_value", "=", "cls", ".", "context", ".", "get_config", "(", "'initial_state'", ",", "None", ")", "state_value", "=", "state_value", "or", "getattr", "(", "cls", ".", "context", ".", "new_class", ",", "cls", ".", "context", ".", "state_name", ",", "None", ")", "if", "not", "state_value", ":", "raise", "ValueError", "(", "\"Empty state is disallowed, yet no initial state is given!\"", ")", "state_value", "=", "(", "cls", ".", "context", ".", "new_meta", "[", "'translator'", "]", ".", "translate", "(", "state_value", ")", ")", "cls", ".", "context", ".", "state_value", "=", "state_value" ]
Check initial state value - if is proper and translate it. Initial state is required.
[ "Check", "initial", "state", "value", "-", "if", "is", "proper", "and", "translate", "it", "." ]
python
train
RedFantom/ttkwidgets
ttkwidgets/font/propertiesframe.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/font/propertiesframe.py#L72-L75
def _on_click(self): """Handles clicks and calls callback.""" if callable(self.__callback): self.__callback((self.bold, self.italic, self.underline, self.overstrike))
[ "def", "_on_click", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "__callback", ")", ":", "self", ".", "__callback", "(", "(", "self", ".", "bold", ",", "self", ".", "italic", ",", "self", ".", "underline", ",", "self", ".", "overstrike", ")", ")" ]
Handles clicks and calls callback.
[ "Handles", "clicks", "and", "calls", "callback", "." ]
python
train
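The handler above uses a callable guard: the stored callback fires only when one was actually supplied, so a None callback is silently ignored. A minimal sketch of the pattern outside of Tk; the class name and flag attributes are stand-ins.

class StyleToggle(object):
    def __init__(self, callback=None):
        self.__callback = callback
        self.bold = self.italic = self.underline = self.overstrike = False

    def _on_click(self):
        if callable(self.__callback):
            self.__callback((self.bold, self.italic,
                             self.underline, self.overstrike))

StyleToggle()._on_click()       # no callback supplied: nothing happens
StyleToggle(print)._on_click()  # prints (False, False, False, False)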