Schema (each record below repeats these nine fields, in this order):

    repo             : string, 7–54 chars
    path             : string, 4–192 chars
    url              : string, 87–284 chars
    code             : string, 78–104k chars
    code_tokens      : sequence of strings
    docstring        : string, 1–46.9k chars
    docstring_tokens : sequence of strings
    language         : string, 1 distinct class
    partition        : string, 3 distinct classes
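Each record pairs a Python function's source (code, code_tokens) with its documentation (docstring, docstring_tokens) and its provenance (repo, path, url), plus a partition label. As a minimal sketch of how one record of such a dump might be inspected, assuming a JSON-lines export whose field names match the schema above (the file name corpus.jsonl is a placeholder, not part of this dump):

    import json

    # Read one record from a hypothetical JSON-lines export of this corpus.
    # "corpus.jsonl" is a placeholder name, not a file named in this dump.
    with open("corpus.jsonl", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            print(row["repo"], row["path"])    # provenance of the function
            print(row["url"])                  # permalink to the source lines
            print(row["docstring"][:80])       # start of the paired docstring
            assert row["language"] == "python"                     # 1 class
            assert row["partition"] in {"train", "valid", "test"}  # 3 classes
            break  # inspect only the first record

Note that in the rows below, code_tokens mirrors code token for token, while docstring_tokens often covers only the leading sentence of docstring.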
tgbugs/pyontutils
ilxutils/ilxutils/interlex_ingestion.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/interlex_ingestion.py#L86-L101
def fragment_search(self, fragment: str) -> List[dict]: ''' Returns the rows in InterLex associated with the fragment Note: Presumed to have duplicate fragments in InterLex Args: fragment: The fragment_id of the curie pertaining to the ontology Returns: None or List[dict] ''' fragment = self.extract_fragment(fragment) ilx_rows = self.fragment2rows.get(fragment) if not ilx_rows: return None else: return ilx_rows
[ "def", "fragment_search", "(", "self", ",", "fragment", ":", "str", ")", "->", "List", "[", "dict", "]", ":", "fragment", "=", "self", ".", "extract_fragment", "(", "fragment", ")", "ilx_rows", "=", "self", ".", "fragment2rows", ".", "get", "(", "fragment", ")", "if", "not", "ilx_rows", ":", "return", "None", "else", ":", "return", "ilx_rows" ]
Returns the rows in InterLex associated with the fragment Note: Presumed to have duplicate fragments in InterLex Args: fragment: The fragment_id of the curie pertaining to the ontology Returns: None or List[dict]
[ "Returns", "the", "rows", "in", "InterLex", "associated", "with", "the", "fragment" ]
python
train
tk0miya/tk.phpautodoc
src/phply/phpparse.py
https://github.com/tk0miya/tk.phpautodoc/blob/cf789f64abaf76351485cee231a075227e665fb6/src/phply/phpparse.py#L305-L311
def p_elseif_list(p): '''elseif_list : empty | elseif_list ELSEIF LPAREN expr RPAREN statement''' if len(p) == 2: p[0] = [] else: p[0] = p[1] + [ast.ElseIf(p[4], p[6], lineno=p.lineno(2))]
[ "def", "p_elseif_list", "(", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "p", "[", "0", "]", "=", "[", "]", "else", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "+", "[", "ast", ".", "ElseIf", "(", "p", "[", "4", "]", ",", "p", "[", "6", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "2", ")", ")", "]" ]
elseif_list : empty | elseif_list ELSEIF LPAREN expr RPAREN statement
[ "elseif_list", ":", "empty", "|", "elseif_list", "ELSEIF", "LPAREN", "expr", "RPAREN", "statement" ]
python
train
census-instrumentation/opencensus-python
contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/contrib/opencensus-ext-stackdriver/opencensus/ext/stackdriver/trace_exporter/__init__.py#L77-L115
def set_monitored_resource_attributes(span): """Set labels to span that can be used for tracing. :param span: Span object """ resource = monitored_resource.get_instance() if resource is not None: resource_type = resource.get_type() resource_labels = resource.get_labels() if resource_type == 'gke_container': resource_type = 'k8s_container' set_attribute_label(span, resource_type, resource_labels, 'project_id') set_attribute_label(span, resource_type, resource_labels, 'cluster_name') set_attribute_label(span, resource_type, resource_labels, 'container_name') set_attribute_label(span, resource_type, resource_labels, 'namespace_id', 'namespace_name') set_attribute_label(span, resource_type, resource_labels, 'pod_id', 'pod_name') set_attribute_label(span, resource_type, resource_labels, 'zone', 'location') elif resource_type == 'gce_instance': set_attribute_label(span, resource_type, resource_labels, 'project_id') set_attribute_label(span, resource_type, resource_labels, 'instance_id') set_attribute_label(span, resource_type, resource_labels, 'zone') elif resource_type == 'aws_ec2_instance': set_attribute_label(span, resource_type, resource_labels, 'aws_account') set_attribute_label(span, resource_type, resource_labels, 'instance_id') set_attribute_label(span, resource_type, resource_labels, 'region', label_value_prefix='aws:')
[ "def", "set_monitored_resource_attributes", "(", "span", ")", ":", "resource", "=", "monitored_resource", ".", "get_instance", "(", ")", "if", "resource", "is", "not", "None", ":", "resource_type", "=", "resource", ".", "get_type", "(", ")", "resource_labels", "=", "resource", ".", "get_labels", "(", ")", "if", "resource_type", "==", "'gke_container'", ":", "resource_type", "=", "'k8s_container'", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'project_id'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'cluster_name'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'container_name'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'namespace_id'", ",", "'namespace_name'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'pod_id'", ",", "'pod_name'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'zone'", ",", "'location'", ")", "elif", "resource_type", "==", "'gce_instance'", ":", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'project_id'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'instance_id'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'zone'", ")", "elif", "resource_type", "==", "'aws_ec2_instance'", ":", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'aws_account'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'instance_id'", ")", "set_attribute_label", "(", "span", ",", "resource_type", ",", "resource_labels", ",", "'region'", ",", "label_value_prefix", "=", "'aws:'", ")" ]
Set labels to span that can be used for tracing. :param span: Span object
[ "Set", "labels", "to", "span", "that", "can", "be", "used", "for", "tracing", ".", ":", "param", "span", ":", "Span", "object" ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L3940-L3952
def libvlc_log_get_context(ctx): '''Gets debugging information about a log message: the name of the VLC module emitting the message and the message location within the source code. The returned module name and file name will be NULL if unknown. The returned line number will similarly be zero if unknown. @param ctx: message context (as passed to the @ref libvlc_log_cb callback). @return: module module name storage (or NULL), file source code file name storage (or NULL), line source code file line number storage (or NULL). @version: LibVLC 2.1.0 or later. ''' f = _Cfunctions.get('libvlc_log_get_context', None) or \ _Cfunction('libvlc_log_get_context', ((1,), (2,), (2,), (2,),), None, None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint)) return f(ctx)
[ "def", "libvlc_log_get_context", "(", "ctx", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_log_get_context'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_log_get_context'", ",", "(", "(", "1", ",", ")", ",", "(", "2", ",", ")", ",", "(", "2", ",", ")", ",", "(", "2", ",", ")", ",", ")", ",", "None", ",", "None", ",", "Log_ptr", ",", "ListPOINTER", "(", "ctypes", ".", "c_char_p", ")", ",", "ListPOINTER", "(", "ctypes", ".", "c_char_p", ")", ",", "ctypes", ".", "POINTER", "(", "ctypes", ".", "c_uint", ")", ")", "return", "f", "(", "ctx", ")" ]
Gets debugging information about a log message: the name of the VLC module emitting the message and the message location within the source code. The returned module name and file name will be NULL if unknown. The returned line number will similarly be zero if unknown. @param ctx: message context (as passed to the @ref libvlc_log_cb callback). @return: module module name storage (or NULL), file source code file name storage (or NULL), line source code file line number storage (or NULL). @version: LibVLC 2.1.0 or later.
[ "Gets", "debugging", "information", "about", "a", "log", "message", ":", "the", "name", "of", "the", "VLC", "module", "emitting", "the", "message", "and", "the", "message", "location", "within", "the", "source", "code", ".", "The", "returned", "module", "name", "and", "file", "name", "will", "be", "NULL", "if", "unknown", ".", "The", "returned", "line", "number", "will", "similarly", "be", "zero", "if", "unknown", "." ]
python
train
marrow/uri
uri/uri.py
https://github.com/marrow/uri/blob/1d8220f11111920cd625a0a32ba6a354edead825/uri/uri.py#L256-L270
def resolve(self, uri=None, **parts): """Attempt to resolve a new URI given an updated URI, partial or complete.""" if uri: result = self.__class__(urljoin(str(self), str(uri))) else: result = self.__class__(self) for part, value in parts.items(): if part not in self.__all_parts__: raise TypeError("Unknown URI component: " + part) setattr(result, part, value) return result
[ "def", "resolve", "(", "self", ",", "uri", "=", "None", ",", "*", "*", "parts", ")", ":", "if", "uri", ":", "result", "=", "self", ".", "__class__", "(", "urljoin", "(", "str", "(", "self", ")", ",", "str", "(", "uri", ")", ")", ")", "else", ":", "result", "=", "self", ".", "__class__", "(", "self", ")", "for", "part", ",", "value", "in", "parts", ".", "items", "(", ")", ":", "if", "part", "not", "in", "self", ".", "__all_parts__", ":", "raise", "TypeError", "(", "\"Unknown URI component: \"", "+", "part", ")", "setattr", "(", "result", ",", "part", ",", "value", ")", "return", "result" ]
Attempt to resolve a new URI given an updated URI, partial or complete.
[ "Attempt", "to", "resolve", "a", "new", "URI", "given", "an", "updated", "URI", "partial", "or", "complete", "." ]
python
train
fabioz/PyDev.Debugger
_pydevd_frame_eval/pydevd_modify_bytecode.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_frame_eval/pydevd_modify_bytecode.py#L46-L79
def _modify_new_lines(code_to_modify, offset, code_to_insert): """ Update new lines: the bytecode inserted should be the last instruction of the previous line. :return: bytes sequence of code with updated lines offsets """ # There's a nice overview of co_lnotab in # https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt new_list = list(code_to_modify.co_lnotab) if not new_list: # Could happen on a lambda (in this case, a breakpoint in the lambda should fallback to # tracing). return None # As all numbers are relative, what we want is to hide the code we inserted in the previous line # (it should be the last thing right before we increment the line so that we have a line event # right after the inserted code). bytecode_delta = len(code_to_insert) byte_increments = code_to_modify.co_lnotab[0::2] line_increments = code_to_modify.co_lnotab[1::2] if offset == 0: new_list[0] += bytecode_delta else: addr = 0 it = zip(byte_increments, line_increments) for i, (byte_incr, _line_incr) in enumerate(it): addr += byte_incr if addr == offset: new_list[i * 2] += bytecode_delta break return bytes(new_list)
[ "def", "_modify_new_lines", "(", "code_to_modify", ",", "offset", ",", "code_to_insert", ")", ":", "# There's a nice overview of co_lnotab in", "# https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt", "new_list", "=", "list", "(", "code_to_modify", ".", "co_lnotab", ")", "if", "not", "new_list", ":", "# Could happen on a lambda (in this case, a breakpoint in the lambda should fallback to", "# tracing).", "return", "None", "# As all numbers are relative, what we want is to hide the code we inserted in the previous line", "# (it should be the last thing right before we increment the line so that we have a line event", "# right after the inserted code).", "bytecode_delta", "=", "len", "(", "code_to_insert", ")", "byte_increments", "=", "code_to_modify", ".", "co_lnotab", "[", "0", ":", ":", "2", "]", "line_increments", "=", "code_to_modify", ".", "co_lnotab", "[", "1", ":", ":", "2", "]", "if", "offset", "==", "0", ":", "new_list", "[", "0", "]", "+=", "bytecode_delta", "else", ":", "addr", "=", "0", "it", "=", "zip", "(", "byte_increments", ",", "line_increments", ")", "for", "i", ",", "(", "byte_incr", ",", "_line_incr", ")", "in", "enumerate", "(", "it", ")", ":", "addr", "+=", "byte_incr", "if", "addr", "==", "offset", ":", "new_list", "[", "i", "*", "2", "]", "+=", "bytecode_delta", "break", "return", "bytes", "(", "new_list", ")" ]
Update new lines: the bytecode inserted should be the last instruction of the previous line. :return: bytes sequence of code with updated lines offsets
[ "Update", "new", "lines", ":", "the", "bytecode", "inserted", "should", "be", "the", "last", "instruction", "of", "the", "previous", "line", ".", ":", "return", ":", "bytes", "sequence", "of", "code", "with", "updated", "lines", "offsets" ]
python
train
Jaymon/captain
captain/__init__.py
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/__init__.py#L229-L248
def run(self, raw_args): """parse and import the script, and then run the script's main function""" parser = self.parser args, kwargs = parser.parse_callback_args(raw_args) callback = kwargs.pop("main_callback") if parser.has_injected_quiet(): levels = kwargs.pop("quiet_inject", "") logging.inject_quiet(levels) try: ret_code = callback(*args, **kwargs) ret_code = int(ret_code) if ret_code else 0 except ArgError as e: # https://hg.python.org/cpython/file/2.7/Lib/argparse.py#l2374 echo.err("{}: error: {}", parser.prog, str(e)) ret_code = 2 return ret_code
[ "def", "run", "(", "self", ",", "raw_args", ")", ":", "parser", "=", "self", ".", "parser", "args", ",", "kwargs", "=", "parser", ".", "parse_callback_args", "(", "raw_args", ")", "callback", "=", "kwargs", ".", "pop", "(", "\"main_callback\"", ")", "if", "parser", ".", "has_injected_quiet", "(", ")", ":", "levels", "=", "kwargs", ".", "pop", "(", "\"quiet_inject\"", ",", "\"\"", ")", "logging", ".", "inject_quiet", "(", "levels", ")", "try", ":", "ret_code", "=", "callback", "(", "*", "args", ",", "*", "*", "kwargs", ")", "ret_code", "=", "int", "(", "ret_code", ")", "if", "ret_code", "else", "0", "except", "ArgError", "as", "e", ":", "# https://hg.python.org/cpython/file/2.7/Lib/argparse.py#l2374", "echo", ".", "err", "(", "\"{}: error: {}\"", ",", "parser", ".", "prog", ",", "str", "(", "e", ")", ")", "ret_code", "=", "2", "return", "ret_code" ]
parse and import the script, and then run the script's main function
[ "parse", "and", "import", "the", "script", "and", "then", "run", "the", "script", "s", "main", "function" ]
python
valid
ronaldguillen/wave
wave/views.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/views.py#L223-L229
def get_view_name(self): """ Return the view name, as used in OPTIONS responses and in the browsable API. """ func = self.settings.VIEW_NAME_FUNCTION return func(self.__class__, getattr(self, 'suffix', None))
[ "def", "get_view_name", "(", "self", ")", ":", "func", "=", "self", ".", "settings", ".", "VIEW_NAME_FUNCTION", "return", "func", "(", "self", ".", "__class__", ",", "getattr", "(", "self", ",", "'suffix'", ",", "None", ")", ")" ]
Return the view name, as used in OPTIONS responses and in the browsable API.
[ "Return", "the", "view", "name", "as", "used", "in", "OPTIONS", "responses", "and", "in", "the", "browsable", "API", "." ]
python
train
wummel/linkchecker
linkcheck/plugins/locationinfo.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/plugins/locationinfo.py#L43-L48
def check(self, url_data): """Try to ask GeoIP database for country info.""" location = get_location(url_data.host) if location: url_data.add_info(_("URL is located in %(location)s.") % {"location": _(location)})
[ "def", "check", "(", "self", ",", "url_data", ")", ":", "location", "=", "get_location", "(", "url_data", ".", "host", ")", "if", "location", ":", "url_data", ".", "add_info", "(", "_", "(", "\"URL is located in %(location)s.\"", ")", "%", "{", "\"location\"", ":", "_", "(", "location", ")", "}", ")" ]
Try to ask GeoIP database for country info.
[ "Try", "to", "ask", "GeoIP", "database", "for", "country", "info", "." ]
python
train
jaraco/path.py
path/__init__.py
https://github.com/jaraco/path.py/blob/bbe7d99e7a64a004f866ace9ec12bd9b296908f5/path/__init__.py#L860-L869
def _hash(self, hash_name): """ Returns a hash object for the file at the current path. `hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``) that's available in the :mod:`hashlib` module. """ m = hashlib.new(hash_name) for chunk in self.chunks(8192, mode="rb"): m.update(chunk) return m
[ "def", "_hash", "(", "self", ",", "hash_name", ")", ":", "m", "=", "hashlib", ".", "new", "(", "hash_name", ")", "for", "chunk", "in", "self", ".", "chunks", "(", "8192", ",", "mode", "=", "\"rb\"", ")", ":", "m", ".", "update", "(", "chunk", ")", "return", "m" ]
Returns a hash object for the file at the current path. `hash_name` should be a hash algo name (such as ``'md5'`` or ``'sha1'``) that's available in the :mod:`hashlib` module.
[ "Returns", "a", "hash", "object", "for", "the", "file", "at", "the", "current", "path", "." ]
python
train
costastf/locationsharinglib
_CI/library/patch.py
https://github.com/costastf/locationsharinglib/blob/dcd74b0cdb59b951345df84987238763e50ef282/_CI/library/patch.py#L123-L134
def xisabs(filename): """ Cross-platform version of `os.path.isabs()` Returns True if `filename` is absolute on Linux, OS X or Windows. """ if filename.startswith(b'/'): # Linux/Unix return True elif filename.startswith(b'\\'): # Windows return True elif re.match(b'\\w:[\\\\/]', filename): # Windows return True return False
[ "def", "xisabs", "(", "filename", ")", ":", "if", "filename", ".", "startswith", "(", "b'/'", ")", ":", "# Linux/Unix", "return", "True", "elif", "filename", ".", "startswith", "(", "b'\\\\'", ")", ":", "# Windows", "return", "True", "elif", "re", ".", "match", "(", "b'\\\\w:[\\\\\\\\/]'", ",", "filename", ")", ":", "# Windows", "return", "True", "return", "False" ]
Cross-platform version of `os.path.isabs()` Returns True if `filename` is absolute on Linux, OS X or Windows.
[ "Cross", "-", "platform", "version", "of", "os", ".", "path", ".", "isabs", "()", "Returns", "True", "if", "filename", "is", "absolute", "on", "Linux", "OS", "X", "or", "Windows", "." ]
python
train
JoelBender/bacpypes
py25/bacpypes/tcp.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/tcp.py#L576-L584
def handle_error(self, error=None): """Trap for TCPServer errors, otherwise continue.""" if _debug: TCPServerActor._debug("handle_error %r", error) # pass along to the director if error is not None: self.director.actor_error(self, error) else: TCPServer.handle_error(self)
[ "def", "handle_error", "(", "self", ",", "error", "=", "None", ")", ":", "if", "_debug", ":", "TCPServerActor", ".", "_debug", "(", "\"handle_error %r\"", ",", "error", ")", "# pass along to the director", "if", "error", "is", "not", "None", ":", "self", ".", "director", ".", "actor_error", "(", "self", ",", "error", ")", "else", ":", "TCPServer", ".", "handle_error", "(", "self", ")" ]
Trap for TCPServer errors, otherwise continue.
[ "Trap", "for", "TCPServer", "errors", "otherwise", "continue", "." ]
python
train
zetaops/pyoko
pyoko/model.py
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/model.py#L457-L504
def save(self, internal=False, meta=None, index_fields=None): """ Saves object to DB. Do not override this method, use pre_save and post_save methods. Args: internal (bool): True if called within model. Used to prevent unnecessary calls to pre_save and post_save methods. meta (dict): JSON serializable meta data for logging of save operation. {'lorem': 'ipsum', 'dolar': 5} index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int'). bin is used for string fields, int is used for integer fields. [('lorem','bin'),('dolar','int')] Returns: Saved model instance. """ for f in self.on_save: f(self) if not (internal or self._pre_save_hook_called): self._pre_save_hook_called = True self.pre_save() if not self.deleted: self._handle_uniqueness() if not self.exist: self.pre_creation() old_data = self._data.copy() if self.just_created is None: self.setattrs(just_created=not self.exist) if self._just_created is None: self.setattrs(_just_created=self.just_created) self.objects.save_model(self, meta_data=meta, index_fields=index_fields) self._handle_changed_fields(old_data) self._process_relations(internal) if not (internal or self._post_save_hook_called): self._post_save_hook_called = True self.post_save() if self._just_created: self.setattrs(just_created=self._just_created, _just_created=False) self.post_creation() self._pre_save_hook_called = False self._post_save_hook_called = False if not internal: self._initial_data = self.clean_value() return self
[ "def", "save", "(", "self", ",", "internal", "=", "False", ",", "meta", "=", "None", ",", "index_fields", "=", "None", ")", ":", "for", "f", "in", "self", ".", "on_save", ":", "f", "(", "self", ")", "if", "not", "(", "internal", "or", "self", ".", "_pre_save_hook_called", ")", ":", "self", ".", "_pre_save_hook_called", "=", "True", "self", ".", "pre_save", "(", ")", "if", "not", "self", ".", "deleted", ":", "self", ".", "_handle_uniqueness", "(", ")", "if", "not", "self", ".", "exist", ":", "self", ".", "pre_creation", "(", ")", "old_data", "=", "self", ".", "_data", ".", "copy", "(", ")", "if", "self", ".", "just_created", "is", "None", ":", "self", ".", "setattrs", "(", "just_created", "=", "not", "self", ".", "exist", ")", "if", "self", ".", "_just_created", "is", "None", ":", "self", ".", "setattrs", "(", "_just_created", "=", "self", ".", "just_created", ")", "self", ".", "objects", ".", "save_model", "(", "self", ",", "meta_data", "=", "meta", ",", "index_fields", "=", "index_fields", ")", "self", ".", "_handle_changed_fields", "(", "old_data", ")", "self", ".", "_process_relations", "(", "internal", ")", "if", "not", "(", "internal", "or", "self", ".", "_post_save_hook_called", ")", ":", "self", ".", "_post_save_hook_called", "=", "True", "self", ".", "post_save", "(", ")", "if", "self", ".", "_just_created", ":", "self", ".", "setattrs", "(", "just_created", "=", "self", ".", "_just_created", ",", "_just_created", "=", "False", ")", "self", ".", "post_creation", "(", ")", "self", ".", "_pre_save_hook_called", "=", "False", "self", ".", "_post_save_hook_called", "=", "False", "if", "not", "internal", ":", "self", ".", "_initial_data", "=", "self", ".", "clean_value", "(", ")", "return", "self" ]
Saves object to DB. Do not override this method, use pre_save and post_save methods. Args: internal (bool): True if called within model. Used to prevent unnecessary calls to pre_save and post_save methods. meta (dict): JSON serializable meta data for logging of save operation. {'lorem': 'ipsum', 'dolar': 5} index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int'). bin is used for string fields, int is used for integer fields. [('lorem','bin'),('dolar','int')] Returns: Saved model instance.
[ "Saves", "object", "to", "DB", "." ]
python
train
mastro35/flows
flows/Actions/PassOnInterval.py
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/PassOnInterval.py#L71-L73
def verify_day(self, now): '''Verify the day''' return self.day == "*" or str(now.day) in self.day.split(" ")
[ "def", "verify_day", "(", "self", ",", "now", ")", ":", "return", "self", ".", "day", "==", "\"*\"", "or", "str", "(", "now", ".", "day", ")", "in", "self", ".", "day", ".", "split", "(", "\" \"", ")" ]
Verify the day
[ "Verify", "the", "day" ]
python
train
Parquery/icontract
icontract/_checkers.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_checkers.py#L252-L346
def decorate_with_checker(func: CallableT) -> CallableT: """Decorate the function with a checker that verifies the preconditions and postconditions.""" assert not hasattr(func, "__preconditions__"), \ "Expected func to have no list of preconditions (there should be only a single contract checker per function)." assert not hasattr(func, "__postconditions__"), \ "Expected func to have no list of postconditions (there should be only a single contract checker per function)." assert not hasattr(func, "__postcondition_snapshots__"), \ "Expected func to have no list of postcondition snapshots (there should be only a single contract checker " \ "per function)." sign = inspect.signature(func) param_names = list(sign.parameters.keys()) # Determine the default argument values. kwdefaults = dict() # type: Dict[str, Any] # Add to the defaults all the values that are needed by the contracts. for param in sign.parameters.values(): if param.default != inspect.Parameter.empty: kwdefaults[param.name] = param.default def wrapper(*args, **kwargs): """Wrap func by checking the preconditions and postconditions.""" preconditions = getattr(wrapper, "__preconditions__") # type: List[List[Contract]] snapshots = getattr(wrapper, "__postcondition_snapshots__") # type: List[Snapshot] postconditions = getattr(wrapper, "__postconditions__") # type: List[Contract] resolved_kwargs = _kwargs_from_call(param_names=param_names, kwdefaults=kwdefaults, args=args, kwargs=kwargs) if postconditions: if 'result' in resolved_kwargs: raise TypeError("Unexpected argument 'result' in a function decorated with postconditions.") if 'OLD' in resolved_kwargs: raise TypeError("Unexpected argument 'OLD' in a function decorated with postconditions.") # Assert the preconditions in groups. This is necessary to implement "require else" logic when a class # weakens the preconditions of its base class. violation_err = None # type: Optional[ViolationError] for group in preconditions: violation_err = None try: for contract in group: _assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs) break except ViolationError as err: violation_err = err if violation_err is not None: raise violation_err # pylint: disable=raising-bad-type # Capture the snapshots if postconditions: old_as_mapping = dict() # type: MutableMapping[str, Any] for snap in snapshots: # This assert is just a last defense. # Conflicting snapshot names should have been caught before, either during the decoration or # in the meta-class. assert snap.name not in old_as_mapping, "Snapshots with the conflicting name: {}" old_as_mapping[snap.name] = _capture_snapshot(a_snapshot=snap, resolved_kwargs=resolved_kwargs) resolved_kwargs['OLD'] = _Old(mapping=old_as_mapping) # Execute the wrapped function result = func(*args, **kwargs) if postconditions: resolved_kwargs['result'] = result # Assert the postconditions as a conjunction for contract in postconditions: _assert_postcondition(contract=contract, resolved_kwargs=resolved_kwargs) return result # type: ignore # Copy __doc__ and other properties so that doctests can run functools.update_wrapper(wrapper=wrapper, wrapped=func) assert not hasattr(wrapper, "__preconditions__"), "Expected no preconditions set on a pristine contract checker." assert not hasattr(wrapper, "__postcondition_snapshots__"), \ "Expected no postcondition snapshots set on a pristine contract checker." assert not hasattr(wrapper, "__postconditions__"), "Expected no postconditions set on a pristine contract checker." # Precondition is a list of condition groups (i.e. disjunctive normal form): # each group consists of AND'ed preconditions, while the groups are OR'ed. # # This is necessary in order to implement "require else" logic when a class weakens the preconditions of # its base class. setattr(wrapper, "__preconditions__", []) setattr(wrapper, "__postcondition_snapshots__", []) setattr(wrapper, "__postconditions__", []) return wrapper
[ "def", "decorate_with_checker", "(", "func", ":", "CallableT", ")", "->", "CallableT", ":", "assert", "not", "hasattr", "(", "func", ",", "\"__preconditions__\"", ")", ",", "\"Expected func to have no list of preconditions (there should be only a single contract checker per function).\"", "assert", "not", "hasattr", "(", "func", ",", "\"__postconditions__\"", ")", ",", "\"Expected func to have no list of postconditions (there should be only a single contract checker per function).\"", "assert", "not", "hasattr", "(", "func", ",", "\"__postcondition_snapshots__\"", ")", ",", "\"Expected func to have no list of postcondition snapshots (there should be only a single contract checker \"", "\"per function).\"", "sign", "=", "inspect", ".", "signature", "(", "func", ")", "param_names", "=", "list", "(", "sign", ".", "parameters", ".", "keys", "(", ")", ")", "# Determine the default argument values.", "kwdefaults", "=", "dict", "(", ")", "# type: Dict[str, Any]", "# Add to the defaults all the values that are needed by the contracts.", "for", "param", "in", "sign", ".", "parameters", ".", "values", "(", ")", ":", "if", "param", ".", "default", "!=", "inspect", ".", "Parameter", ".", "empty", ":", "kwdefaults", "[", "param", ".", "name", "]", "=", "param", ".", "default", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrap func by checking the preconditions and postconditions.\"\"\"", "preconditions", "=", "getattr", "(", "wrapper", ",", "\"__preconditions__\"", ")", "# type: List[List[Contract]]", "snapshots", "=", "getattr", "(", "wrapper", ",", "\"__postcondition_snapshots__\"", ")", "# type: List[Snapshot]", "postconditions", "=", "getattr", "(", "wrapper", ",", "\"__postconditions__\"", ")", "# type: List[Contract]", "resolved_kwargs", "=", "_kwargs_from_call", "(", "param_names", "=", "param_names", ",", "kwdefaults", "=", "kwdefaults", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")", "if", "postconditions", ":", "if", "'result'", "in", "resolved_kwargs", ":", "raise", "TypeError", "(", "\"Unexpected argument 'result' in a function decorated with postconditions.\"", ")", "if", "'OLD'", "in", "resolved_kwargs", ":", "raise", "TypeError", "(", "\"Unexpected argument 'OLD' in a function decorated with postconditions.\"", ")", "# Assert the preconditions in groups. 
This is necessary to implement \"require else\" logic when a class", "# weakens the preconditions of its base class.", "violation_err", "=", "None", "# type: Optional[ViolationError]", "for", "group", "in", "preconditions", ":", "violation_err", "=", "None", "try", ":", "for", "contract", "in", "group", ":", "_assert_precondition", "(", "contract", "=", "contract", ",", "resolved_kwargs", "=", "resolved_kwargs", ")", "break", "except", "ViolationError", "as", "err", ":", "violation_err", "=", "err", "if", "violation_err", "is", "not", "None", ":", "raise", "violation_err", "# pylint: disable=raising-bad-type", "# Capture the snapshots", "if", "postconditions", ":", "old_as_mapping", "=", "dict", "(", ")", "# type: MutableMapping[str, Any]", "for", "snap", "in", "snapshots", ":", "# This assert is just a last defense.", "# Conflicting snapshot names should have been caught before, either during the decoration or", "# in the meta-class.", "assert", "snap", ".", "name", "not", "in", "old_as_mapping", ",", "\"Snapshots with the conflicting name: {}\"", "old_as_mapping", "[", "snap", ".", "name", "]", "=", "_capture_snapshot", "(", "a_snapshot", "=", "snap", ",", "resolved_kwargs", "=", "resolved_kwargs", ")", "resolved_kwargs", "[", "'OLD'", "]", "=", "_Old", "(", "mapping", "=", "old_as_mapping", ")", "# Execute the wrapped function", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "postconditions", ":", "resolved_kwargs", "[", "'result'", "]", "=", "result", "# Assert the postconditions as a conjunction", "for", "contract", "in", "postconditions", ":", "_assert_postcondition", "(", "contract", "=", "contract", ",", "resolved_kwargs", "=", "resolved_kwargs", ")", "return", "result", "# type: ignore", "# Copy __doc__ and other properties so that doctests can run", "functools", ".", "update_wrapper", "(", "wrapper", "=", "wrapper", ",", "wrapped", "=", "func", ")", "assert", "not", "hasattr", "(", "wrapper", ",", "\"__preconditions__\"", ")", ",", "\"Expected no preconditions set on a pristine contract checker.\"", "assert", "not", "hasattr", "(", "wrapper", ",", "\"__postcondition_snapshots__\"", ")", ",", "\"Expected no postcondition snapshots set on a pristine contract checker.\"", "assert", "not", "hasattr", "(", "wrapper", ",", "\"__postconditions__\"", ")", ",", "\"Expected no postconditions set on a pristine contract checker.\"", "# Precondition is a list of condition groups (i.e. disjunctive normal form):", "# each group consists of AND'ed preconditions, while the groups are OR'ed.", "#", "# This is necessary in order to implement \"require else\" logic when a class weakens the preconditions of", "# its base class.", "setattr", "(", "wrapper", ",", "\"__preconditions__\"", ",", "[", "]", ")", "setattr", "(", "wrapper", ",", "\"__postcondition_snapshots__\"", ",", "[", "]", ")", "setattr", "(", "wrapper", ",", "\"__postconditions__\"", ",", "[", "]", ")", "return", "wrapper" ]
Decorate the function with a checker that verifies the preconditions and postconditions.
[ "Decorate", "the", "function", "with", "a", "checker", "that", "verifies", "the", "preconditions", "and", "postconditions", "." ]
python
train
ndokter/dsmr_parser
dsmr_parser/clients/protocol.py
https://github.com/ndokter/dsmr_parser/blob/c04b0a5add58ce70153eede1a87ca171876b61c7/dsmr_parser/clients/protocol.py#L39-L46
def create_dsmr_reader(port, dsmr_version, telegram_callback, loop=None): """Creates a DSMR asyncio protocol coroutine using serial port.""" protocol, serial_settings = create_dsmr_protocol( dsmr_version, telegram_callback, loop=None) serial_settings['url'] = port conn = create_serial_connection(loop, protocol, **serial_settings) return conn
[ "def", "create_dsmr_reader", "(", "port", ",", "dsmr_version", ",", "telegram_callback", ",", "loop", "=", "None", ")", ":", "protocol", ",", "serial_settings", "=", "create_dsmr_protocol", "(", "dsmr_version", ",", "telegram_callback", ",", "loop", "=", "None", ")", "serial_settings", "[", "'url'", "]", "=", "port", "conn", "=", "create_serial_connection", "(", "loop", ",", "protocol", ",", "*", "*", "serial_settings", ")", "return", "conn" ]
Creates a DSMR asyncio protocol coroutine using serial port.
[ "Creates", "a", "DSMR", "asyncio", "protocol", "coroutine", "using", "serial", "port", "." ]
python
test
tmux-python/libtmux
libtmux/common.py
https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/common.py#L353-L378
def get_by_id(self, id): """ Return object based on ``child_id_attribute``. Parameters ---------- id : str Returns ------- object Notes ----- Based on `.get()`_ from `backbone.js`_. .. _backbone.js: http://backbonejs.org/ .. _.get(): http://backbonejs.org/#Collection-get """ for child in self.children: if child[self.child_id_attribute] == id: return child else: continue return None
[ "def", "get_by_id", "(", "self", ",", "id", ")", ":", "for", "child", "in", "self", ".", "children", ":", "if", "child", "[", "self", ".", "child_id_attribute", "]", "==", "id", ":", "return", "child", "else", ":", "continue", "return", "None" ]
Return object based on ``child_id_attribute``. Parameters ---------- id : str Returns ------- object Notes ----- Based on `.get()`_ from `backbone.js`_. .. _backbone.js: http://backbonejs.org/ .. _.get(): http://backbonejs.org/#Collection-get
[ "Return", "object", "based", "on", "child_id_attribute", "." ]
python
train
graphql-python/graphql-core-next
graphql/language/parser.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/language/parser.py#L105-L121
def parse_value(source: SourceType, **options: dict) -> ValueNode: """Parse the AST for a given string containing a GraphQL value. Throws GraphQLError if a syntax error is encountered. This is useful within tools that operate upon GraphQL Values directly and in isolation of complete GraphQL documents. Consider providing the results to the utility function: `value_from_ast()`. """ if isinstance(source, str): source = Source(source) lexer = Lexer(source, **options) expect_token(lexer, TokenKind.SOF) value = parse_value_literal(lexer, False) expect_token(lexer, TokenKind.EOF) return value
[ "def", "parse_value", "(", "source", ":", "SourceType", ",", "*", "*", "options", ":", "dict", ")", "->", "ValueNode", ":", "if", "isinstance", "(", "source", ",", "str", ")", ":", "source", "=", "Source", "(", "source", ")", "lexer", "=", "Lexer", "(", "source", ",", "*", "*", "options", ")", "expect_token", "(", "lexer", ",", "TokenKind", ".", "SOF", ")", "value", "=", "parse_value_literal", "(", "lexer", ",", "False", ")", "expect_token", "(", "lexer", ",", "TokenKind", ".", "EOF", ")", "return", "value" ]
Parse the AST for a given string containing a GraphQL value. Throws GraphQLError if a syntax error is encountered. This is useful within tools that operate upon GraphQL Values directly and in isolation of complete GraphQL documents. Consider providing the results to the utility function: `value_from_ast()`.
[ "Parse", "the", "AST", "for", "a", "given", "string", "containing", "a", "GraphQL", "value", "." ]
python
train
5monkeys/content-io
cio/backends/base.py
https://github.com/5monkeys/content-io/blob/8c8519c74cbadab871f7151c0e02252cb5753759/cio/backends/base.py#L26-L34
def get(self, uri): """ Return node for uri or None if not exists: {uri: x, content: y} """ cache_key = self._build_cache_key(uri) value = self._get(cache_key) if value is not None: return self._decode_node(uri, value)
[ "def", "get", "(", "self", ",", "uri", ")", ":", "cache_key", "=", "self", ".", "_build_cache_key", "(", "uri", ")", "value", "=", "self", ".", "_get", "(", "cache_key", ")", "if", "value", "is", "not", "None", ":", "return", "self", ".", "_decode_node", "(", "uri", ",", "value", ")" ]
Return node for uri or None if not exists: {uri: x, content: y}
[ "Return", "node", "for", "uri", "or", "None", "if", "not", "exists", ":", "{", "uri", ":", "x", "content", ":", "y", "}" ]
python
train
inveniosoftware-contrib/invenio-workflows
invenio_workflows/api.py
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/api.py#L280-L297
def set_action(self, action, message): """Set the action to be taken for this object. Assign a special "action" to this object to be taken into consideration in Holding Pen. The widget is referred to by a string with the filename minus extension. A message is also needed to tell the user the action required in a textual way. :param action: name of the action to add (i.e. "approval") :type action: string :param message: message to show to the user :type message: string """ self.extra_data["_action"] = action self.extra_data["_message"] = message
[ "def", "set_action", "(", "self", ",", "action", ",", "message", ")", ":", "self", ".", "extra_data", "[", "\"_action\"", "]", "=", "action", "self", ".", "extra_data", "[", "\"_message\"", "]", "=", "message" ]
Set the action to be taken for this object. Assign a special "action" to this object to be taken into consideration in Holding Pen. The widget is referred to by a string with the filename minus extension. A message is also needed to tell the user the action required in a textual way. :param action: name of the action to add (i.e. "approval") :type action: string :param message: message to show to the user :type message: string
[ "Set", "the", "action", "to", "be", "taken", "for", "this", "object", "." ]
python
train
martinkosir/neverbounce-python
neverbounce/client.py
https://github.com/martinkosir/neverbounce-python/blob/8d8b3f381dbff2a753a8770fac0d2bfab80d5bec/neverbounce/client.py#L18-L25
def verify(self, email): """ Verify a single email address. :param str email: Email address to verify. :return: A VerifiedEmail object. """ resp = self._call(endpoint='single', data={'email': email}) return VerifiedEmail(email, resp['result'])
[ "def", "verify", "(", "self", ",", "email", ")", ":", "resp", "=", "self", ".", "_call", "(", "endpoint", "=", "'single'", ",", "data", "=", "{", "'email'", ":", "email", "}", ")", "return", "VerifiedEmail", "(", "email", ",", "resp", "[", "'result'", "]", ")" ]
Verify a single email address. :param str email: Email address to verify. :return: A VerifiedEmail object.
[ "Verify", "a", "single", "email", "address", ".", ":", "param", "str", "email", ":", "Email", "address", "to", "verify", ".", ":", "return", ":", "A", "VerifiedEmail", "object", "." ]
python
train
Mxit/python-mxit
mxit/services.py
https://github.com/Mxit/python-mxit/blob/6b18a54ef6fbfe1f9d94755ba3d4ad77743c8b0c/mxit/services.py#L71-L84
def get_user_id(self, mxit_id, scope='profile/public'): """ Retrieve the Mxit user's internal "user ID" No user authentication required """ user_id = _get( token=self.oauth.get_app_token(scope), uri='/user/lookup/' + urllib.quote(mxit_id) ) if user_id.startswith('"') and user_id.endswith('"'): user_id = user_id[1:-1] return user_id
[ "def", "get_user_id", "(", "self", ",", "mxit_id", ",", "scope", "=", "'profile/public'", ")", ":", "user_id", "=", "_get", "(", "token", "=", "self", ".", "oauth", ".", "get_app_token", "(", "scope", ")", ",", "uri", "=", "'/user/lookup/'", "+", "urllib", ".", "quote", "(", "mxit_id", ")", ")", "if", "user_id", ".", "startswith", "(", "'\"'", ")", "and", "user_id", ".", "endswith", "(", "'\"'", ")", ":", "user_id", "=", "user_id", "[", "1", ":", "-", "1", "]", "return", "user_id" ]
Retrieve the Mxit user's internal "user ID" No user authentication required
[ "Retrieve", "the", "Mxit", "user", "s", "internal", "user", "ID", "No", "user", "authentication", "required" ]
python
train
shkarupa-alex/tfunicode
tfunicode/python/ops/__init__.py
https://github.com/shkarupa-alex/tfunicode/blob/72ee2f484b6202394dcda3db47245bc78ae2267d/tfunicode/python/ops/__init__.py#L26-L51
def _combine_sparse_successor(parent_indices, parent_shape, child_indices, child_values, child_shape, name=None): """Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding first `SparseTensor`'s values. Args: parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape child_indices: 2D int64 `Tensor` with child `SparseTensor` indices child_values: 1D int64 `Tensor` with child `SparseTensor` values child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape name: A name for the operation (optional). Returns: `SparseTensor` with an additional dimension of size 1 added. """ with ops.name_scope(name, "CombineSparseSuccessor", [parent_indices, parent_shape, child_indices, child_values, child_shape]): indices, values, shape = ops_module.combine_sparse_successor( parent_indices, parent_shape, child_indices, child_values, child_shape ) return tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
[ "def", "_combine_sparse_successor", "(", "parent_indices", ",", "parent_shape", ",", "child_indices", ",", "child_values", ",", "child_shape", ",", "name", "=", "None", ")", ":", "with", "ops", ".", "name_scope", "(", "name", ",", "\"CombineSparseSuccessor\"", ",", "[", "parent_indices", ",", "parent_shape", ",", "child_indices", ",", "child_values", ",", "child_shape", "]", ")", ":", "indices", ",", "values", ",", "shape", "=", "ops_module", ".", "combine_sparse_successor", "(", "parent_indices", ",", "parent_shape", ",", "child_indices", ",", "child_values", ",", "child_shape", ")", "return", "tf", ".", "SparseTensor", "(", "indices", "=", "indices", ",", "values", "=", "values", ",", "dense_shape", "=", "shape", ")" ]
Combines two string `SparseTensor`s, where second `SparseTensor` is the result of expanding first `SparseTensor`'s values. Args: parent_indices: 2D int64 `Tensor` with parent `SparseTensor` indices parent_shape: 1D int64 `Tensor` with parent `SparseTensor` dense_shape child_indices: 2D int64 `Tensor` with child `SparseTensor` indices child_values: 1D int64 `Tensor` with child `SparseTensor` values child_shape: 1D int64 `Tensor` with child `SparseTensor` dense_shape name: A name for the operation (optional). Returns: `SparseTensor` with an additional dimension of size 1 added.
[ "Combines", "two", "string", "SparseTensor", "s", "where", "second", "SparseTensor", "is", "the", "result", "of", "expanding", "first", "SparseTensor", "s", "values", "." ]
python
train
portfors-lab/sparkle
sparkle/stim/auto_parameter_model.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/stim/auto_parameter_model.py#L131-L150
def nStepsForParam(self, param): """Gets the number of steps *parameter* will yield :param param: parameter to get the expansion count for :type param: dict """ if param['parameter'] == 'filename': return len(param['names']) else: if param['step'] > 0: if abs(param['start'] - param['stop']) < param['step']: return 0 # print 'range', param['start'] - param['stop'] nsteps = np.around(abs(param['start'] - param['stop']), 4) / float(param['step']) nsteps = int(np.ceil(nsteps)+1) elif param['start'] == param['stop']: nsteps = 1 else: nsteps = 0 return nsteps
[ "def", "nStepsForParam", "(", "self", ",", "param", ")", ":", "if", "param", "[", "'parameter'", "]", "==", "'filename'", ":", "return", "len", "(", "param", "[", "'names'", "]", ")", "else", ":", "if", "param", "[", "'step'", "]", ">", "0", ":", "if", "abs", "(", "param", "[", "'start'", "]", "-", "param", "[", "'stop'", "]", ")", "<", "param", "[", "'step'", "]", ":", "return", "0", "# print 'range', param['start'] - param['stop']", "nsteps", "=", "np", ".", "around", "(", "abs", "(", "param", "[", "'start'", "]", "-", "param", "[", "'stop'", "]", ")", ",", "4", ")", "/", "float", "(", "param", "[", "'step'", "]", ")", "nsteps", "=", "int", "(", "np", ".", "ceil", "(", "nsteps", ")", "+", "1", ")", "elif", "param", "[", "'start'", "]", "==", "param", "[", "'stop'", "]", ":", "nsteps", "=", "1", "else", ":", "nsteps", "=", "0", "return", "nsteps" ]
Gets the number of steps *parameter* will yield :param param: parameter to get the expansion count for :type param: dict
[ "Gets", "the", "number", "of", "steps", "*", "parameter", "*", "will", "yield" ]
python
train
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L2252-L2256
def macros_attachment_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/macros#show-macro-attachment" api_path = "/api/v2/macros/attachments/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "macros_attachment_show", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/macros/attachments/{id}.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/macros#show-macro-attachment
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "macros#show", "-", "macro", "-", "attachment" ]
python
train
collectiveacuity/labPack
labpack/messaging/telegram.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/messaging/telegram.py#L229-L254
def _validate_type(self, file_name, extension_map, method_title, argument_title): ''' a helper method to validate extension type of file :param file_name: string with file name to test :param extension_map: dictionary with extensions names and regex patterns :param method_title: string with title of feeder method :param argument_title: string with title of argument key from feeder method :return: string with file extension ''' # validate file extension from labpack.parsing.regex import labRegex file_extension = '' ext_types = labRegex(extension_map) file_mapping = ext_types.map(file_name)[0] extension_list = [] for key, value in file_mapping.items(): if isinstance(value, bool): extension_list.append('.%s' % key) if value and isinstance(value, bool): file_extension = '.%s' % key if not file_extension: raise ValueError('%s(%s=%s) must be one of %s file types.' % (method_title, argument_title, file_name, extension_list)) return file_extension
[ "def", "_validate_type", "(", "self", ",", "file_name", ",", "extension_map", ",", "method_title", ",", "argument_title", ")", ":", "# validate file extension\r", "from", "labpack", ".", "parsing", ".", "regex", "import", "labRegex", "file_extension", "=", "''", "ext_types", "=", "labRegex", "(", "extension_map", ")", "file_mapping", "=", "ext_types", ".", "map", "(", "file_name", ")", "[", "0", "]", "extension_list", "=", "[", "]", "for", "key", ",", "value", "in", "file_mapping", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "extension_list", ".", "append", "(", "'.%s'", "%", "key", ")", "if", "value", "and", "isinstance", "(", "value", ",", "bool", ")", ":", "file_extension", "=", "'.%s'", "%", "key", "if", "not", "file_extension", ":", "raise", "ValueError", "(", "'%s(%s=%s) must be one of %s file types.'", "%", "(", "method_title", ",", "argument_title", ",", "file_name", ",", "extension_list", ")", ")", "return", "file_extension" ]
a helper method to validate extension type of file :param file_name: string with file name to test :param extension_map: dictionary with extensions names and regex patterns :param method_title: string with title of feeder method :param argument_title: string with title of argument key from feeder method :return: string with file extension
[ "a", "helper", "method", "to", "validate", "extension", "type", "of", "file", ":", "param", "file_name", ":", "string", "with", "file", "name", "to", "test", ":", "param", "extension_map", ":", "dictionary", "with", "extensions", "names", "and", "regex", "patterns", ":", "param", "method_title", ":", "string", "with", "title", "of", "feeder", "method", ":", "param", "argument_title", ":", "string", "with", "title", "of", "argument", "key", "from", "feeder", "method", ":", "return", ":", "string", "with", "file", "extension" ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/src/mapreduce/records.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/records.py#L289-L338
def read(self): """Reads record from current position in reader. Returns: original bytes stored in a single record. """ data = None while True: last_offset = self.tell() try: (chunk, record_type) = self.__try_read_record() if record_type == _RECORD_TYPE_NONE: self.__sync() elif record_type == _RECORD_TYPE_FULL: if data is not None: logging.warning( "Ordering corruption: Got FULL record while already " "in a chunk at offset %d", last_offset) return chunk elif record_type == _RECORD_TYPE_FIRST: if data is not None: logging.warning( "Ordering corruption: Got FIRST record while already " "in a chunk at offset %d", last_offset) data = chunk elif record_type == _RECORD_TYPE_MIDDLE: if data is None: logging.warning( "Ordering corruption: Got MIDDLE record before FIRST " "record at offset %d", last_offset) else: data += chunk elif record_type == _RECORD_TYPE_LAST: if data is None: logging.warning( "Ordering corruption: Got LAST record but no chunk is in " "progress at offset %d", last_offset) else: result = data + chunk data = None return result else: raise errors.InvalidRecordError( "Unsupported record type: %s" % record_type) except errors.InvalidRecordError, e: logging.warning("Invalid record encountered at %s (%s). Syncing to " "the next block", last_offset, e) data = None self.__sync()
[ "def", "read", "(", "self", ")", ":", "data", "=", "None", "while", "True", ":", "last_offset", "=", "self", ".", "tell", "(", ")", "try", ":", "(", "chunk", ",", "record_type", ")", "=", "self", ".", "__try_read_record", "(", ")", "if", "record_type", "==", "_RECORD_TYPE_NONE", ":", "self", ".", "__sync", "(", ")", "elif", "record_type", "==", "_RECORD_TYPE_FULL", ":", "if", "data", "is", "not", "None", ":", "logging", ".", "warning", "(", "\"Ordering corruption: Got FULL record while already \"", "\"in a chunk at offset %d\"", ",", "last_offset", ")", "return", "chunk", "elif", "record_type", "==", "_RECORD_TYPE_FIRST", ":", "if", "data", "is", "not", "None", ":", "logging", ".", "warning", "(", "\"Ordering corruption: Got FIRST record while already \"", "\"in a chunk at offset %d\"", ",", "last_offset", ")", "data", "=", "chunk", "elif", "record_type", "==", "_RECORD_TYPE_MIDDLE", ":", "if", "data", "is", "None", ":", "logging", ".", "warning", "(", "\"Ordering corruption: Got MIDDLE record before FIRST \"", "\"record at offset %d\"", ",", "last_offset", ")", "else", ":", "data", "+=", "chunk", "elif", "record_type", "==", "_RECORD_TYPE_LAST", ":", "if", "data", "is", "None", ":", "logging", ".", "warning", "(", "\"Ordering corruption: Got LAST record but no chunk is in \"", "\"progress at offset %d\"", ",", "last_offset", ")", "else", ":", "result", "=", "data", "+", "chunk", "data", "=", "None", "return", "result", "else", ":", "raise", "errors", ".", "InvalidRecordError", "(", "\"Unsupported record type: %s\"", "%", "record_type", ")", "except", "errors", ".", "InvalidRecordError", ",", "e", ":", "logging", ".", "warning", "(", "\"Invalid record encountered at %s (%s). Syncing to \"", "\"the next block\"", ",", "last_offset", ",", "e", ")", "data", "=", "None", "self", ".", "__sync", "(", ")" ]
Reads record from current position in reader. Returns: original bytes stored in a single record.
[ "Reads", "record", "from", "current", "position", "in", "reader", "." ]
python
train
kencochrane/django-defender
defender/utils.py
https://github.com/kencochrane/django-defender/blob/e3e547dbb83235e0d564a6d64652c7df00412ff2/defender/utils.py#L304-L310
def is_user_already_locked(username): """Is this username already locked?""" if username is None: return False if config.DISABLE_USERNAME_LOCKOUT: return False return REDIS_SERVER.get(get_username_blocked_cache_key(username))
[ "def", "is_user_already_locked", "(", "username", ")", ":", "if", "username", "is", "None", ":", "return", "False", "if", "config", ".", "DISABLE_USERNAME_LOCKOUT", ":", "return", "False", "return", "REDIS_SERVER", ".", "get", "(", "get_username_blocked_cache_key", "(", "username", ")", ")" ]
Is this username already locked?
[ "Is", "this", "username", "already", "locked?" ]
python
train
mayfield/shellish
shellish/layout/table.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L671-L702
def _column_pack_filter(self, next_filter): """ Top-align column data irrespective of original row alignment. E.g. INPUT: [ ["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"] ] OUTPUT: [ ["1a", "2a"], ["1b", "2b"], [<blank>, "2c"], [<blank>, "2d"] ] """ next(next_filter) col_count = len(self.widths) queues = [collections.deque() for _ in range(col_count)] while True: try: row = (yield) except GeneratorExit: break for col, queue in zip(row, queues): if col is not None: queue.append(col) if all(queues): next_filter.send([x.popleft() for x in queues]) blanks = list(map(self._get_blank_cell, range(col_count))) while any(queues): next_filter.send([q.popleft() if q else blank for q, blank in zip(queues, blanks)])
[ "def", "_column_pack_filter", "(", "self", ",", "next_filter", ")", ":", "next", "(", "next_filter", ")", "col_count", "=", "len", "(", "self", ".", "widths", ")", "queues", "=", "[", "collections", ".", "deque", "(", ")", "for", "_", "in", "range", "(", "col_count", ")", "]", "while", "True", ":", "try", ":", "row", "=", "(", "yield", ")", "except", "GeneratorExit", ":", "break", "for", "col", ",", "queue", "in", "zip", "(", "row", ",", "queues", ")", ":", "if", "col", "is", "not", "None", ":", "queue", ".", "append", "(", "col", ")", "if", "all", "(", "queues", ")", ":", "next_filter", ".", "send", "(", "[", "x", ".", "popleft", "(", ")", "for", "x", "in", "queues", "]", ")", "blanks", "=", "list", "(", "map", "(", "self", ".", "_get_blank_cell", ",", "range", "(", "col_count", ")", ")", ")", "while", "any", "(", "queues", ")", ":", "next_filter", ".", "send", "(", "[", "q", ".", "popleft", "(", ")", "if", "q", "else", "blank", "for", "q", ",", "blank", "in", "zip", "(", "queues", ",", "blanks", ")", "]", ")" ]
Top-align column data irrespective of original row alignment. E.g. INPUT: [ ["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"] ] OUTPUT: [ ["1a", "2a"], ["1b", "2b"], [<blank>, "2c"], [<blank>, "2d"] ]
[ "Top", "-", "align", "column", "data", "irrespective", "of", "original", "row", "alignment", ".", "E", ".", "g", ".", "INPUT", ":", "[", "[", "1a", "2a", "]", "[", "None", "2b", "]", "[", "1b", "2c", "]", "[", "None", "2d", "]", "]", "OUTPUT", ":", "[", "[", "1a", "2a", "]", "[", "1b", "2b", "]", "[", "<blank", ">", "2c", "]", "[", "<blank", ">", "2d", "]", "]" ]
python
train
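The same top-alignment can be shown without the coroutine plumbing; a batch sketch that reproduces the INPUT/OUTPUT example from the docstring:

import collections

def pack_columns(rows, blank=""):
    """Top-align columns: drop the Nones, pad the bottom with blanks."""
    col_count = max(len(row) for row in rows)
    queues = [collections.deque() for _ in range(col_count)]
    for row in rows:
        for col, queue in zip(row, queues):
            if col is not None:
                queue.append(col)
    height = max(len(q) for q in queues)
    return [[q.popleft() if q else blank for q in queues] for _ in range(height)]

rows = [["1a", "2a"], [None, "2b"], ["1b", "2c"], [None, "2d"]]
assert pack_columns(rows) == [["1a", "2a"], ["1b", "2b"], ["", "2c"], ["", "2d"]]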
DLR-RM/RAFCON
source/rafcon/utils/type_helpers.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/utils/type_helpers.py#L33-L70
def convert_string_to_type(string_value): """Converts a string into a type or class :param string_value: the string to be converted, e.g. "int" :return: The type derived from string_value, e.g. int """ # If the parameter is already a type, return it if string_value in ['None', type(None).__name__]: return type(None) if isinstance(string_value, type) or isclass(string_value): return string_value # Get object associated with string # First check whether we are having a built in type (int, str, etc) if sys.version_info >= (3,): import builtins as builtins23 else: import __builtin__ as builtins23 if hasattr(builtins23, string_value): obj = getattr(builtins23, string_value) if type(obj) is type: return obj # If not, try to locate the module try: obj = locate(string_value) except ErrorDuringImport as e: raise ValueError("Unknown type '{0}'".format(e)) # Check whether object is a type if type(obj) is type: return locate(string_value) # Check whether object is a class if isclass(obj): return obj # Raise error if none is the case raise ValueError("Unknown type '{0}'".format(string_value))
[ "def", "convert_string_to_type", "(", "string_value", ")", ":", "# If the parameter is already a type, return it", "if", "string_value", "in", "[", "'None'", ",", "type", "(", "None", ")", ".", "__name__", "]", ":", "return", "type", "(", "None", ")", "if", "isinstance", "(", "string_value", ",", "type", ")", "or", "isclass", "(", "string_value", ")", ":", "return", "string_value", "# Get object associated with string", "# First check whether we are having a built in type (int, str, etc)", "if", "sys", ".", "version_info", ">=", "(", "3", ",", ")", ":", "import", "builtins", "as", "builtins23", "else", ":", "import", "__builtin__", "as", "builtins23", "if", "hasattr", "(", "builtins23", ",", "string_value", ")", ":", "obj", "=", "getattr", "(", "builtins23", ",", "string_value", ")", "if", "type", "(", "obj", ")", "is", "type", ":", "return", "obj", "# If not, try to locate the module", "try", ":", "obj", "=", "locate", "(", "string_value", ")", "except", "ErrorDuringImport", "as", "e", ":", "raise", "ValueError", "(", "\"Unknown type '{0}'\"", ".", "format", "(", "e", ")", ")", "# Check whether object is a type", "if", "type", "(", "obj", ")", "is", "type", ":", "return", "locate", "(", "string_value", ")", "# Check whether object is a class", "if", "isclass", "(", "obj", ")", ":", "return", "obj", "# Raise error if none is the case", "raise", "ValueError", "(", "\"Unknown type '{0}'\"", ".", "format", "(", "string_value", ")", ")" ]
Converts a string into a type or class :param string_value: the string to be converted, e.g. "int" :return: The type derived from string_value, e.g. int
[ "Converts", "a", "string", "into", "a", "type", "or", "class" ]
python
train
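A condensed Python 3 sketch of the same lookup order (builtins first, then pydoc.locate), dropping the None and already-a-type special cases that the full function handles:

import builtins
from pydoc import locate

def to_type(name):
    obj = getattr(builtins, name, None) or locate(name)
    if isinstance(obj, type):
        return obj
    raise ValueError("Unknown type '%s'" % name)

assert to_type("int") is int
assert to_type("collections.OrderedDict").__name__ == "OrderedDict"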
DistrictDataLabs/yellowbrick
yellowbrick/classifier/class_prediction_error.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/classifier/class_prediction_error.py#L128-L145
def draw(self): """ Renders the class prediction error across the axis. """ indices = np.arange(len(self.classes_)) prev = np.zeros(len(self.classes_)) colors = resolve_colors( colors=self.colors, n_colors=len(self.classes_)) for idx, row in enumerate(self.predictions_): self.ax.bar(indices, row, label=self.classes_[idx], bottom=prev, color=colors[idx]) prev += row return self.ax
[ "def", "draw", "(", "self", ")", ":", "indices", "=", "np", ".", "arange", "(", "len", "(", "self", ".", "classes_", ")", ")", "prev", "=", "np", ".", "zeros", "(", "len", "(", "self", ".", "classes_", ")", ")", "colors", "=", "resolve_colors", "(", "colors", "=", "self", ".", "colors", ",", "n_colors", "=", "len", "(", "self", ".", "classes_", ")", ")", "for", "idx", ",", "row", "in", "enumerate", "(", "self", ".", "predictions_", ")", ":", "self", ".", "ax", ".", "bar", "(", "indices", ",", "row", ",", "label", "=", "self", ".", "classes_", "[", "idx", "]", ",", "bottom", "=", "prev", ",", "color", "=", "colors", "[", "idx", "]", ")", "prev", "+=", "row", "return", "self", ".", "ax" ]
Renders the class prediction error across the axis.
[ "Renders", "the", "class", "prediction", "error", "across", "the", "axis", "." ]
python
train
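The stacked-bar bookkeeping (bottom=prev, then prev += row) is the whole trick; a standalone sketch with made-up counts, independent of yellowbrick:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs anywhere
import matplotlib.pyplot as plt

classes = ["cat", "dog"]
predictions = np.array([[8, 2],    # counts contributed by the first class
                        [3, 7]])   # counts contributed by the second class

fig, ax = plt.subplots()
indices = np.arange(len(classes))
prev = np.zeros(len(classes))
for idx, row in enumerate(predictions):
    ax.bar(indices, row, bottom=prev, label=classes[idx])  # stack on prev
    prev += row
ax.set_xticks(indices)
ax.set_xticklabels(classes)
ax.legend()
fig.savefig("class_prediction_error.png")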
pingali/dgit
dgitcore/datasets/common.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/datasets/common.py#L497-L514
def annotate_metadata_code(repo, files): """ Update metadata with the commit information """ package = repo.package package['code'] = [] for p in files: matching_files = glob2.glob("**/{}".format(p)) for f in matching_files: absf = os.path.abspath(f) print("Add commit data for {}".format(f)) package['code'].append(OrderedDict([ ('script', f), ('permalink', repo.manager.permalink(repo, absf)), ('mimetypes', mimetypes.guess_type(absf)[0]), ('sha256', compute_sha256(absf)) ]))
[ "def", "annotate_metadata_code", "(", "repo", ",", "files", ")", ":", "package", "=", "repo", ".", "package", "package", "[", "'code'", "]", "=", "[", "]", "for", "p", "in", "files", ":", "matching_files", "=", "glob2", ".", "glob", "(", "\"**/{}\"", ".", "format", "(", "p", ")", ")", "for", "f", "in", "matching_files", ":", "absf", "=", "os", ".", "path", ".", "abspath", "(", "f", ")", "print", "(", "\"Add commit data for {}\"", ".", "format", "(", "f", ")", ")", "package", "[", "'code'", "]", ".", "append", "(", "OrderedDict", "(", "[", "(", "'script'", ",", "f", ")", ",", "(", "'permalink'", ",", "repo", ".", "manager", ".", "permalink", "(", "repo", ",", "absf", ")", ")", ",", "(", "'mimetypes'", ",", "mimetypes", ".", "guess_type", "(", "absf", ")", "[", "0", "]", ")", ",", "(", "'sha256'", ",", "compute_sha256", "(", "absf", ")", ")", "]", ")", ")" ]
Update metadata with the commit information
[ "Update", "metadata", "with", "the", "commit", "information" ]
python
valid
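compute_sha256 is referenced but not shown in this record; a typical streaming implementation (an assumption about dgit's helper, not its actual code) looks like:

import hashlib

def compute_sha256(path, chunk_size=65536):
    """Hash a file in fixed-size chunks so large files stay memory-cheap."""
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()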
mattsolo1/hmmerclust
hmmerclust/hmmerclust.py
https://github.com/mattsolo1/hmmerclust/blob/471596043a660097ed8b11430d42118a8fd25798/hmmerclust/hmmerclust.py#L539-L555
def import_tree_order_from_file(self, MyOrganismDB, filename): ''' Import the accession list that has been ordered by position in a phylogenetic tree. Get the index in the list, and add this to the Organism object. Later we can use this position to make a heatmap that matches up to a phylogenetic tree. ''' tree_order = [acc.strip() for acc in open(filename)] #print tree_order for org in MyOrganismDB.organisms: for tree_accession in tree_order: #print tree_accession if org.accession == tree_accession: org.tree_order = tree_order.index(tree_accession)
[ "def", "import_tree_order_from_file", "(", "self", ",", "MyOrganismDB", ",", "filename", ")", ":", "tree_order", "=", "[", "acc", ".", "strip", "(", ")", "for", "acc", "in", "open", "(", "filename", ")", "]", "#print tree_order", "for", "org", "in", "MyOrganismDB", ".", "organisms", ":", "for", "tree_accession", "in", "tree_order", ":", "#print tree_accession", "if", "org", ".", "accession", "==", "tree_accession", ":", "org", ".", "tree_order", "=", "tree_order", ".", "index", "(", "tree_accession", ")" ]
Import the accession list that has been ordered by position in a phylogenetic tree. Get the index in the list, and add this to the Organism object. Later we can use this position to make a heatmap that matches up to a phylogenetic tree.
[ "Import", "the", "accession", "list", "that", "has", "been", "ordered", "by", "position", "in", "a", "phylogenetic", "tree", ".", "Get", "the", "index", "in", "the", "list", "and", "add", "this", "to", "the", "Organism", "object", ".", "Later", "we", "can", "use", "this", "position", "to", "make", "a", "heatmap", "that", "matches", "up", "to", "a", "phylogenetic", "tree", "." ]
python
train
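The nested loop re-scans tree_order once per organism; a dict of accession -> index does the same assignment in one pass (sketched against a minimal stand-in Organism):

class Organism:
    def __init__(self, accession):
        self.accession = accession
        self.tree_order = None

def apply_tree_order(organisms, accessions):
    order = {acc: i for i, acc in enumerate(accessions)}  # O(1) lookups
    for org in organisms:
        if org.accession in order:
            org.tree_order = order[org.accession]

orgs = [Organism("B"), Organism("A")]
apply_tree_order(orgs, ["A", "B"])
assert (orgs[0].tree_order, orgs[1].tree_order) == (1, 0)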
clinicedc/edc-auth
edc_auth/import_users.py
https://github.com/clinicedc/edc-auth/blob/e633a5461139d3799f389f7bed0e02c9d2c1e103/edc_auth/import_users.py#L27-L91
def import_users( path, resource_name=None, send_email_to_user=None, alternate_email=None, verbose=None, export_to_file=None, **kwargs, ): """Import users from a CSV file with columns: username first_name last_name email sites: a comma-separated list of sites groups: a comma-separated list of groups job_title """ users = [] with open(path) as f: reader = csv.DictReader(f) for user_data in reader: username = user_data.get("username") site_names = user_data.get("sites").lower().split(",") group_names = user_data.get("groups").lower().split(",") first_name = user_data.get("first_name") last_name = user_data.get("last_name") email = user_data.get("email") o = UserImporter( username=username, first_name=first_name, last_name=last_name, email=email, site_names=site_names, group_names=group_names, resource_name=resource_name, send_email_to_user=send_email_to_user, alternate_email=alternate_email, verbose=verbose, **kwargs, ) users.append( { "username": o.user.username, "password": o.password, "first_name": o.user.first_name, "last_name": o.user.last_name, "sites": o.site_names, "groups": o.group_names, } ) if export_to_file: fieldnames = [ "username", "password", "first_name", "last_name", "sites", "groups", ] with open(path + "new.csv", "w+") as f: writer = csv.DictWriter(f, fieldnames=fieldnames) writer.writeheader() for user in users: writer.writerow(user)
[ "def", "import_users", "(", "path", ",", "resource_name", "=", "None", ",", "send_email_to_user", "=", "None", ",", "alternate_email", "=", "None", ",", "verbose", "=", "None", ",", "export_to_file", "=", "None", ",", "*", "*", "kwargs", ",", ")", ":", "users", "=", "[", "]", "with", "open", "(", "path", ")", "as", "f", ":", "reader", "=", "csv", ".", "DictReader", "(", "f", ")", "for", "user_data", "in", "reader", ":", "username", "=", "user_data", ".", "get", "(", "\"username\"", ")", "site_names", "=", "user_data", ".", "get", "(", "\"sites\"", ")", ".", "lower", "(", ")", ".", "split", "(", "\",\"", ")", "group_names", "=", "user_data", ".", "get", "(", "\"groups\"", ")", ".", "lower", "(", ")", ".", "split", "(", "\",\"", ")", "first_name", "=", "user_data", ".", "get", "(", "\"first_name\"", ")", "last_name", "=", "user_data", ".", "get", "(", "\"last_name\"", ")", "email", "=", "user_data", ".", "get", "(", "\"email\"", ")", "o", "=", "UserImporter", "(", "username", "=", "username", ",", "first_name", "=", "first_name", ",", "last_name", "=", "last_name", ",", "email", "=", "email", ",", "site_names", "=", "site_names", ",", "group_names", "=", "group_names", ",", "resource_name", "=", "resource_name", ",", "send_email_to_user", "=", "send_email_to_user", ",", "alternate_email", "=", "alternate_email", ",", "verbose", "=", "verbose", ",", "*", "*", "kwargs", ",", ")", "users", ".", "append", "(", "{", "\"username\"", ":", "o", ".", "user", ".", "username", ",", "\"password\"", ":", "o", ".", "password", ",", "\"first_name\"", ":", "o", ".", "user", ".", "first_name", ",", "\"last_name\"", ":", "o", ".", "user", ".", "last_name", ",", "\"sites\"", ":", "o", ".", "site_names", ",", "\"groups\"", ":", "o", ".", "group_names", ",", "}", ")", "if", "export_to_file", ":", "fieldnames", "=", "[", "\"username\"", ",", "\"password\"", ",", "\"first_name\"", ",", "\"last_name\"", ",", "\"sites\"", ",", "\"groups\"", ",", "]", "with", "open", "(", "path", "+", "\"new.csv\"", ",", "\"w+\"", ")", "as", "f", ":", "writer", "=", "csv", ".", "DictWriter", "(", "f", ",", "fieldnames", "=", "fieldnames", ")", "writer", ".", "writeheader", "(", ")", "for", "user", "in", "users", ":", "writer", ".", "writerow", "(", "user", ")" ]
Import users from a CSV file with columns: username first_name last_name email sites: a comma-separated list of sites groups: a comma-separated list of groups job_title
[ "Import", "users", "from", "a", "CSV", "file", "with", "columns", ":", "username", "first_name", "last_name", "email", "sites", ":", "a", "comma", "-", "separated", "list", "of", "sites", "groups", ":", "a", "comma", "-", "separated", "list", "of", "groups", "job_title" ]
python
train
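The expected CSV shape, shown as a tiny in-memory round trip with csv.DictReader (UserImporter itself is Django-side and not reproduced here):

import csv
import io

csv_text = (
    "username,first_name,last_name,email,sites,groups\n"
    'jdoe,Jane,Doe,jdoe@example.com,"site_a,site_b","Clinic,Lab"\n'
)
for row in csv.DictReader(io.StringIO(csv_text)):
    site_names = row["sites"].lower().split(",")
    group_names = row["groups"].lower().split(",")
    assert site_names == ["site_a", "site_b"]
    assert group_names == ["clinic", "lab"]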
eqcorrscan/EQcorrscan
eqcorrscan/utils/plotting.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/plotting.py#L1913-L1992
def spec_trace(traces, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9, size=(10, 13), fig=None, **kwargs): """ Plots seismic data with spectrogram behind. Takes a stream or list of traces and plots the trace with the spectra beneath it. :type traces: list :param traces: Traces to be plotted, can be a single :class:`obspy.core.stream.Stream`, or a list of :class:`obspy.core.trace.Trace`. :type cmap: str :param cmap: `Matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_. :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) \ to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type fig: matplotlib.figure.Figure :param fig: Figure to plot onto, defaults to self generating. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import spec_trace >>> st = read() >>> spec_trace(st, trc='white') # doctest: +SKIP .. plot:: from obspy import read from eqcorrscan.utils.plotting import spec_trace st = read() spec_trace(st, trc='white') """ import matplotlib.pyplot as plt if isinstance(traces, Stream): traces.sort(['station', 'channel']) if not fig: fig = plt.figure() for i, tr in enumerate(traces): if i == 0: ax = fig.add_subplot(len(traces), 1, i + 1) else: ax = fig.add_subplot(len(traces), 1, i + 1, sharex=ax) ax1, ax2 = _spec_trace(tr, cmap=cmap, wlen=wlen, log=log, trc=trc, tralpha=tralpha, axes=ax) ax.set_yticks([]) if i < len(traces) - 1: plt.setp(ax1.get_xticklabels(), visible=False) if isinstance(traces, list): ax.text(0.005, 0.85, "{0}::{1}".format(tr.id, tr.stats.starttime), bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) elif isinstance(traces, Stream): ax.text(0.005, 0.85, tr.id, bbox=dict(facecolor='white', alpha=0.8), transform=ax2.transAxes) ax.text(0.005, 0.02, str(np.max(tr.data).round(1)), bbox=dict(facecolor='white', alpha=0.95), transform=ax2.transAxes) ax.set_xlabel('Time (s)') fig.subplots_adjust(hspace=0) fig.set_size_inches(w=size[0], h=size[1], forward=True) fig.text(0.04, 0.5, 'Frequency (Hz)', va='center', rotation='vertical') fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover return fig
[ "def", "spec_trace", "(", "traces", ",", "cmap", "=", "None", ",", "wlen", "=", "0.4", ",", "log", "=", "False", ",", "trc", "=", "'k'", ",", "tralpha", "=", "0.9", ",", "size", "=", "(", "10", ",", "13", ")", ",", "fig", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "if", "isinstance", "(", "traces", ",", "Stream", ")", ":", "traces", ".", "sort", "(", "[", "'station'", ",", "'channel'", "]", ")", "if", "not", "fig", ":", "fig", "=", "plt", ".", "figure", "(", ")", "for", "i", ",", "tr", "in", "enumerate", "(", "traces", ")", ":", "if", "i", "==", "0", ":", "ax", "=", "fig", ".", "add_subplot", "(", "len", "(", "traces", ")", ",", "1", ",", "i", "+", "1", ")", "else", ":", "ax", "=", "fig", ".", "add_subplot", "(", "len", "(", "traces", ")", ",", "1", ",", "i", "+", "1", ",", "sharex", "=", "ax", ")", "ax1", ",", "ax2", "=", "_spec_trace", "(", "tr", ",", "cmap", "=", "cmap", ",", "wlen", "=", "wlen", ",", "log", "=", "log", ",", "trc", "=", "trc", ",", "tralpha", "=", "tralpha", ",", "axes", "=", "ax", ")", "ax", ".", "set_yticks", "(", "[", "]", ")", "if", "i", "<", "len", "(", "traces", ")", "-", "1", ":", "plt", ".", "setp", "(", "ax1", ".", "get_xticklabels", "(", ")", ",", "visible", "=", "False", ")", "if", "isinstance", "(", "traces", ",", "list", ")", ":", "ax", ".", "text", "(", "0.005", ",", "0.85", ",", "\"{0}::{1}\"", ".", "format", "(", "tr", ".", "id", ",", "tr", ".", "stats", ".", "starttime", ")", ",", "bbox", "=", "dict", "(", "facecolor", "=", "'white'", ",", "alpha", "=", "0.8", ")", ",", "transform", "=", "ax2", ".", "transAxes", ")", "elif", "isinstance", "(", "traces", ",", "Stream", ")", ":", "ax", ".", "text", "(", "0.005", ",", "0.85", ",", "tr", ".", "id", ",", "bbox", "=", "dict", "(", "facecolor", "=", "'white'", ",", "alpha", "=", "0.8", ")", ",", "transform", "=", "ax2", ".", "transAxes", ")", "ax", ".", "text", "(", "0.005", ",", "0.02", ",", "str", "(", "np", ".", "max", "(", "tr", ".", "data", ")", ".", "round", "(", "1", ")", ")", ",", "bbox", "=", "dict", "(", "facecolor", "=", "'white'", ",", "alpha", "=", "0.95", ")", ",", "transform", "=", "ax2", ".", "transAxes", ")", "ax", ".", "set_xlabel", "(", "'Time (s)'", ")", "fig", ".", "subplots_adjust", "(", "hspace", "=", "0", ")", "fig", ".", "set_size_inches", "(", "w", "=", "size", "[", "0", "]", ",", "h", "=", "size", "[", "1", "]", ",", "forward", "=", "True", ")", "fig", ".", "text", "(", "0.04", ",", "0.5", ",", "'Frequency (Hz)'", ",", "va", "=", "'center'", ",", "rotation", "=", "'vertical'", ")", "fig", "=", "_finalise_figure", "(", "fig", "=", "fig", ",", "*", "*", "kwargs", ")", "# pragma: no cover", "return", "fig" ]
Plots seismic data with spectrogram behind. Takes a stream or list of traces and plots the trace with the spectra beneath it. :type traces: list :param traces: Traces to be plotted, can be a single :class:`obspy.core.stream.Stream`, or a list of :class:`obspy.core.trace.Trace`. :type cmap: str :param cmap: `Matplotlib colormap <http://matplotlib.org/examples/color/colormaps_reference.html>`_. :type wlen: float :param wlen: Window length for fft in seconds :type log: bool :param log: Use a log frequency scale :type trc: str :param trc: Color for the trace. :type tralpha: float :param tralpha: Opacity level for the seismogram, from transparent (0.0) \ to opaque (1.0). :type size: tuple :param size: Plot size, tuple of floats, inches :type fig: matplotlib.figure.Figure :param fig: Figure to plot onto, defaults to self generating. :returns: :class:`matplotlib.figure.Figure` .. rubric:: Example >>> from obspy import read >>> from eqcorrscan.utils.plotting import spec_trace >>> st = read() >>> spec_trace(st, trc='white') # doctest: +SKIP .. plot:: from obspy import read from eqcorrscan.utils.plotting import spec_trace st = read() spec_trace(st, trc='white')
[ "Plots", "seismic", "data", "with", "spectrogram", "behind", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/topology.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/topology.py#L312-L316
def request_check_all(self, wait_time=5): """Wake all monitors, wait for at least one to check its server.""" with self._lock: self._request_check_all() self._condition.wait(wait_time)
[ "def", "request_check_all", "(", "self", ",", "wait_time", "=", "5", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "_request_check_all", "(", ")", "self", ".", "_condition", ".", "wait", "(", "wait_time", ")" ]
Wake all monitors, wait for at least one to check its server.
[ "Wake", "all", "monitors", "wait", "for", "at", "least", "one", "to", "check", "its", "server", "." ]
python
train
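The method is a standard condition-variable handshake: request checks under the lock, then wait until a monitor notifies or the timeout expires. In outline, with plain threading stand-ins rather than pymongo internals:

import threading

lock = threading.Lock()
condition = threading.Condition(lock)

def monitor_check():
    with lock:
        condition.notify_all()   # a monitor finished checking its server

def request_check_all(wait_time=5):
    with lock:
        threading.Timer(0.01, monitor_check).start()  # wake a monitor
        condition.wait(wait_time)  # releases the lock while blocked

request_check_all()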
zetaops/zengine
zengine/engine.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/engine.py#L657-L690
def check_for_lane_permission(self): """ One or more permissions can be associated with a lane of a workflow. In a similar way, a lane can be restricted with relation to other lanes of the workflow. This method called on lane changes and checks user has required permissions and relations. Raises: HTTPForbidden: if the current user hasn't got the required permissions and proper relations """ # TODO: Cache lane_data in app memory if self.current.lane_permission: log.debug("HAS LANE PERM: %s" % self.current.lane_permission) perm = self.current.lane_permission if not self.current.has_permission(perm): raise HTTPError(403, "You don't have required lane permission: %s" % perm) if self.current.lane_relations: context = self.get_pool_context() log.debug("HAS LANE RELS: %s" % self.current.lane_relations) try: cond_result = eval(self.current.lane_relations, context) except: log.exception("CONDITION EVAL ERROR : %s || %s" % ( self.current.lane_relations, context)) raise if not cond_result: log.debug("LANE RELATION ERR: %s %s" % (self.current.lane_relations, context)) raise HTTPError(403, "You aren't qualified for this lane: %s" % self.current.lane_relations)
[ "def", "check_for_lane_permission", "(", "self", ")", ":", "# TODO: Cache lane_data in app memory", "if", "self", ".", "current", ".", "lane_permission", ":", "log", ".", "debug", "(", "\"HAS LANE PERM: %s\"", "%", "self", ".", "current", ".", "lane_permission", ")", "perm", "=", "self", ".", "current", ".", "lane_permission", "if", "not", "self", ".", "current", ".", "has_permission", "(", "perm", ")", ":", "raise", "HTTPError", "(", "403", ",", "\"You don't have required lane permission: %s\"", "%", "perm", ")", "if", "self", ".", "current", ".", "lane_relations", ":", "context", "=", "self", ".", "get_pool_context", "(", ")", "log", ".", "debug", "(", "\"HAS LANE RELS: %s\"", "%", "self", ".", "current", ".", "lane_relations", ")", "try", ":", "cond_result", "=", "eval", "(", "self", ".", "current", ".", "lane_relations", ",", "context", ")", "except", ":", "log", ".", "exception", "(", "\"CONDITION EVAL ERROR : %s || %s\"", "%", "(", "self", ".", "current", ".", "lane_relations", ",", "context", ")", ")", "raise", "if", "not", "cond_result", ":", "log", ".", "debug", "(", "\"LANE RELATION ERR: %s %s\"", "%", "(", "self", ".", "current", ".", "lane_relations", ",", "context", ")", ")", "raise", "HTTPError", "(", "403", ",", "\"You aren't qualified for this lane: %s\"", "%", "self", ".", "current", ".", "lane_relations", ")" ]
One or more permissions can be associated with a lane of a workflow. In a similar way, a lane can be restricted with relation to other lanes of the workflow. This method is called on lane changes and checks that the user has the required permissions and relations. Raises: HTTPError (403): if the current user doesn't have the required permissions and proper relations
[ "One", "or", "more", "permissions", "can", "be", "associated", "with", "a", "lane", "of", "a", "workflow", ".", "In", "a", "similar", "way", "a", "lane", "can", "be", "restricted", "with", "relation", "to", "other", "lanes", "of", "the", "workflow", "." ]
python
train
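The lane-relation test reduces to evaluating a stored boolean expression against a context dict; a stripped-down sketch (eval on stored strings is exactly as trust-sensitive here as in the original):

def check_lane_relations(expression, context):
    """Evaluate a stored relation expression against a pool context."""
    # Empty __builtins__ so the expression sees only the supplied names.
    return bool(eval(expression, {"__builtins__": {}}, context))

context = {"lane_owners": ["alice"], "current_user": "alice"}
assert check_lane_relations("current_user in lane_owners", context)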
maweigert/gputools
gputools/convolve/generic_separable_filters.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L216-L248
def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)): """ gaussian filter of given size Parameters ---------- data: 2 or 3 dimensional ndarray or OCLArray of type float32 input data size: scalar, tuple the size of the patch to consider res_g: OCLArray store result in buffer if given sub_blocks: perform over subblock tiling (only if data is ndarray) Returns ------- filtered image or None (if OCLArray) """ truncate = 4. radius = tuple(int(truncate*s +0.5) for s in sigma) size = tuple(2*r+1 for r in radius) s = sigma[0] if data.ndim == 2: _filt = make_filter(_generic_filter_gpu_2d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT="0.f")) elif data.ndim == 3: _filt = make_filter(_generic_filter_gpu_3d(FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))"%(size[0]//2,size[0]//2,s,s), DEFAULT="0.f")) else: raise ValueError("currently only 2 or 3 dimensional data is supported") return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
[ "def", "_gauss_filter", "(", "data", ",", "sigma", "=", "4", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "(", "1", ",", "1", ",", "1", ")", ")", ":", "truncate", "=", "4.", "radius", "=", "tuple", "(", "int", "(", "truncate", "*", "s", "+", "0.5", ")", "for", "s", "in", "sigma", ")", "size", "=", "tuple", "(", "2", "*", "r", "+", "1", "for", "r", "in", "radius", ")", "s", "=", "sigma", "[", "0", "]", "if", "data", ".", "ndim", "==", "2", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_2d", "(", "FUNC", "=", "\"res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))\"", "%", "(", "size", "[", "0", "]", "//", "2", ",", "size", "[", "0", "]", "//", "2", ",", "s", ",", "s", ")", ",", "DEFAULT", "=", "\"0.f\"", ")", ")", "elif", "data", ".", "ndim", "==", "3", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_3d", "(", "FUNC", "=", "\"res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))\"", "%", "(", "size", "[", "0", "]", "//", "2", ",", "size", "[", "0", "]", "//", "2", ",", "s", ",", "s", ")", ",", "DEFAULT", "=", "\"0.f\"", ")", ")", "else", ":", "raise", "ValueError", "(", "\"currently only 2 or 3 dimensional data is supported\"", ")", "return", "_filt", "(", "data", "=", "data", ",", "size", "=", "size", ",", "res_g", "=", "res_g", ",", "sub_blocks", "=", "sub_blocks", ")" ]
gaussian filter of given sigma

Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
input data
sigma: scalar or tuple
the standard deviation(s) of the gaussian; the filter window
spans truncate * sigma taps in each direction
res_g: OCLArray
store result in buffer if given
sub_blocks: perform over subblock tiling (only if data is ndarray)

Returns
-------
filtered image or None (if OCLArray)
[ "gaussian", "filter", "of", "given", "size" ]
python
train
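As a cross-check on the expression baked into the OpenCL kernel string, the same unnormalized 1-D Gaussian weights in NumPy (truncate=4, matching the radius computation above):

import numpy as np

def gauss_weights_1d(sigma, truncate=4.0):
    radius = int(truncate * sigma + 0.5)
    taps = np.arange(2 * radius + 1)        # plays the role of ht in the kernel
    center = radius                         # same value as size // 2
    return np.exp(-((taps - center) ** 2) / (2.0 * sigma ** 2))

weights = gauss_weights_1d(4)
assert weights[len(weights) // 2] == 1.0    # unnormalized peak at the center tap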
wummel/linkchecker
linkcheck/logger/text.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/logger/text.py#L205-L209
def write_warning (self, url_data): """Write url_data.warning.""" self.write(self.part("warning") + self.spaces("warning")) warning_msgs = [u"[%s] %s" % x for x in url_data.warnings] self.writeln(self.wrap(warning_msgs, 65), color=self.colorwarning)
[ "def", "write_warning", "(", "self", ",", "url_data", ")", ":", "self", ".", "write", "(", "self", ".", "part", "(", "\"warning\"", ")", "+", "self", ".", "spaces", "(", "\"warning\"", ")", ")", "warning_msgs", "=", "[", "u\"[%s] %s\"", "%", "x", "for", "x", "in", "url_data", ".", "warnings", "]", "self", ".", "writeln", "(", "self", ".", "wrap", "(", "warning_msgs", ",", "65", ")", ",", "color", "=", "self", ".", "colorwarning", ")" ]
Write url_data.warning.
[ "Write", "url_data", ".", "warning", "." ]
python
train
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L1773-L1802
def _createBitpattern(functioncode, value): """Create the bit pattern that is used for writing single bits. This is basically a storage of numerical constants. Args: * functioncode (int): can be 5 or 15 * value (int): can be 0 or 1 Returns: The bit pattern (string). Raises: TypeError, ValueError """ _checkFunctioncode(functioncode, [5, 15]) _checkInt(value, minvalue=0, maxvalue=1, description='inputvalue') if functioncode == 5: if value == 0: return '\x00\x00' else: return '\xff\x00' elif functioncode == 15: if value == 0: return '\x00' else: return '\x01'
[ "def", "_createBitpattern", "(", "functioncode", ",", "value", ")", ":", "_checkFunctioncode", "(", "functioncode", ",", "[", "5", ",", "15", "]", ")", "_checkInt", "(", "value", ",", "minvalue", "=", "0", ",", "maxvalue", "=", "1", ",", "description", "=", "'inputvalue'", ")", "if", "functioncode", "==", "5", ":", "if", "value", "==", "0", ":", "return", "'\\x00\\x00'", "else", ":", "return", "'\\xff\\x00'", "elif", "functioncode", "==", "15", ":", "if", "value", "==", "0", ":", "return", "'\\x00'", "else", ":", "return", "'\\x01'" ]
Create the bit pattern that is used for writing single bits. This is basically a storage of numerical constants. Args: * functioncode (int): can be 5 or 15 * value (int): can be 0 or 1 Returns: The bit pattern (string). Raises: TypeError, ValueError
[ "Create", "the", "bit", "pattern", "that", "is", "used", "for", "writing", "single", "bits", "." ]
python
train
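The same constants as a lookup table, which makes the function-code/value pairing explicit (byte literals shown for Python 3; the original returns Python 2 str):

_BITPATTERNS = {
    (5, 0): b"\x00\x00",   # function 5 (Write Single Coil), OFF
    (5, 1): b"\xff\x00",   # function 5, ON
    (15, 0): b"\x00",      # function 15 (Write Multiple Coils), bit clear
    (15, 1): b"\x01",      # function 15, bit set
}

def create_bitpattern(functioncode, value):
    try:
        return _BITPATTERNS[(functioncode, value)]
    except KeyError:
        raise ValueError("functioncode must be 5 or 15 and value 0 or 1")

assert create_bitpattern(5, 1) == b"\xff\x00"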
orbingol/NURBS-Python
geomdl/operations.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L1607-L1638
def scale(obj, multiplier, **kwargs): """ Scales curves, surfaces or volumes by the input multiplier. Keyword Arguments: * ``inplace``: if False, operation applied to a copy of the object. *Default: False* :param obj: input geometry :type obj: abstract.SplineGeometry, multi.AbstractGeometry :param multiplier: scaling multiplier :type multiplier: float :return: scaled geometry object """ # Input validity checks if not isinstance(multiplier, (int, float)): raise GeomdlException("The multiplier must be a float or an integer") # Keyword arguments inplace = kwargs.get('inplace', False) if not inplace: geom = copy.deepcopy(obj) else: geom = obj # Scale control points for g in geom: new_ctrlpts = [[] for _ in range(g.ctrlpts_size)] for idx, pts in enumerate(g.ctrlpts): new_ctrlpts[idx] = [p * float(multiplier) for p in pts] g.ctrlpts = new_ctrlpts return geom
[ "def", "scale", "(", "obj", ",", "multiplier", ",", "*", "*", "kwargs", ")", ":", "# Input validity checks", "if", "not", "isinstance", "(", "multiplier", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "GeomdlException", "(", "\"The multiplier must be a float or an integer\"", ")", "# Keyword arguments", "inplace", "=", "kwargs", ".", "get", "(", "'inplace'", ",", "False", ")", "if", "not", "inplace", ":", "geom", "=", "copy", ".", "deepcopy", "(", "obj", ")", "else", ":", "geom", "=", "obj", "# Scale control points", "for", "g", "in", "geom", ":", "new_ctrlpts", "=", "[", "[", "]", "for", "_", "in", "range", "(", "g", ".", "ctrlpts_size", ")", "]", "for", "idx", ",", "pts", "in", "enumerate", "(", "g", ".", "ctrlpts", ")", ":", "new_ctrlpts", "[", "idx", "]", "=", "[", "p", "*", "float", "(", "multiplier", ")", "for", "p", "in", "pts", "]", "g", ".", "ctrlpts", "=", "new_ctrlpts", "return", "geom" ]
Scales curves, surfaces or volumes by the input multiplier. Keyword Arguments: * ``inplace``: if False, operation applied to a copy of the object. *Default: False* :param obj: input geometry :type obj: abstract.SplineGeometry, multi.AbstractGeometry :param multiplier: scaling multiplier :type multiplier: float :return: scaled geometry object
[ "Scales", "curves", "surfaces", "or", "volumes", "by", "the", "input", "multiplier", "." ]
python
train
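Stripped of the geomdl bookkeeping, the operation is just an element-wise multiply over every control point:

def scale_ctrlpts(ctrlpts, multiplier):
    return [[coord * float(multiplier) for coord in point] for point in ctrlpts]

assert scale_ctrlpts([[1, 2, 0], [3, 4, 0]], 2) == [[2.0, 4.0, 0.0], [6.0, 8.0, 0.0]]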
secdev/scapy
scapy/utils6.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils6.py#L525-L534
def in6_iseui64(x): """ Return True if provided address has an interface identifier part created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*). Otherwise, False is returned. Address must be passed in printable format. """ eui64 = inet_pton(socket.AF_INET6, '::ff:fe00:0') x = in6_and(inet_pton(socket.AF_INET6, x), eui64) return x == eui64
[ "def", "in6_iseui64", "(", "x", ")", ":", "eui64", "=", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "'::ff:fe00:0'", ")", "x", "=", "in6_and", "(", "inet_pton", "(", "socket", ".", "AF_INET6", ",", "x", ")", ",", "eui64", ")", "return", "x", "==", "eui64" ]
Return True if provided address has an interface identifier part created in modified EUI-64 format (meaning it matches *::*:*ff:fe*:*). Otherwise, False is returned. Address must be passed in printable format.
[ "Return", "True", "if", "provided", "address", "has", "an", "interface", "identifier", "part", "created", "in", "modified", "EUI", "-", "64", "format", "(", "meaning", "it", "matches", "*", "::", "*", ":", "*", "ff", ":", "fe", "*", ":", "*", ")", ".", "Otherwise", "False", "is", "returned", ".", "Address", "must", "be", "passed", "in", "printable", "format", "." ]
python
train
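A stdlib-only equivalent of the mask test, with a byte-wise AND standing in for scapy's in6_and; the first address below embeds a modified EUI-64 interface identifier, the second does not:

import socket

def is_eui64(addr):
    mask = socket.inet_pton(socket.AF_INET6, "::ff:fe00:0")
    packed = socket.inet_pton(socket.AF_INET6, addr)
    return bytes(a & m for a, m in zip(packed, mask)) == mask

assert is_eui64("fe80::211:22ff:fe33:4455")      # derived from MAC 00:11:22:33:44:55
assert not is_eui64("2001:db8::1")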
gem/oq-engine
openquake/risklib/scientific.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L1021-L1068
def conditional_loss_ratio(loss_ratios, poes, probability): """ Return the loss ratio corresponding to the given PoE (Probability of Exceendance). We can have four cases: 1. If `probability` is in `poes` it takes the bigger corresponding loss_ratios. 2. If it is in `(poe1, poe2)` where both `poe1` and `poe2` are in `poes`, then we perform a linear interpolation on the corresponding losses 3. if the given probability is smaller than the lowest PoE defined, it returns the max loss ratio . 4. if the given probability is greater than the highest PoE defined it returns zero. :param loss_ratios: an iterable over non-decreasing loss ratio values (float) :param poes: an iterable over non-increasing probability of exceedance values (float) :param float probability: the probability value used to interpolate the loss curve """ assert len(loss_ratios) >= 3, loss_ratios rpoes = poes[::-1] if probability > poes[0]: # max poes return 0.0 elif probability < poes[-1]: # min PoE return loss_ratios[-1] if probability in poes: return max([loss for i, loss in enumerate(loss_ratios) if probability == poes[i]]) else: interval_index = bisect.bisect_right(rpoes, probability) if interval_index == len(poes): # poes are all nan return float('nan') elif interval_index == 1: # boundary case x1, x2 = poes[-2:] y1, y2 = loss_ratios[-2:] else: x1, x2 = poes[-interval_index-1:-interval_index + 1] y1, y2 = loss_ratios[-interval_index-1:-interval_index + 1] return (y2 - y1) / (x2 - x1) * (probability - x1) + y1
[ "def", "conditional_loss_ratio", "(", "loss_ratios", ",", "poes", ",", "probability", ")", ":", "assert", "len", "(", "loss_ratios", ")", ">=", "3", ",", "loss_ratios", "rpoes", "=", "poes", "[", ":", ":", "-", "1", "]", "if", "probability", ">", "poes", "[", "0", "]", ":", "# max poes", "return", "0.0", "elif", "probability", "<", "poes", "[", "-", "1", "]", ":", "# min PoE", "return", "loss_ratios", "[", "-", "1", "]", "if", "probability", "in", "poes", ":", "return", "max", "(", "[", "loss", "for", "i", ",", "loss", "in", "enumerate", "(", "loss_ratios", ")", "if", "probability", "==", "poes", "[", "i", "]", "]", ")", "else", ":", "interval_index", "=", "bisect", ".", "bisect_right", "(", "rpoes", ",", "probability", ")", "if", "interval_index", "==", "len", "(", "poes", ")", ":", "# poes are all nan", "return", "float", "(", "'nan'", ")", "elif", "interval_index", "==", "1", ":", "# boundary case", "x1", ",", "x2", "=", "poes", "[", "-", "2", ":", "]", "y1", ",", "y2", "=", "loss_ratios", "[", "-", "2", ":", "]", "else", ":", "x1", ",", "x2", "=", "poes", "[", "-", "interval_index", "-", "1", ":", "-", "interval_index", "+", "1", "]", "y1", ",", "y2", "=", "loss_ratios", "[", "-", "interval_index", "-", "1", ":", "-", "interval_index", "+", "1", "]", "return", "(", "y2", "-", "y1", ")", "/", "(", "x2", "-", "x1", ")", "*", "(", "probability", "-", "x1", ")", "+", "y1" ]
Return the loss ratio corresponding to the given PoE (Probability of Exceedance). We can have four cases: 1. If `probability` is in `poes` it takes the largest corresponding loss ratio. 2. If it is in `(poe1, poe2)` where both `poe1` and `poe2` are in `poes`, then we perform a linear interpolation on the corresponding losses 3. If the given probability is smaller than the lowest PoE defined, it returns the max loss ratio. 4. If the given probability is greater than the highest PoE defined it returns zero. :param loss_ratios: an iterable over non-decreasing loss ratio values (float) :param poes: an iterable over non-increasing probability of exceedance values (float) :param float probability: the probability value used to interpolate the loss curve
[ "Return", "the", "loss", "ratio", "corresponding", "to", "the", "given", "PoE", "(", "Probability", "of", "Exceendance", ")", ".", "We", "can", "have", "four", "cases", ":" ]
python
train
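A worked instance of the interpolation branch (case 2 of the docstring), with illustrative numbers: probability 0.3 falls between PoEs 0.4 and 0.2, so the result interpolates their loss ratios 0.1 and 0.3 to 0.2:

loss_ratios = [0.0, 0.1, 0.3, 0.6]   # non-decreasing
poes = [0.8, 0.4, 0.2, 0.1]          # non-increasing
probability = 0.3

x1, x2 = 0.4, 0.2                    # bracketing PoEs found by the bisect step
y1, y2 = 0.1, 0.3                    # their corresponding loss ratios
value = (y2 - y1) / (x2 - x1) * (probability - x1) + y1
assert abs(value - 0.2) < 1e-12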
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_export.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_export.py#L213-L228
def export_process_element(definitions, process_id, process_attributes_dictionary): """ Creates process element for exported BPMN XML file. :param process_id: string object. ID of exported process element, :param definitions: an XML element ('definitions'), root element of BPMN 2.0 document :param process_attributes_dictionary: dictionary that holds attribute values of 'process' element :return: process XML element """ process = eTree.SubElement(definitions, consts.Consts.process) process.set(consts.Consts.id, process_id) process.set(consts.Consts.is_closed, process_attributes_dictionary[consts.Consts.is_closed]) process.set(consts.Consts.is_executable, process_attributes_dictionary[consts.Consts.is_executable]) process.set(consts.Consts.process_type, process_attributes_dictionary[consts.Consts.process_type]) return process
[ "def", "export_process_element", "(", "definitions", ",", "process_id", ",", "process_attributes_dictionary", ")", ":", "process", "=", "eTree", ".", "SubElement", "(", "definitions", ",", "consts", ".", "Consts", ".", "process", ")", "process", ".", "set", "(", "consts", ".", "Consts", ".", "id", ",", "process_id", ")", "process", ".", "set", "(", "consts", ".", "Consts", ".", "is_closed", ",", "process_attributes_dictionary", "[", "consts", ".", "Consts", ".", "is_closed", "]", ")", "process", ".", "set", "(", "consts", ".", "Consts", ".", "is_executable", ",", "process_attributes_dictionary", "[", "consts", ".", "Consts", ".", "is_executable", "]", ")", "process", ".", "set", "(", "consts", ".", "Consts", ".", "process_type", ",", "process_attributes_dictionary", "[", "consts", ".", "Consts", ".", "process_type", "]", ")", "return", "process" ]
Creates process element for exported BPMN XML file. :param process_id: string object. ID of exported process element, :param definitions: an XML element ('definitions'), root element of BPMN 2.0 document :param process_attributes_dictionary: dictionary that holds attribute values of 'process' element :return: process XML element
[ "Creates", "process", "element", "for", "exported", "BPMN", "XML", "file", "." ]
python
train
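The same element construction with stock xml.etree; the literal attribute names below are the standard BPMN 2.0 ones that the consts.Consts indirection presumably resolves to:

import xml.etree.ElementTree as eTree

definitions = eTree.Element("definitions")
process = eTree.SubElement(definitions, "process")
process.set("id", "process_1")
process.set("isClosed", "false")
process.set("isExecutable", "true")
process.set("processType", "None")
assert b'id="process_1"' in eTree.tostring(definitions)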
seequent/properties
properties/base/union.py
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/base/union.py#L125-L134
def default(self): """Default value of the property""" prop_def = getattr(self, '_default', utils.undefined) for prop in self.props: if prop.default is utils.undefined: continue if prop_def is utils.undefined: prop_def = prop.default break return prop_def
[ "def", "default", "(", "self", ")", ":", "prop_def", "=", "getattr", "(", "self", ",", "'_default'", ",", "utils", ".", "undefined", ")", "for", "prop", "in", "self", ".", "props", ":", "if", "prop", ".", "default", "is", "utils", ".", "undefined", ":", "continue", "if", "prop_def", "is", "utils", ".", "undefined", ":", "prop_def", "=", "prop", ".", "default", "break", "return", "prop_def" ]
Default value of the property
[ "Default", "value", "of", "the", "property" ]
python
train
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L2692-L2724
def _set_bit_size(self, size, step=1, auto_align=False): """ Sets the *size* of the `Decimal` field. :param int size: is the *size* of the `Decimal` field in bits, can be between ``1`` and ``64``. :param int step: is the minimal required step *size* for the `Decimal` field in bits. :param bool auto_align: if ``True`` the `Decimal` field aligns itself to the next matching byte size according to the *size* of the `Decimal` field. """ # Field size bit_size = int(size) # Invalid field size if bit_size % step != 0 or not (1 <= bit_size <= 64): raise FieldSizeError(self, self.index, bit_size) # Field group size group_size, offset = divmod(bit_size, 8) # Auto alignment if auto_align: if offset is not 0: self._align_to_byte_size = group_size + 1 else: self._align_to_byte_size = group_size # Invalid field alignment elif group_size > self.alignment.byte_size: raise FieldAlignmentError(self, self.index, Alignment(group_size, self.alignment.bit_offset)) # Set field size self._bit_size = bit_size
[ "def", "_set_bit_size", "(", "self", ",", "size", ",", "step", "=", "1", ",", "auto_align", "=", "False", ")", ":", "# Field size", "bit_size", "=", "int", "(", "size", ")", "# Invalid field size", "if", "bit_size", "%", "step", "!=", "0", "or", "not", "(", "1", "<=", "bit_size", "<=", "64", ")", ":", "raise", "FieldSizeError", "(", "self", ",", "self", ".", "index", ",", "bit_size", ")", "# Field group size", "group_size", ",", "offset", "=", "divmod", "(", "bit_size", ",", "8", ")", "# Auto alignment", "if", "auto_align", ":", "if", "offset", "is", "not", "0", ":", "self", ".", "_align_to_byte_size", "=", "group_size", "+", "1", "else", ":", "self", ".", "_align_to_byte_size", "=", "group_size", "# Invalid field alignment", "elif", "group_size", ">", "self", ".", "alignment", ".", "byte_size", ":", "raise", "FieldAlignmentError", "(", "self", ",", "self", ".", "index", ",", "Alignment", "(", "group_size", ",", "self", ".", "alignment", ".", "bit_offset", ")", ")", "# Set field size", "self", ".", "_bit_size", "=", "bit_size" ]
Sets the *size* of the `Decimal` field. :param int size: is the *size* of the `Decimal` field in bits, can be between ``1`` and ``64``. :param int step: is the minimal required step *size* for the `Decimal` field in bits. :param bool auto_align: if ``True`` the `Decimal` field aligns itself to the next matching byte size according to the *size* of the `Decimal` field.
[ "Sets", "the", "*", "size", "*", "of", "the", "Decimal", "field", "." ]
python
train
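The auto-alignment arithmetic in isolation: divmod by 8 splits the bit size into whole bytes plus leftover bits, and any leftover forces one extra byte:

def aligned_byte_size(bit_size):
    group_size, offset = divmod(bit_size, 8)
    return group_size + 1 if offset else group_size

assert aligned_byte_size(8) == 1
assert aligned_byte_size(12) == 2   # 8 bits + 4 leftover bits -> 2 bytes
assert aligned_byte_size(64) == 8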
ttinoco/OPTALG
optalg/lin_solver/_mumps/__init__.py
https://github.com/ttinoco/OPTALG/blob/d4f141292f281eea4faa71473258139e7f433001/optalg/lin_solver/_mumps/__init__.py#L156-L163
def set_distributed_assembled_values(self, a_loc): """Set the distributed assembled matrix values. Distributed assembled matrices require setting icntl(18) != 0. """ assert a_loc.size == self._refs['irn_loc'].size self._refs.update(a_loc=a_loc) self.id.a_loc = self.cast_array(a_loc)
[ "def", "set_distributed_assembled_values", "(", "self", ",", "a_loc", ")", ":", "assert", "a_loc", ".", "size", "==", "self", ".", "_refs", "[", "'irn_loc'", "]", ".", "size", "self", ".", "_refs", ".", "update", "(", "a_loc", "=", "a_loc", ")", "self", ".", "id", ".", "a_loc", "=", "self", ".", "cast_array", "(", "a_loc", ")" ]
Set the distributed assembled matrix values. Distributed assembled matrices require setting icntl(18) != 0.
[ "Set", "the", "distributed", "assembled", "matrix", "values", "." ]
python
train
graphql-python/graphql-core-next
graphql/language/parser.py
https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/language/parser.py#L547-L554
def parse_type_system_definition(lexer: Lexer) -> TypeSystemDefinitionNode: """TypeSystemDefinition""" # Many definitions begin with a description and require a lookahead. keyword_token = lexer.lookahead() if peek_description(lexer) else lexer.token func = _parse_type_system_definition_functions.get(cast(str, keyword_token.value)) if func: return func(lexer) raise unexpected(lexer, keyword_token)
[ "def", "parse_type_system_definition", "(", "lexer", ":", "Lexer", ")", "->", "TypeSystemDefinitionNode", ":", "# Many definitions begin with a description and require a lookahead.", "keyword_token", "=", "lexer", ".", "lookahead", "(", ")", "if", "peek_description", "(", "lexer", ")", "else", "lexer", ".", "token", "func", "=", "_parse_type_system_definition_functions", ".", "get", "(", "cast", "(", "str", ",", "keyword_token", ".", "value", ")", ")", "if", "func", ":", "return", "func", "(", "lexer", ")", "raise", "unexpected", "(", "lexer", ",", "keyword_token", ")" ]
TypeSystemDefinition
[ "TypeSystemDefinition" ]
python
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L120-L137
def pix2sky(self, pixel): """ Convert pixel coordinates into sky coordinates. Parameters ---------- pixel : (float, float) The (x,y) pixel coordinates Returns ------- sky : (float, float) The (ra,dec) sky coordinates in degrees """ x, y = pixel # wcs and pyfits have oposite ideas of x/y return self.wcs.wcs_pix2world([[y, x]], 1)[0]
[ "def", "pix2sky", "(", "self", ",", "pixel", ")", ":", "x", ",", "y", "=", "pixel", "# wcs and pyfits have oposite ideas of x/y", "return", "self", ".", "wcs", ".", "wcs_pix2world", "(", "[", "[", "y", ",", "x", "]", "]", ",", "1", ")", "[", "0", "]" ]
Convert pixel coordinates into sky coordinates. Parameters ---------- pixel : (float, float) The (x,y) pixel coordinates Returns ------- sky : (float, float) The (ra,dec) sky coordinates in degrees
[ "Convert", "pixel", "coordinates", "into", "sky", "coordinates", "." ]
python
train
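The (y, x) swap is the detail worth remembering; exercised against a small hand-built astropy WCS (the projection parameters here are arbitrary):

from astropy.wcs import WCS

w = WCS(naxis=2)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crpix = [1.0, 1.0]
w.wcs.crval = [180.0, 0.0]
w.wcs.cdelt = [-0.001, 0.001]

x, y = 0.0, 0.0                            # pixel coordinates, Aegean's ordering
ra, dec = w.wcs_pix2world([[y, x]], 1)[0]  # note the swap before the call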
pybel/pybel
src/pybel/manager/cache_manager.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/manager/cache_manager.py#L399-L401
def has_name_version(self, name: str, version: str) -> bool: """Check if there exists a network with the name/version combination in the database.""" return self.session.query(exists().where(and_(Network.name == name, Network.version == version))).scalar()
[ "def", "has_name_version", "(", "self", ",", "name", ":", "str", ",", "version", ":", "str", ")", "->", "bool", ":", "return", "self", ".", "session", ".", "query", "(", "exists", "(", ")", ".", "where", "(", "and_", "(", "Network", ".", "name", "==", "name", ",", "Network", ".", "version", "==", "version", ")", ")", ")", ".", "scalar", "(", ")" ]
Check if there exists a network with the name/version combination in the database.
[ "Check", "if", "there", "exists", "a", "network", "with", "the", "name", "/", "version", "combination", "in", "the", "database", "." ]
python
train
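The exists().where(...) idiom, run end-to-end against an in-memory SQLite model (the Network class below is a minimal stand-in for PyBEL's mapped class):

from sqlalchemy import Column, Integer, String, and_, create_engine, exists
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Network(Base):
    __tablename__ = "network"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    version = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Network(name="hgnc", version="1.0.0"))
    session.commit()
    clause = exists().where(and_(Network.name == "hgnc",
                                 Network.version == "1.0.0"))
    assert session.query(clause).scalar()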
openstack/pyghmi
pyghmi/ipmi/command.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L720-L738
def get_sensor_reading(self, sensorname): """Get a sensor reading by name Returns a single decoded sensor reading per the name passed in :param sensorname: Name of the desired sensor :returns: sdr.SensorReading object """ self.init_sdr() for sensor in self._sdr.get_sensor_numbers(): if self._sdr.sensors[sensor].name == sensorname: rsp = self.raw_command(command=0x2d, netfn=4, data=(sensor,)) if 'error' in rsp: raise exc.IpmiException(rsp['error'], rsp['code']) return self._sdr.sensors[sensor].decode_sensor_reading( rsp['data']) self.oem_init() return self._oem.get_sensor_reading(sensorname)
[ "def", "get_sensor_reading", "(", "self", ",", "sensorname", ")", ":", "self", ".", "init_sdr", "(", ")", "for", "sensor", "in", "self", ".", "_sdr", ".", "get_sensor_numbers", "(", ")", ":", "if", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "name", "==", "sensorname", ":", "rsp", "=", "self", ".", "raw_command", "(", "command", "=", "0x2d", ",", "netfn", "=", "4", ",", "data", "=", "(", "sensor", ",", ")", ")", "if", "'error'", "in", "rsp", ":", "raise", "exc", ".", "IpmiException", "(", "rsp", "[", "'error'", "]", ",", "rsp", "[", "'code'", "]", ")", "return", "self", ".", "_sdr", ".", "sensors", "[", "sensor", "]", ".", "decode_sensor_reading", "(", "rsp", "[", "'data'", "]", ")", "self", ".", "oem_init", "(", ")", "return", "self", ".", "_oem", ".", "get_sensor_reading", "(", "sensorname", ")" ]
Get a sensor reading by name Returns a single decoded sensor reading per the name passed in :param sensorname: Name of the desired sensor :returns: sdr.SensorReading object
[ "Get", "a", "sensor", "reading", "by", "name" ]
python
train
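Typical call shape against a pyghmi Command session; the BMC address and credentials are placeholders, and the attributes read off the result (value, units) are the usual fields on sdr.SensorReading:

from pyghmi.ipmi import command as ipmi_command

# Placeholder connection details for a real BMC.
cmd = ipmi_command.Command(bmc="10.0.0.1", userid="admin", password="secret")
reading = cmd.get_sensor_reading("Ambient Temp")
print(reading.value, reading.units)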
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/ipv6/ipv6route/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/ipv6/ipv6route/__init__.py#L92-L113
def _set_route(self, v, load=False): """ Setter method for route, mapped from YANG variable /rbridge_id/ipv6/ipv6route/route (list) If this variable is read-only (config: false) in the source YANG file, then _set_route is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_route() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("dest",route.route, yang_name="route", rest_name="route", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest', extensions={u'tailf-common': {u'info': u'Configure ipv6 static route', u'cli-suppress-list-no': None, u'hidden': u'full', u'cli-suppress-mode': None, u'cli-full-no': None}}), is_container='list', yang_name="route", rest_name="route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipv6 static route', u'cli-suppress-list-no': None, u'hidden': u'full', u'cli-suppress-mode': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-forward', defining_module='brocade-ip-forward', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """route must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("dest",route.route, yang_name="route", rest_name="route", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest', extensions={u'tailf-common': {u'info': u'Configure ipv6 static route', u'cli-suppress-list-no': None, u'hidden': u'full', u'cli-suppress-mode': None, u'cli-full-no': None}}), is_container='list', yang_name="route", rest_name="route", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipv6 static route', u'cli-suppress-list-no': None, u'hidden': u'full', u'cli-suppress-mode': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-forward', defining_module='brocade-ip-forward', yang_type='list', is_config=True)""", }) self.__route = t if hasattr(self, '_set'): self._set()
[ "def", "_set_route", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"dest\"", ",", "route", ".", "route", ",", "yang_name", "=", "\"route\"", ",", "rest_name", "=", "\"route\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'dest'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure ipv6 static route'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'hidden'", ":", "u'full'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"route\"", ",", "rest_name", "=", "\"route\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure ipv6 static route'", ",", "u'cli-suppress-list-no'", ":", "None", ",", "u'hidden'", ":", "u'full'", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'cli-full-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ip-forward'", ",", "defining_module", "=", "'brocade-ip-forward'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"route must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"dest\",route.route, yang_name=\"route\", rest_name=\"route\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='dest', extensions={u'tailf-common': {u'info': u'Configure ipv6 static route', u'cli-suppress-list-no': None, u'hidden': u'full', u'cli-suppress-mode': None, u'cli-full-no': None}}), is_container='list', yang_name=\"route\", rest_name=\"route\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure ipv6 static route', u'cli-suppress-list-no': None, u'hidden': u'full', u'cli-suppress-mode': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-forward', defining_module='brocade-ip-forward', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__route", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for route, mapped from YANG variable /rbridge_id/ipv6/ipv6route/route (list) If this variable is read-only (config: false) in the source YANG file, then _set_route is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_route() directly.
[ "Setter", "method", "for", "route", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "ipv6", "/", "ipv6route", "/", "route", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_route", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_route", "()", "directly", "." ]
python
train
joshspeagle/dynesty
dynesty/nestedsamplers.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/nestedsamplers.py#L813-L839
def propose_unif(self): """Propose a new live point by sampling *uniformly* within the union of N-spheres defined by our live points.""" # Initialize a K-D Tree to assist nearest neighbor searches. if self.use_kdtree: kdtree = spatial.KDTree(self.live_u) else: kdtree = None while True: # Sample a point `u` from the union of N-spheres along with the # number of overlapping spheres `q` at point `u`. u, q = self.radfriends.sample(self.live_u, rstate=self.rstate, return_q=True, kdtree=kdtree) # Check if our sample is within the unit cube. if unitcheck(u, self.nonperiodic): # Accept the point with probability 1/q to account for # overlapping balls. if q == 1 or self.rstate.rand() < 1.0 / q: break # if successful, we're done! # Define the axes of the N-sphere. ax = np.identity(self.npdim) * self.radfriends.radius return u, ax
[ "def", "propose_unif", "(", "self", ")", ":", "# Initialize a K-D Tree to assist nearest neighbor searches.", "if", "self", ".", "use_kdtree", ":", "kdtree", "=", "spatial", ".", "KDTree", "(", "self", ".", "live_u", ")", "else", ":", "kdtree", "=", "None", "while", "True", ":", "# Sample a point `u` from the union of N-spheres along with the", "# number of overlapping spheres `q` at point `u`.", "u", ",", "q", "=", "self", ".", "radfriends", ".", "sample", "(", "self", ".", "live_u", ",", "rstate", "=", "self", ".", "rstate", ",", "return_q", "=", "True", ",", "kdtree", "=", "kdtree", ")", "# Check if our sample is within the unit cube.", "if", "unitcheck", "(", "u", ",", "self", ".", "nonperiodic", ")", ":", "# Accept the point with probability 1/q to account for", "# overlapping balls.", "if", "q", "==", "1", "or", "self", ".", "rstate", ".", "rand", "(", ")", "<", "1.0", "/", "q", ":", "break", "# if successful, we're done!", "# Define the axes of the N-sphere.", "ax", "=", "np", ".", "identity", "(", "self", ".", "npdim", ")", "*", "self", ".", "radfriends", ".", "radius", "return", "u", ",", "ax" ]
Propose a new live point by sampling *uniformly* within the union of N-spheres defined by our live points.
[ "Propose", "a", "new", "live", "point", "by", "sampling", "*", "uniformly", "*", "within", "the", "union", "of", "N", "-", "spheres", "defined", "by", "our", "live", "points", "." ]
python
train
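The acceptance test in propose_unif above is classic rejection sampling over a union of overlapping regions: a candidate drawn from one sphere is kept with probability 1/q, where q counts how many spheres contain it, which exactly cancels the over-weighting of overlap regions. A minimal, self-contained 1-D sketch of that trick (intervals stand in for N-spheres; all names here are illustrative, not dynesty's API):

import random

def sample_union_of_intervals(centers, radius, rng):
    # Draw one point uniformly from the union of equal-radius intervals.
    while True:
        c = rng.choice(centers)                  # pick an interval at random
        u = rng.uniform(c - radius, c + radius)  # uniform point inside it
        q = sum(abs(u - ci) <= radius for ci in centers)  # overlap count at u
        if q == 1 or rng.random() < 1.0 / q:     # accept with probability 1/q
            return u

rng = random.Random(0)
print([round(sample_union_of_intervals([0.0, 0.5], 0.4, rng), 3) for _ in range(5)])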
ahmedaljazzar/django-mako
djangomako/backends.py
https://github.com/ahmedaljazzar/django-mako/blob/3a4099e8ae679f431eeb2c35024b2e6028f8a096/djangomako/backends.py#L117-L131
def get_template(self, template_name):
    """
    Try to get a compiled template given a template name

    :param template_name: The template name.
    :raises: - TemplateDoesNotExist if no such template exists.
             - TemplateSyntaxError if we couldn't compile the
                template using Mako syntax.
    :return: Compiled Template.
    """
    try:
        return self.template_class(self.engine.get_template(template_name))
    except mako_exceptions.TemplateLookupException as exc:
        raise TemplateDoesNotExist(exc.args)
    except mako_exceptions.CompileException as exc:
        raise TemplateSyntaxError(exc.args)
[ "def", "get_template", "(", "self", ",", "template_name", ")", ":", "try", ":", "return", "self", ".", "template_class", "(", "self", ".", "engine", ".", "get_template", "(", "template_name", ")", ")", "except", "mako_exceptions", ".", "TemplateLookupException", "as", "exc", ":", "raise", "TemplateDoesNotExist", "(", "exc", ".", "args", ")", "except", "mako_exceptions", ".", "CompileException", "as", "exc", ":", "raise", "TemplateSyntaxError", "(", "exc", ".", "args", ")" ]
Try to get a compiled template given a template name :param template_name: The template name. :raises: - TemplateDoesNotExist if no such template exists. - TemplateSyntaxError if we couldn't compile the template using Mako syntax. :return: Compiled Template.
[ "Trying", "to", "get", "a", "compiled", "template", "given", "a", "template", "name", ":", "param", "template_name", ":", "The", "template", "name", ".", ":", "raises", ":", "-", "TemplateDoesNotExist", "if", "no", "such", "template", "exists", ".", "-", "TemplateSyntaxError", "if", "we", "couldn", "t", "compile", "the", "template", "using", "Mako", "syntax", ".", ":", "return", ":", "Compiled", "Template", "." ]
python
train
saltstack/salt
salt/modules/lxd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L478-L536
def container_list(list_names=False, remote_addr=None,
                   cert=None, key=None, verify_cert=True):
    '''
    Lists containers

    list_names : False
        Only return a list of names when True

    remote_addr :
        A URL to a remote Server, you also have to give cert and key if
        you provide remote_addr and it's a TCP Address!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert, this is by default True
        but in most cases you want to turn it off as LXD
        normally uses self-signed certificates.

    CLI Examples:

    Full dict with all available information:

    .. code-block:: bash

        salt '*' lxd.container_list

    For a list of names:

    .. code-block:: bash

        salt '*' lxd.container_list true

    See also `container-attributes`_.

    .. _container-attributes: https://github.com/lxc/pylxd/blob/master/doc/source/containers.rst#container-attributes
    '''
    client = pylxd_client_get(remote_addr, cert, key, verify_cert)

    containers = client.containers.all()
    if list_names:
        return [c.name for c in containers]

    return map(_pylxd_model_to_dict, containers)
[ "def", "container_list", "(", "list_names", "=", "False", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "client", "=", "pylxd_client_get", "(", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ")", "containers", "=", "client", ".", "containers", ".", "all", "(", ")", "if", "list_names", ":", "return", "[", "c", ".", "name", "for", "c", "in", "containers", "]", "return", "map", "(", "_pylxd_model_to_dict", ",", "containers", ")" ]
Lists containers

list_names : False
    Only return a list of names when True

remote_addr :
    A URL to a remote Server, you also have to give cert and key if
    you provide remote_addr and it's a TCP Address!

    Examples:
        https://myserver.lan:8443
        /var/lib/mysocket.sock

cert :
    PEM Formatted SSL Certificate.

    Examples:
        ~/.config/lxc/client.crt

key :
    PEM Formatted SSL Key.

    Examples:
        ~/.config/lxc/client.key

verify_cert : True
    Whether to verify the cert, this is by default True
    but in most cases you want to turn it off as LXD
    normally uses self-signed certificates.

CLI Examples:

Full dict with all available information:

.. code-block:: bash

    salt '*' lxd.container_list

For a list of names:

.. code-block:: bash

    salt '*' lxd.container_list true

See also `container-attributes`_.

.. _container-attributes: https://github.com/lxc/pylxd/blob/master/doc/source/containers.rst#container-attributes
[ "Lists", "containers" ]
python
train
aht/stream.py
stream.py
https://github.com/aht/stream.py/blob/6a4945cbddaf74138eee5ba33eee3988cfceb84d/stream.py#L1023-L1034
def close(self):
    """Signal that the executor will no longer accept job submission.

    Worker threads/processes are now allowed to terminate after
    all jobs have been completed.  Without a call to close(),
    they will stay around forever waiting for more jobs to come.
    """
    with self.lock:
        if self.closed:
            return
        self.waitqueue.put(StopIteration)
        self.closed = True
[ "def", "close", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "closed", ":", "return", "self", ".", "waitqueue", ".", "put", "(", "StopIteration", ")", "self", ".", "closed", "=", "True" ]
Signal that the executor will no longer accept job submission. Worker threads/processes are now allowed to terminate after all jobs have been completed. Without a call to close(), they will stay around forever waiting for more jobs to come.
[ "Signal", "that", "the", "executor", "will", "no", "longer", "accept", "job", "submission", ".", "Worker", "threads", "/", "processes", "are", "now", "allowed", "to", "terminate", "after", "all", "jobs", "have", "been", "are", "completed", ".", "Without", "a", "call", "to", "close", "()", "they", "will", "stay", "around", "forever", "waiting", "for", "more", "jobs", "to", "come", "." ]
python
train
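close() above combines a lock-guarded `closed` flag (making the call idempotent) with a sentinel pushed onto the work queue (so blocked workers wake up and exit after draining real jobs). A hedged, runnable sketch of that shutdown pattern; the queue layout and worker loop are illustrative stand-ins, not stream.py's actual internals:

import queue
import threading

_SENTINEL = object()  # plays the role of the StopIteration marker above

class Executor(object):
    def __init__(self):
        self.waitqueue = queue.Queue()
        self.lock = threading.Lock()
        self.closed = False

    def close(self):
        # Idempotent: only the first call enqueues the sentinel.
        with self.lock:
            if self.closed:
                return
            self.waitqueue.put(_SENTINEL)
            self.closed = True

def worker(ex):
    while True:
        job = ex.waitqueue.get()
        if job is _SENTINEL:
            ex.waitqueue.put(_SENTINEL)  # re-post so sibling workers exit too
            break
        job()  # run the submitted callable

ex = Executor()
t = threading.Thread(target=worker, args=(ex,))
t.start()
ex.waitqueue.put(lambda: print("job ran"))
ex.close()   # sentinel is queued after the job, so the job still runs first
t.join()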
hydraplatform/hydra-base
hydra_base/lib/notes.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/notes.py#L64-L80
def add_note(note, **kwargs): """ Add a new note """ note_i = Note() note_i.ref_key = note.ref_key note_i.set_ref(note.ref_key, note.ref_id) note_i.value = note.value note_i.created_by = kwargs.get('user_id') db.DBSession.add(note_i) db.DBSession.flush() return note_i
[ "def", "add_note", "(", "note", ",", "*", "*", "kwargs", ")", ":", "note_i", "=", "Note", "(", ")", "note_i", ".", "ref_key", "=", "note", ".", "ref_key", "note_i", ".", "set_ref", "(", "note", ".", "ref_key", ",", "note", ".", "ref_id", ")", "note_i", ".", "value", "=", "note", ".", "value", "note_i", ".", "created_by", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "db", ".", "DBSession", ".", "add", "(", "note_i", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "note_i" ]
Add a new note
[ "Add", "a", "new", "note" ]
python
train
tensorflow/tensorboard
tensorboard/plugins/text/summary_v2.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/text/summary_v2.py#L29-L60
def text(name, data, step=None, description=None): """Write a text summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A UTF-8 string tensor value. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ summary_metadata = metadata.create_summary_metadata( display_name=None, description=description) # TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback summary_scope = ( getattr(tf.summary.experimental, 'summary_scope', None) or tf.summary.summary_scope) with summary_scope( name, 'text_summary', values=[data, step]) as (tag, _): tf.debugging.assert_type(data, tf.string) return tf.summary.write( tag=tag, tensor=data, step=step, metadata=summary_metadata)
[ "def", "text", "(", "name", ",", "data", ",", "step", "=", "None", ",", "description", "=", "None", ")", ":", "summary_metadata", "=", "metadata", ".", "create_summary_metadata", "(", "display_name", "=", "None", ",", "description", "=", "description", ")", "# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback", "summary_scope", "=", "(", "getattr", "(", "tf", ".", "summary", ".", "experimental", ",", "'summary_scope'", ",", "None", ")", "or", "tf", ".", "summary", ".", "summary_scope", ")", "with", "summary_scope", "(", "name", ",", "'text_summary'", ",", "values", "=", "[", "data", ",", "step", "]", ")", "as", "(", "tag", ",", "_", ")", ":", "tf", ".", "debugging", ".", "assert_type", "(", "data", ",", "tf", ".", "string", ")", "return", "tf", ".", "summary", ".", "write", "(", "tag", "=", "tag", ",", "tensor", "=", "data", ",", "step", "=", "step", ",", "metadata", "=", "summary_metadata", ")" ]
Write a text summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A UTF-8 string tensor value. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None.
[ "Write", "a", "text", "summary", "." ]
python
train
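A hedged usage sketch for the text summary above, assuming TensorFlow 2.x where this function is exposed as tf.summary.text; the log directory and step value are arbitrary:

import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/text_demo")  # any logdir works
with writer.as_default():
    # An explicit step is required here unless a default step was registered
    # via tf.summary.experimental.set_step(); otherwise ValueError is raised.
    tf.summary.text("greeting", tf.constant("hello *markdown* world"), step=0)
writer.flush()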
openstack/horizon
horizon/tables/actions.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/tables/actions.py#L752-L756
def get_default_attrs(self):
    """Returns a dict of the default HTML attributes for the action."""
    attrs = super(BatchAction, self).get_default_attrs()
    attrs.update({'data-batch-action': 'true'})
    return attrs
[ "def", "get_default_attrs", "(", "self", ")", ":", "attrs", "=", "super", "(", "BatchAction", ",", "self", ")", ".", "get_default_attrs", "(", ")", "attrs", ".", "update", "(", "{", "'data-batch-action'", ":", "'true'", "}", ")", "return", "attrs" ]
Returns a dict of the default HTML attributes for the action.
[ "Returns", "a", "list", "of", "the", "default", "HTML", "attributes", "for", "the", "action", "." ]
python
train
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1784-L1792
def validate_metadata(self, handler): """ validate that kind=category does not change the categories """ if self.meta == 'category': new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) if (new_metadata is not None and cur_metadata is not None and not array_equivalent(new_metadata, cur_metadata)): raise ValueError("cannot append a categorical with " "different categories to the existing")
[ "def", "validate_metadata", "(", "self", ",", "handler", ")", ":", "if", "self", ".", "meta", "==", "'category'", ":", "new_metadata", "=", "self", ".", "metadata", "cur_metadata", "=", "handler", ".", "read_metadata", "(", "self", ".", "cname", ")", "if", "(", "new_metadata", "is", "not", "None", "and", "cur_metadata", "is", "not", "None", "and", "not", "array_equivalent", "(", "new_metadata", ",", "cur_metadata", ")", ")", ":", "raise", "ValueError", "(", "\"cannot append a categorical with \"", "\"different categories to the existing\"", ")" ]
validate that kind=category does not change the categories
[ "validate", "that", "kind", "=", "category", "does", "not", "change", "the", "categories" ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_clients.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_clients.py#L345-L376
def ReadClientStartupInfoHistory(self, client_id, timerange=None, cursor=None): """Reads the full startup history for a particular client.""" client_id_int = db_utils.ClientIDToInt(client_id) query = ("SELECT startup_info, UNIX_TIMESTAMP(timestamp) " "FROM client_startup_history " "WHERE client_id=%s ") args = [client_id_int] if timerange: time_from, time_to = timerange # pylint: disable=unpacking-non-sequence if time_from is not None: query += "AND timestamp >= FROM_UNIXTIME(%s) " args.append(mysql_utils.RDFDatetimeToTimestamp(time_from)) if time_to is not None: query += "AND timestamp <= FROM_UNIXTIME(%s) " args.append(mysql_utils.RDFDatetimeToTimestamp(time_to)) query += "ORDER BY timestamp DESC " ret = [] cursor.execute(query, args) for startup_info, timestamp in cursor.fetchall(): si = rdf_client.StartupInfo.FromSerializedString(startup_info) si.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp) ret.append(si) return ret
[ "def", "ReadClientStartupInfoHistory", "(", "self", ",", "client_id", ",", "timerange", "=", "None", ",", "cursor", "=", "None", ")", ":", "client_id_int", "=", "db_utils", ".", "ClientIDToInt", "(", "client_id", ")", "query", "=", "(", "\"SELECT startup_info, UNIX_TIMESTAMP(timestamp) \"", "\"FROM client_startup_history \"", "\"WHERE client_id=%s \"", ")", "args", "=", "[", "client_id_int", "]", "if", "timerange", ":", "time_from", ",", "time_to", "=", "timerange", "# pylint: disable=unpacking-non-sequence", "if", "time_from", "is", "not", "None", ":", "query", "+=", "\"AND timestamp >= FROM_UNIXTIME(%s) \"", "args", ".", "append", "(", "mysql_utils", ".", "RDFDatetimeToTimestamp", "(", "time_from", ")", ")", "if", "time_to", "is", "not", "None", ":", "query", "+=", "\"AND timestamp <= FROM_UNIXTIME(%s) \"", "args", ".", "append", "(", "mysql_utils", ".", "RDFDatetimeToTimestamp", "(", "time_to", ")", ")", "query", "+=", "\"ORDER BY timestamp DESC \"", "ret", "=", "[", "]", "cursor", ".", "execute", "(", "query", ",", "args", ")", "for", "startup_info", ",", "timestamp", "in", "cursor", ".", "fetchall", "(", ")", ":", "si", "=", "rdf_client", ".", "StartupInfo", ".", "FromSerializedString", "(", "startup_info", ")", "si", ".", "timestamp", "=", "mysql_utils", ".", "TimestampToRDFDatetime", "(", "timestamp", ")", "ret", ".", "append", "(", "si", ")", "return", "ret" ]
Reads the full startup history for a particular client.
[ "Reads", "the", "full", "startup", "history", "for", "a", "particular", "client", "." ]
python
train
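The method above grows the SQL string and its argument list in lockstep, so the optional time bounds become placeholders rather than string interpolation. The same pattern, reduced to a runnable sqlite3 sketch (table, columns, and data are invented for illustration):

import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE history (client_id INTEGER, ts INTEGER)")
con.executemany("INSERT INTO history VALUES (?, ?)",
                [(1, 10), (1, 20), (1, 30), (2, 15)])

def read_history(client_id, time_from=None, time_to=None):
    query = "SELECT ts FROM history WHERE client_id = ? "
    args = [client_id]
    if time_from is not None:        # clause and placeholder grow together
        query += "AND ts >= ? "
        args.append(time_from)
    if time_to is not None:
        query += "AND ts <= ? "
        args.append(time_to)
    query += "ORDER BY ts DESC"
    return [row[0] for row in con.execute(query, args)]

print(read_history(1, time_from=15))  # -> [30, 20]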
mardiros/pyshop
pyshop/helpers/i18n.py
https://github.com/mardiros/pyshop/blob/b42510b9c3fa16e0e5710457401ac38fea5bf7a0/pyshop/helpers/i18n.py#L16-L22
def locale_negotiator(request):
    """Locale negotiator based on the `Accept-Language` header"""
    locale = 'en'
    if request.accept_language:
        locale = request.accept_language.best_match(LANGUAGES)
    locale = LANGUAGES.get(locale, 'en')
    return locale
[ "def", "locale_negotiator", "(", "request", ")", ":", "locale", "=", "'en'", "if", "request", ".", "accept_language", ":", "locale", "=", "request", ".", "accept_language", ".", "best_match", "(", "LANGUAGES", ")", "locale", "=", "LANGUAGES", ".", "get", "(", "locale", ",", "'en'", ")", "return", "locale" ]
Locale negotiator based on the `Accept-Language` header
[ "Locale", "negotiator", "base", "on", "the", "Accept", "-", "Language", "header" ]
python
train
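Behaviorally, the negotiator above falls back to 'en' whenever the header is missing or best-matches nothing in LANGUAGES. A small self-contained sketch with stand-in request objects (the LANGUAGES table here is assumed; pyshop's real one may differ):

class FakeAcceptLanguage(object):
    def __init__(self, preferred):
        self.preferred = preferred
    def best_match(self, offers):
        return self.preferred if self.preferred in offers else None

class FakeRequest(object):
    def __init__(self, accept_language=None):
        self.accept_language = accept_language

LANGUAGES = {'en': 'en', 'fr': 'fr'}  # assumed mapping

def locale_negotiator(request):
    locale = 'en'
    if request.accept_language:
        locale = request.accept_language.best_match(LANGUAGES)
    return LANGUAGES.get(locale, 'en')

print(locale_negotiator(FakeRequest()))                           # 'en'
print(locale_negotiator(FakeRequest(FakeAcceptLanguage('fr'))))   # 'fr'
print(locale_negotiator(FakeRequest(FakeAcceptLanguage('de'))))   # 'en' fallback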
osrg/ryu
ryu/lib/type_desc.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/type_desc.py#L57-L62
def _split_str(s, n): """ split string into list of strings by specified number. """ length = len(s) return [s[i:i + n] for i in range(0, length, n)]
[ "def", "_split_str", "(", "s", ",", "n", ")", ":", "length", "=", "len", "(", "s", ")", "return", "[", "s", "[", "i", ":", "i", "+", "n", "]", "for", "i", "in", "range", "(", "0", ",", "length", ",", "n", ")", "]" ]
split string into list of strings by specified number.
[ "split", "string", "into", "list", "of", "strings", "by", "specified", "number", "." ]
python
train
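A quick usage check for the chunking helper above; the last chunk is simply shorter when the length is not a multiple of n:

def _split_str(s, n):
    length = len(s)
    return [s[i:i + n] for i in range(0, length, n)]

print(_split_str("abcdef", 2))   # ['ab', 'cd', 'ef']
print(_split_str("abcdefg", 3))  # ['abc', 'def', 'g']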
ECESeniorDesign/lazy_record
lazy_record/repo.py
https://github.com/ECESeniorDesign/lazy_record/blob/929d3cc7c2538b0f792365c0d2b0e0d41084c2dd/lazy_record/repo.py#L256-L273
def update(self, **data): """ Update records in the table with +data+. Often combined with `where`, as it acts on all records in the table unless restricted. ex) >>> Repo("foos").update(name="bar") UPDATE foos SET name = "bar" """ data = data.items() update_command_arg = ", ".join("{} = ?".format(entry[0]) for entry in data) cmd = "update {table} set {update_command_arg} {where_clause}".format( update_command_arg=update_command_arg, where_clause=self.where_clause, table=self.table_name).rstrip() Repo.db.execute(cmd, [entry[1] for entry in data] + self.where_values)
[ "def", "update", "(", "self", ",", "*", "*", "data", ")", ":", "data", "=", "data", ".", "items", "(", ")", "update_command_arg", "=", "\", \"", ".", "join", "(", "\"{} = ?\"", ".", "format", "(", "entry", "[", "0", "]", ")", "for", "entry", "in", "data", ")", "cmd", "=", "\"update {table} set {update_command_arg} {where_clause}\"", ".", "format", "(", "update_command_arg", "=", "update_command_arg", ",", "where_clause", "=", "self", ".", "where_clause", ",", "table", "=", "self", ".", "table_name", ")", ".", "rstrip", "(", ")", "Repo", ".", "db", ".", "execute", "(", "cmd", ",", "[", "entry", "[", "1", "]", "for", "entry", "in", "data", "]", "+", "self", ".", "where_values", ")" ]
Update records in the table with +data+. Often combined with `where`, as it acts on all records in the table unless restricted. ex) >>> Repo("foos").update(name="bar") UPDATE foos SET name = "bar"
[ "Update", "records", "in", "the", "table", "with", "+", "data", "+", ".", "Often", "combined", "with", "where", "as", "it", "acts", "on", "all", "records", "in", "the", "table", "unless", "restricted", "." ]
python
train
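The SET clause above is assembled from the keyword arguments with `?` placeholders while the values travel separately, mirroring how the where clause is handled. A standalone sqlite3 sketch of that construction (the where plumbing is a simplified stand-in for lazy_record's Repo):

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE foos (id INTEGER, name TEXT)")
db.execute("INSERT INTO foos VALUES (1, 'foo')")

def update(table, where_clause, where_values, **data):
    items = list(data.items())
    set_args = ", ".join("{} = ?".format(col) for col, _ in items)
    cmd = "update {} set {} {}".format(table, set_args, where_clause).rstrip()
    db.execute(cmd, [val for _, val in items] + where_values)

update("foos", "where id = ?", [1], name="bar")
print(db.execute("SELECT name FROM foos").fetchone())  # ('bar',)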
ynop/audiomate
audiomate/corpus/corpus.py
https://github.com/ynop/audiomate/blob/61727920b23a708293c3d526fa3000d4de9c6c21/audiomate/corpus/corpus.py#L404-L433
def merge_corpus(self, corpus): """ Merge the given corpus into this corpus. All assets (tracks, utterances, issuers, ...) are copied into this corpus. If any ids (utt-idx, track-idx, issuer-idx, subview-idx, ...) are occurring in both corpora, the ids from the merging corpus are suffixed by a number (starting from 1 until no other is matching). Args: corpus (CorpusView): The corpus to merge. """ # Create a copy, so objects aren't changed in the original merging corpus merging_corpus = Corpus.from_corpus(corpus) self.import_tracks(corpus.tracks.values()) self.import_issuers(corpus.issuers.values()) utterance_idx_mapping = self.import_utterances(corpus.utterances.values()) for subview_idx, subview in merging_corpus.subviews.items(): for filter in subview.filter_criteria: if isinstance(filter, subset.MatchingUtteranceIdxFilter): new_filtered_utt_ids = set() for utt_idx in filter.utterance_idxs: new_filtered_utt_ids.add(utterance_idx_mapping[utt_idx].idx) filter.utterance_idxs = new_filtered_utt_ids new_idx = naming.index_name_if_in_list(subview_idx, self.subviews.keys()) self.import_subview(new_idx, subview) for feat_container_idx, feat_container in merging_corpus.feature_containers.items(): self.new_feature_container(feat_container_idx, feat_container.path)
[ "def", "merge_corpus", "(", "self", ",", "corpus", ")", ":", "# Create a copy, so objects aren't changed in the original merging corpus", "merging_corpus", "=", "Corpus", ".", "from_corpus", "(", "corpus", ")", "self", ".", "import_tracks", "(", "corpus", ".", "tracks", ".", "values", "(", ")", ")", "self", ".", "import_issuers", "(", "corpus", ".", "issuers", ".", "values", "(", ")", ")", "utterance_idx_mapping", "=", "self", ".", "import_utterances", "(", "corpus", ".", "utterances", ".", "values", "(", ")", ")", "for", "subview_idx", ",", "subview", "in", "merging_corpus", ".", "subviews", ".", "items", "(", ")", ":", "for", "filter", "in", "subview", ".", "filter_criteria", ":", "if", "isinstance", "(", "filter", ",", "subset", ".", "MatchingUtteranceIdxFilter", ")", ":", "new_filtered_utt_ids", "=", "set", "(", ")", "for", "utt_idx", "in", "filter", ".", "utterance_idxs", ":", "new_filtered_utt_ids", ".", "add", "(", "utterance_idx_mapping", "[", "utt_idx", "]", ".", "idx", ")", "filter", ".", "utterance_idxs", "=", "new_filtered_utt_ids", "new_idx", "=", "naming", ".", "index_name_if_in_list", "(", "subview_idx", ",", "self", ".", "subviews", ".", "keys", "(", ")", ")", "self", ".", "import_subview", "(", "new_idx", ",", "subview", ")", "for", "feat_container_idx", ",", "feat_container", "in", "merging_corpus", ".", "feature_containers", ".", "items", "(", ")", ":", "self", ".", "new_feature_container", "(", "feat_container_idx", ",", "feat_container", ".", "path", ")" ]
Merge the given corpus into this corpus. All assets (tracks, utterances, issuers, ...) are copied into this corpus. If any ids (utt-idx, track-idx, issuer-idx, subview-idx, ...) are occurring in both corpora, the ids from the merging corpus are suffixed by a number (starting from 1 until no other is matching). Args: corpus (CorpusView): The corpus to merge.
[ "Merge", "the", "given", "corpus", "into", "this", "corpus", ".", "All", "assets", "(", "tracks", "utterances", "issuers", "...", ")", "are", "copied", "into", "this", "corpus", ".", "If", "any", "ids", "(", "utt", "-", "idx", "track", "-", "idx", "issuer", "-", "idx", "subview", "-", "idx", "...", ")", "are", "occurring", "in", "both", "corpora", "the", "ids", "from", "the", "merging", "corpus", "are", "suffixed", "by", "a", "number", "(", "starting", "from", "1", "until", "no", "other", "is", "matching", ")", "." ]
python
train
SeleniumHQ/selenium
py/selenium/webdriver/remote/webdriver.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webdriver.py#L65-L95
def _make_w3c_caps(caps): """Makes a W3C alwaysMatch capabilities object. Filters out capability names that are not in the W3C spec. Spec-compliant drivers will reject requests containing unknown capability names. Moves the Firefox profile, if present, from the old location to the new Firefox options object. :Args: - caps - A dictionary of capabilities requested by the caller. """ caps = copy.deepcopy(caps) profile = caps.get('firefox_profile') always_match = {} if caps.get('proxy') and caps['proxy'].get('proxyType'): caps['proxy']['proxyType'] = caps['proxy']['proxyType'].lower() for k, v in caps.items(): if v and k in _OSS_W3C_CONVERSION: always_match[_OSS_W3C_CONVERSION[k]] = v.lower() if k == 'platform' else v if k in _W3C_CAPABILITY_NAMES or ':' in k: always_match[k] = v if profile: moz_opts = always_match.get('moz:firefoxOptions', {}) # If it's already present, assume the caller did that intentionally. if 'profile' not in moz_opts: # Don't mutate the original capabilities. new_opts = copy.deepcopy(moz_opts) new_opts['profile'] = profile always_match['moz:firefoxOptions'] = new_opts return {"firstMatch": [{}], "alwaysMatch": always_match}
[ "def", "_make_w3c_caps", "(", "caps", ")", ":", "caps", "=", "copy", ".", "deepcopy", "(", "caps", ")", "profile", "=", "caps", ".", "get", "(", "'firefox_profile'", ")", "always_match", "=", "{", "}", "if", "caps", ".", "get", "(", "'proxy'", ")", "and", "caps", "[", "'proxy'", "]", ".", "get", "(", "'proxyType'", ")", ":", "caps", "[", "'proxy'", "]", "[", "'proxyType'", "]", "=", "caps", "[", "'proxy'", "]", "[", "'proxyType'", "]", ".", "lower", "(", ")", "for", "k", ",", "v", "in", "caps", ".", "items", "(", ")", ":", "if", "v", "and", "k", "in", "_OSS_W3C_CONVERSION", ":", "always_match", "[", "_OSS_W3C_CONVERSION", "[", "k", "]", "]", "=", "v", ".", "lower", "(", ")", "if", "k", "==", "'platform'", "else", "v", "if", "k", "in", "_W3C_CAPABILITY_NAMES", "or", "':'", "in", "k", ":", "always_match", "[", "k", "]", "=", "v", "if", "profile", ":", "moz_opts", "=", "always_match", ".", "get", "(", "'moz:firefoxOptions'", ",", "{", "}", ")", "# If it's already present, assume the caller did that intentionally.", "if", "'profile'", "not", "in", "moz_opts", ":", "# Don't mutate the original capabilities.", "new_opts", "=", "copy", ".", "deepcopy", "(", "moz_opts", ")", "new_opts", "[", "'profile'", "]", "=", "profile", "always_match", "[", "'moz:firefoxOptions'", "]", "=", "new_opts", "return", "{", "\"firstMatch\"", ":", "[", "{", "}", "]", ",", "\"alwaysMatch\"", ":", "always_match", "}" ]
Makes a W3C alwaysMatch capabilities object. Filters out capability names that are not in the W3C spec. Spec-compliant drivers will reject requests containing unknown capability names. Moves the Firefox profile, if present, from the old location to the new Firefox options object. :Args: - caps - A dictionary of capabilities requested by the caller.
[ "Makes", "a", "W3C", "alwaysMatch", "capabilities", "object", "." ]
python
train
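Concretely, the conversion above lower-cases `platform`, renames legacy OSS keys, and keeps only W3C-legal or vendor-prefixed ("x:y") names. A toy before/after run; the conversion tables here are a tiny assumed subset of Selenium's real _OSS_W3C_CONVERSION and _W3C_CAPABILITY_NAMES:

import copy

_OSS_W3C_CONVERSION = {'platform': 'platformName', 'version': 'browserVersion'}
_W3C_CAPABILITY_NAMES = {'browserName', 'browserVersion', 'platformName'}

def make_w3c_caps(caps):
    caps = copy.deepcopy(caps)
    always_match = {}
    for k, v in caps.items():
        if v and k in _OSS_W3C_CONVERSION:
            always_match[_OSS_W3C_CONVERSION[k]] = v.lower() if k == 'platform' else v
        if k in _W3C_CAPABILITY_NAMES or ':' in k:
            always_match[k] = v
    return {"firstMatch": [{}], "alwaysMatch": always_match}

print(make_w3c_caps({'browserName': 'firefox',
                     'platform': 'LINUX',           # legacy key, renamed
                     'moz:debuggerAddress': True}))  # vendor-prefixed, kept
# -> {'firstMatch': [{}], 'alwaysMatch': {'browserName': 'firefox',
#     'platformName': 'linux', 'moz:debuggerAddress': True}}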
numenta/nupic
src/nupic/regions/sp_region.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/sp_region.py#L887-L906
def readFromProto(cls, proto): """ Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`. Read state from proto object. :param proto: SPRegionProto capnproto object """ instance = cls(proto.columnCount, proto.inputWidth) instance.spatialImp = proto.spatialImp instance.learningMode = proto.learningMode instance.inferenceMode = proto.inferenceMode instance.anomalyMode = proto.anomalyMode instance.topDownMode = proto.topDownMode spatialImp = proto.spatialImp instance._sfdr = getSPClass(spatialImp).read(proto.spatialPooler) return instance
[ "def", "readFromProto", "(", "cls", ",", "proto", ")", ":", "instance", "=", "cls", "(", "proto", ".", "columnCount", ",", "proto", ".", "inputWidth", ")", "instance", ".", "spatialImp", "=", "proto", ".", "spatialImp", "instance", ".", "learningMode", "=", "proto", ".", "learningMode", "instance", ".", "inferenceMode", "=", "proto", ".", "inferenceMode", "instance", ".", "anomalyMode", "=", "proto", ".", "anomalyMode", "instance", ".", "topDownMode", "=", "proto", ".", "topDownMode", "spatialImp", "=", "proto", ".", "spatialImp", "instance", ".", "_sfdr", "=", "getSPClass", "(", "spatialImp", ")", ".", "read", "(", "proto", ".", "spatialPooler", ")", "return", "instance" ]
Overrides :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`. Read state from proto object. :param proto: SPRegionProto capnproto object
[ "Overrides", ":", "meth", ":", "~nupic", ".", "bindings", ".", "regions", ".", "PyRegion", ".", "PyRegion", ".", "readFromProto", "." ]
python
valid
django-fluent/django-fluent-contents
fluent_contents/extensions/pluginbase.py
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/extensions/pluginbase.py#L414-L434
def set_cached_output(self, placeholder_name, instance, output):
    """
    .. versionadded:: 0.9
    Store the cached output for a rendered item.

    This method can be overridden to implement custom caching mechanisms.
    By default, this function generates the cache key using
    :func:`~fluent_contents.cache.get_rendering_cache_key`
    and stores the results in the configured Django cache backend (e.g. memcached).

    When custom cache keys are used, also include those in
    :func:`get_output_cache_keys` so the cache will be cleared when needed.

    .. versionchanged:: 1.0
       The received data is no longer an HTML string,
       but a :class:`~fluent_contents.models.ContentItemOutput` object.
    """
    cachekey = self.get_output_cache_key(placeholder_name, instance)
    if self.cache_timeout is not DEFAULT_TIMEOUT:
        cache.set(cachekey, output, self.cache_timeout)
    else:
        # Don't want to mix into the default 0/None issue.
        cache.set(cachekey, output)
[ "def", "set_cached_output", "(", "self", ",", "placeholder_name", ",", "instance", ",", "output", ")", ":", "cachekey", "=", "self", ".", "get_output_cache_key", "(", "placeholder_name", ",", "instance", ")", "if", "self", ".", "cache_timeout", "is", "not", "DEFAULT_TIMEOUT", ":", "cache", ".", "set", "(", "cachekey", ",", "output", ",", "self", ".", "cache_timeout", ")", "else", ":", "# Don't want to mix into the default 0/None issue.", "cache", ".", "set", "(", "cachekey", ",", "output", ")" ]
.. versionadded:: 0.9
Store the cached output for a rendered item.

This method can be overridden to implement custom caching mechanisms.
By default, this function generates the cache key using
:func:`~fluent_contents.cache.get_rendering_cache_key`
and stores the results in the configured Django cache backend (e.g. memcached).

When custom cache keys are used, also include those in
:func:`get_output_cache_keys` so the cache will be cleared when needed.

.. versionchanged:: 1.0
   The received data is no longer an HTML string,
   but a :class:`~fluent_contents.models.ContentItemOutput` object.
[ "..", "versionadded", "::", "0", ".", "9", "Store", "the", "cached", "output", "for", "a", "rendered", "item", "." ]
python
train
EUDAT-B2SAFE/B2HANDLE
b2handle/handleclient.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handleclient.py#L1226-L1271
def __exchange_URL_in_13020loc(self, oldurl, newurl, list_of_entries, handle): ''' Exchange every occurrence of oldurl against newurl in a 10320/LOC entry. This does not change the ids or other xml attributes of the <location> element. :param oldurl: The URL that will be overwritten. :param newurl: The URL to write into the entry. :param list_of_entries: A list of the existing entries (to find and remove the correct one). :param handle: Only for the exception message. :raise: GenericHandleError: If several 10320/LOC exist (unlikely). ''' # Find existing 10320/LOC entries python_indices = self.__get_python_indices_for_key( '10320/LOC', list_of_entries ) num_exchanged = 0 if len(python_indices) > 0: if len(python_indices) > 1: msg = str(len(python_indices)) + ' entries of type "10320/LOC".' raise BrokenHandleRecordException(handle=handle, msg=msg) for index in python_indices: entry = list_of_entries.pop(index) xmlroot = ET.fromstring(entry['data']['value']) all_URL_elements = xmlroot.findall('location') for element in all_URL_elements: if element.get('href') == oldurl: LOGGER.debug('__exchange_URL_in_13020loc: Exchanging URL ' + oldurl + ' from 10320/LOC.') num_exchanged += 1 element.set('href', newurl) entry['data']['value'] = ET.tostring(xmlroot, encoding=encoding_value) list_of_entries.append(entry) if num_exchanged == 0: LOGGER.debug('__exchange_URL_in_13020loc: No URLs exchanged.') else: message = '__exchange_URL_in_13020loc: The URL "' + oldurl + '" was exchanged ' + str(num_exchanged) + \ ' times against the new url "' + newurl + '" in 10320/LOC.' message = message.replace('1 times', 'once') LOGGER.debug(message)
[ "def", "__exchange_URL_in_13020loc", "(", "self", ",", "oldurl", ",", "newurl", ",", "list_of_entries", ",", "handle", ")", ":", "# Find existing 10320/LOC entries", "python_indices", "=", "self", ".", "__get_python_indices_for_key", "(", "'10320/LOC'", ",", "list_of_entries", ")", "num_exchanged", "=", "0", "if", "len", "(", "python_indices", ")", ">", "0", ":", "if", "len", "(", "python_indices", ")", ">", "1", ":", "msg", "=", "str", "(", "len", "(", "python_indices", ")", ")", "+", "' entries of type \"10320/LOC\".'", "raise", "BrokenHandleRecordException", "(", "handle", "=", "handle", ",", "msg", "=", "msg", ")", "for", "index", "in", "python_indices", ":", "entry", "=", "list_of_entries", ".", "pop", "(", "index", ")", "xmlroot", "=", "ET", ".", "fromstring", "(", "entry", "[", "'data'", "]", "[", "'value'", "]", ")", "all_URL_elements", "=", "xmlroot", ".", "findall", "(", "'location'", ")", "for", "element", "in", "all_URL_elements", ":", "if", "element", ".", "get", "(", "'href'", ")", "==", "oldurl", ":", "LOGGER", ".", "debug", "(", "'__exchange_URL_in_13020loc: Exchanging URL '", "+", "oldurl", "+", "' from 10320/LOC.'", ")", "num_exchanged", "+=", "1", "element", ".", "set", "(", "'href'", ",", "newurl", ")", "entry", "[", "'data'", "]", "[", "'value'", "]", "=", "ET", ".", "tostring", "(", "xmlroot", ",", "encoding", "=", "encoding_value", ")", "list_of_entries", ".", "append", "(", "entry", ")", "if", "num_exchanged", "==", "0", ":", "LOGGER", ".", "debug", "(", "'__exchange_URL_in_13020loc: No URLs exchanged.'", ")", "else", ":", "message", "=", "'__exchange_URL_in_13020loc: The URL \"'", "+", "oldurl", "+", "'\" was exchanged '", "+", "str", "(", "num_exchanged", ")", "+", "' times against the new url \"'", "+", "newurl", "+", "'\" in 10320/LOC.'", "message", "=", "message", ".", "replace", "(", "'1 times'", ",", "'once'", ")", "LOGGER", ".", "debug", "(", "message", ")" ]
Exchange every occurrence of oldurl against newurl in a 10320/LOC entry. This does not change the ids or other xml attributes of the <location> element. :param oldurl: The URL that will be overwritten. :param newurl: The URL to write into the entry. :param list_of_entries: A list of the existing entries (to find and remove the correct one). :param handle: Only for the exception message. :raise: GenericHandleError: If several 10320/LOC exist (unlikely).
[ "Exchange", "every", "occurrence", "of", "oldurl", "against", "newurl", "in", "a", "10320", "/", "LOC", "entry", ".", "This", "does", "not", "change", "the", "ids", "or", "other", "xml", "attributes", "of", "the", "<location", ">", "element", "." ]
python
train
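Stripped of the handle-record bookkeeping, the heart of the URL exchange above is ordinary ElementTree work: parse the stored XML, rewrite matching href attributes in place (ids untouched), serialize back. A minimal sketch of just that step with invented data:

import xml.etree.ElementTree as ET

value = ('<locations>'
         '<location id="0" href="http://old.example/a"/>'
         '<location id="1" href="http://other.example/b"/>'
         '</locations>')

root = ET.fromstring(value)
for element in root.findall('location'):
    if element.get('href') == 'http://old.example/a':
        element.set('href', 'http://new.example/a')  # only href changes

print(ET.tostring(root, encoding='unicode'))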
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3326-L3341
def com_google_fonts_check_varfont_generate_static(ttFont):
    """ Check a static ttf can be generated from a variable font. """
    import tempfile
    from fontTools.varLib import mutator

    try:
        loc = {k.axisTag: float((k.maxValue + k.minValue) / 2)
               for k in ttFont['fvar'].axes}
        with tempfile.TemporaryFile() as instance:
            font = mutator.instantiateVariableFont(ttFont, loc)
            font.save(instance)
            yield PASS, ("fontTools.varLib.mutator generated a static font "
                         "instance")
    except Exception as e:
        yield FAIL, ("fontTools.varLib.mutator failed to generate a static font "
                     "instance\n{}".format(repr(e)))
[ "def", "com_google_fonts_check_varfont_generate_static", "(", "ttFont", ")", ":", "import", "tempfile", "from", "fontTools", ".", "varLib", "import", "mutator", "try", ":", "loc", "=", "{", "k", ".", "axisTag", ":", "float", "(", "(", "k", ".", "maxValue", "+", "k", ".", "minValue", ")", "/", "2", ")", "for", "k", "in", "ttFont", "[", "'fvar'", "]", ".", "axes", "}", "with", "tempfile", ".", "TemporaryFile", "(", ")", "as", "instance", ":", "font", "=", "mutator", ".", "instantiateVariableFont", "(", "ttFont", ",", "loc", ")", "font", ".", "save", "(", "instance", ")", "yield", "PASS", ",", "(", "\"fontTools.varLib.mutator generated a static font \"", "\"instance\"", ")", "except", "Exception", "as", "e", ":", "yield", "FAIL", ",", "(", "\"fontTools.varLib.mutator failed to generated a static font \"", "\"instance\\n{}\"", ".", "format", "(", "repr", "(", "e", ")", ")", ")" ]
Check a static ttf can be generated from a variable font.
[ "Check", "a", "static", "ttf", "can", "be", "generated", "from", "a", "variable", "font", "." ]
python
train
peterbrittain/asciimatics
asciimatics/widgets.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/widgets.py#L1483-L1492
def get_location(self): """ Return the absolute location of this widget on the Screen, taking into account the current state of the Frame that is displaying it and any label offsets of the Widget. :returns: A tuple of the form (<X coordinate>, <Y coordinate>). """ origin = self._frame.canvas.origin return (self._x + origin[0] + self._offset, self._y + origin[1] - self._frame.canvas.start_line)
[ "def", "get_location", "(", "self", ")", ":", "origin", "=", "self", ".", "_frame", ".", "canvas", ".", "origin", "return", "(", "self", ".", "_x", "+", "origin", "[", "0", "]", "+", "self", ".", "_offset", ",", "self", ".", "_y", "+", "origin", "[", "1", "]", "-", "self", ".", "_frame", ".", "canvas", ".", "start_line", ")" ]
Return the absolute location of this widget on the Screen, taking into account the current state of the Frame that is displaying it and any label offsets of the Widget. :returns: A tuple of the form (<X coordinate>, <Y coordinate>).
[ "Return", "the", "absolute", "location", "of", "this", "widget", "on", "the", "Screen", "taking", "into", "account", "the", "current", "state", "of", "the", "Frame", "that", "is", "displaying", "it", "and", "any", "label", "offsets", "of", "the", "Widget", "." ]
python
train
timstaley/voevent-parse
src/voeventparse/voevent.py
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L430-L437
def _listify(x): """Ensure x is iterable; if not then enclose it in a list and return it.""" if isinstance(x, string_types): return [x] elif isinstance(x, collections.Iterable): return x else: return [x]
[ "def", "_listify", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "string_types", ")", ":", "return", "[", "x", "]", "elif", "isinstance", "(", "x", ",", "collections", ".", "Iterable", ")", ":", "return", "x", "else", ":", "return", "[", "x", "]" ]
Ensure x is iterable; if not then enclose it in a list and return it.
[ "Ensure", "x", "is", "iterable", ";", "if", "not", "then", "enclose", "it", "in", "a", "list", "and", "return", "it", "." ]
python
train
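Usage of the helper above: strings are deliberately wrapped before the general iterable test, so they are not exploded character by character. This sketch swaps the record's Py2-era names (string_types, collections.Iterable) for their modern equivalents:

from collections.abc import Iterable

def _listify(x):
    if isinstance(x, str):        # strings are iterable, so test them first
        return [x]
    elif isinstance(x, Iterable):
        return x                  # other iterables pass through unchanged
    else:
        return [x]

print(_listify("ivorn"))    # ['ivorn']  (not ['i', 'v', 'o', 'r', 'n'])
print(_listify([1, 2, 3]))  # [1, 2, 3]
print(_listify(42))         # [42]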
awslabs/sockeye
sockeye/data_io.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/data_io.py#L522-L531
def get_num_shards(num_samples: int, samples_per_shard: int, min_num_shards: int) -> int: """ Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards. """ return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)
[ "def", "get_num_shards", "(", "num_samples", ":", "int", ",", "samples_per_shard", ":", "int", ",", "min_num_shards", ":", "int", ")", "->", "int", ":", "return", "max", "(", "int", "(", "math", ".", "ceil", "(", "num_samples", "/", "samples_per_shard", ")", ")", ",", "min_num_shards", ")" ]
Returns the number of shards. :param num_samples: Number of training data samples. :param samples_per_shard: Samples per shard. :param min_num_shards: Minimum number of shards. :return: Number of shards.
[ "Returns", "the", "number", "of", "shards", "." ]
python
train
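Worked numbers for the shard count above: the sample count is divided up and rounded toward more shards, with a minimum applied (true division assumed, i.e. Python 3):

import math

def get_num_shards(num_samples, samples_per_shard, min_num_shards):
    return max(int(math.ceil(num_samples / samples_per_shard)), min_num_shards)

print(get_num_shards(10000, 3000, 1))  # ceil(3.33) -> 4 shards
print(get_num_shards(500, 3000, 2))    # ceil gives 1, the floor of 2 wins -> 2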
pypa/pipenv
pipenv/vendor/tomlkit/parser.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/tomlkit/parser.py#L272-L325
def _parse_comment_trail(self): # type: () -> Tuple[str, str, str] """ Returns (comment_ws, comment, trail) If there is no comment, comment_ws and comment will simply be empty. """ if self.end(): return "", "", "" comment = "" comment_ws = "" self.mark() while True: c = self._current if c == "\n": break elif c == "#": comment_ws = self.extract() self.mark() self.inc() # Skip # # The comment itself while not self.end() and not self._current.is_nl() and self.inc(): pass comment = self.extract() self.mark() break elif c in " \t\r": self.inc() else: raise self.parse_error(UnexpectedCharError, c) if self.end(): break while self._current.is_spaces() and self.inc(): pass if self._current == "\r": self.inc() if self._current == "\n": self.inc() trail = "" if self._idx != self._marker or self._current.is_ws(): trail = self.extract() return comment_ws, comment, trail
[ "def", "_parse_comment_trail", "(", "self", ")", ":", "# type: () -> Tuple[str, str, str]", "if", "self", ".", "end", "(", ")", ":", "return", "\"\"", ",", "\"\"", ",", "\"\"", "comment", "=", "\"\"", "comment_ws", "=", "\"\"", "self", ".", "mark", "(", ")", "while", "True", ":", "c", "=", "self", ".", "_current", "if", "c", "==", "\"\\n\"", ":", "break", "elif", "c", "==", "\"#\"", ":", "comment_ws", "=", "self", ".", "extract", "(", ")", "self", ".", "mark", "(", ")", "self", ".", "inc", "(", ")", "# Skip #", "# The comment itself", "while", "not", "self", ".", "end", "(", ")", "and", "not", "self", ".", "_current", ".", "is_nl", "(", ")", "and", "self", ".", "inc", "(", ")", ":", "pass", "comment", "=", "self", ".", "extract", "(", ")", "self", ".", "mark", "(", ")", "break", "elif", "c", "in", "\" \\t\\r\"", ":", "self", ".", "inc", "(", ")", "else", ":", "raise", "self", ".", "parse_error", "(", "UnexpectedCharError", ",", "c", ")", "if", "self", ".", "end", "(", ")", ":", "break", "while", "self", ".", "_current", ".", "is_spaces", "(", ")", "and", "self", ".", "inc", "(", ")", ":", "pass", "if", "self", ".", "_current", "==", "\"\\r\"", ":", "self", ".", "inc", "(", ")", "if", "self", ".", "_current", "==", "\"\\n\"", ":", "self", ".", "inc", "(", ")", "trail", "=", "\"\"", "if", "self", ".", "_idx", "!=", "self", ".", "_marker", "or", "self", ".", "_current", ".", "is_ws", "(", ")", ":", "trail", "=", "self", ".", "extract", "(", ")", "return", "comment_ws", ",", "comment", ",", "trail" ]
Returns (comment_ws, comment, trail) If there is no comment, comment_ws and comment will simply be empty.
[ "Returns", "(", "comment_ws", "comment", "trail", ")", "If", "there", "is", "no", "comment", "comment_ws", "and", "comment", "will", "simply", "be", "empty", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/bin/t2t_attack.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/bin/t2t_attack.py#L83-L131
def create_surrogate_run_config(hp): """Create a run config. Args: hp: model hyperparameters Returns: a run config """ save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency) save_ckpt_secs = FLAGS.save_checkpoints_secs or None if save_ckpt_secs: save_ckpt_steps = None assert FLAGS.surrogate_output_dir # the various custom getters we have written do not play well together yet. # TODO(noam): ask rsepassi for help here. daisy_chain_variables = ( hp.daisy_chain_variables and hp.activation_dtype == "float32" and hp.weight_dtype == "float32") return trainer_lib.create_run_config( model_name=FLAGS.model, model_dir=os.path.expanduser(FLAGS.surrogate_output_dir), master=FLAGS.master, iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.tpu_num_shards, log_device_placement=FLAGS.log_device_placement, save_checkpoints_steps=save_ckpt_steps, save_checkpoints_secs=save_ckpt_secs, keep_checkpoint_max=FLAGS.keep_checkpoint_max, keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, num_gpus=FLAGS.worker_gpu, gpu_order=FLAGS.gpu_order, num_async_replicas=FLAGS.worker_replicas, gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction, enable_graph_rewriter=FLAGS.enable_graph_rewriter, use_tpu=FLAGS.use_tpu, schedule=FLAGS.schedule, no_data_parallelism=hp.no_data_parallelism, daisy_chain_variables=daisy_chain_variables, ps_replicas=FLAGS.ps_replicas, ps_job=FLAGS.ps_job, ps_gpu=FLAGS.ps_gpu, sync=FLAGS.sync, worker_id=FLAGS.worker_id, worker_job=FLAGS.worker_job, random_seed=FLAGS.random_seed, tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs, inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, log_step_count_steps=FLAGS.log_step_count_steps, intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)
[ "def", "create_surrogate_run_config", "(", "hp", ")", ":", "save_ckpt_steps", "=", "max", "(", "FLAGS", ".", "iterations_per_loop", ",", "FLAGS", ".", "local_eval_frequency", ")", "save_ckpt_secs", "=", "FLAGS", ".", "save_checkpoints_secs", "or", "None", "if", "save_ckpt_secs", ":", "save_ckpt_steps", "=", "None", "assert", "FLAGS", ".", "surrogate_output_dir", "# the various custom getters we have written do not play well together yet.", "# TODO(noam): ask rsepassi for help here.", "daisy_chain_variables", "=", "(", "hp", ".", "daisy_chain_variables", "and", "hp", ".", "activation_dtype", "==", "\"float32\"", "and", "hp", ".", "weight_dtype", "==", "\"float32\"", ")", "return", "trainer_lib", ".", "create_run_config", "(", "model_name", "=", "FLAGS", ".", "model", ",", "model_dir", "=", "os", ".", "path", ".", "expanduser", "(", "FLAGS", ".", "surrogate_output_dir", ")", ",", "master", "=", "FLAGS", ".", "master", ",", "iterations_per_loop", "=", "FLAGS", ".", "iterations_per_loop", ",", "num_shards", "=", "FLAGS", ".", "tpu_num_shards", ",", "log_device_placement", "=", "FLAGS", ".", "log_device_placement", ",", "save_checkpoints_steps", "=", "save_ckpt_steps", ",", "save_checkpoints_secs", "=", "save_ckpt_secs", ",", "keep_checkpoint_max", "=", "FLAGS", ".", "keep_checkpoint_max", ",", "keep_checkpoint_every_n_hours", "=", "FLAGS", ".", "keep_checkpoint_every_n_hours", ",", "num_gpus", "=", "FLAGS", ".", "worker_gpu", ",", "gpu_order", "=", "FLAGS", ".", "gpu_order", ",", "num_async_replicas", "=", "FLAGS", ".", "worker_replicas", ",", "gpu_mem_fraction", "=", "FLAGS", ".", "worker_gpu_memory_fraction", ",", "enable_graph_rewriter", "=", "FLAGS", ".", "enable_graph_rewriter", ",", "use_tpu", "=", "FLAGS", ".", "use_tpu", ",", "schedule", "=", "FLAGS", ".", "schedule", ",", "no_data_parallelism", "=", "hp", ".", "no_data_parallelism", ",", "daisy_chain_variables", "=", "daisy_chain_variables", ",", "ps_replicas", "=", "FLAGS", ".", "ps_replicas", ",", "ps_job", "=", "FLAGS", ".", "ps_job", ",", "ps_gpu", "=", "FLAGS", ".", "ps_gpu", ",", "sync", "=", "FLAGS", ".", "sync", ",", "worker_id", "=", "FLAGS", ".", "worker_id", ",", "worker_job", "=", "FLAGS", ".", "worker_job", ",", "random_seed", "=", "FLAGS", ".", "random_seed", ",", "tpu_infeed_sleep_secs", "=", "FLAGS", ".", "tpu_infeed_sleep_secs", ",", "inter_op_parallelism_threads", "=", "FLAGS", ".", "inter_op_parallelism_threads", ",", "log_step_count_steps", "=", "FLAGS", ".", "log_step_count_steps", ",", "intra_op_parallelism_threads", "=", "FLAGS", ".", "intra_op_parallelism_threads", ")" ]
Create a run config. Args: hp: model hyperparameters Returns: a run config
[ "Create", "a", "run", "config", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xdocktoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdocktoolbar.py#L643-L652
def setMaximumPixmapSize(self, size): """ Sets the maximum pixmap size for this toolbar. :param size | <int> """ self._maximumPixmapSize = size position = self.position() self._position = None self.setPosition(position)
[ "def", "setMaximumPixmapSize", "(", "self", ",", "size", ")", ":", "self", ".", "_maximumPixmapSize", "=", "size", "position", "=", "self", ".", "position", "(", ")", "self", ".", "_position", "=", "None", "self", ".", "setPosition", "(", "position", ")" ]
Sets the maximum pixmap size for this toolbar. :param size | <int>
[ "Sets", "the", "maximum", "pixmap", "size", "for", "this", "toolbar", ".", ":", "param", "size", "|", "<int", ">" ]
python
train
gwastro/pycbc
pycbc/inference/io/base_multitemper.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/io/base_multitemper.py#L77-L81
def write_sampler_metadata(self, sampler): """Adds writing ntemps to file. """ super(MultiTemperedMetadataIO, self).write_sampler_metadata(sampler) self[self.sampler_group].attrs["ntemps"] = sampler.ntemps
[ "def", "write_sampler_metadata", "(", "self", ",", "sampler", ")", ":", "super", "(", "MultiTemperedMetadataIO", ",", "self", ")", ".", "write_sampler_metadata", "(", "sampler", ")", "self", "[", "self", ".", "sampler_group", "]", ".", "attrs", "[", "\"ntemps\"", "]", "=", "sampler", ".", "ntemps" ]
Adds writing ntemps to file.
[ "Adds", "writing", "ntemps", "to", "file", "." ]
python
train
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L1388-L1445
def DAA(cpu): """ Decimal adjusts AL after addition. Adjusts the sum of two packed BCD values to create a packed BCD result. The AL register is the implied source and destination operand. If a decimal carry is detected, the CF and AF flags are set accordingly. The CF and AF flags are set if the adjustment of the value results in a decimal carry in either digit of the result. The SF, ZF, and PF flags are set according to the result. This instruction is not valid in 64-bit mode.:: IF (((AL AND 0FH) > 9) or AF = 1) THEN AL = AL + 6; CF = CF OR CarryFromLastAddition; (* CF OR carry from AL = AL + 6 *) AF = 1; ELSE AF = 0; FI; IF ((AL AND F0H) > 90H) or CF = 1) THEN AL = AL + 60H; CF = 1; ELSE CF = 0; FI; :param cpu: current CPU. """ cpu.AF = Operators.OR((cpu.AL & 0x0f) > 9, cpu.AF) oldAL = cpu.AL cpu.AL = Operators.ITEBV(8, cpu.AF, cpu.AL + 6, cpu.AL) cpu.CF = Operators.ITE(cpu.AF, Operators.OR(cpu.CF, cpu.AL < oldAL), cpu.CF) cpu.CF = Operators.OR((cpu.AL & 0xf0) > 0x90, cpu.CF) cpu.AL = Operators.ITEBV(8, cpu.CF, cpu.AL + 0x60, cpu.AL) """ #old not-symbolic aware version... if ((cpu.AL & 0x0f) > 9) or cpu.AF: oldAL = cpu.AL cpu.AL = cpu.AL + 6 cpu.CF = Operators.OR(cpu.CF, cpu.AL < oldAL) cpu.AF = True else: cpu.AF = False if ((cpu.AL & 0xf0) > 0x90) or cpu.CF: cpu.AL = cpu.AL + 0x60 cpu.CF = True else: cpu.CF = False """ cpu.ZF = cpu.AL == 0 cpu.SF = (cpu.AL & 0x80) != 0 cpu.PF = cpu._calculate_parity_flag(cpu.AL)
[ "def", "DAA", "(", "cpu", ")", ":", "cpu", ".", "AF", "=", "Operators", ".", "OR", "(", "(", "cpu", ".", "AL", "&", "0x0f", ")", ">", "9", ",", "cpu", ".", "AF", ")", "oldAL", "=", "cpu", ".", "AL", "cpu", ".", "AL", "=", "Operators", ".", "ITEBV", "(", "8", ",", "cpu", ".", "AF", ",", "cpu", ".", "AL", "+", "6", ",", "cpu", ".", "AL", ")", "cpu", ".", "CF", "=", "Operators", ".", "ITE", "(", "cpu", ".", "AF", ",", "Operators", ".", "OR", "(", "cpu", ".", "CF", ",", "cpu", ".", "AL", "<", "oldAL", ")", ",", "cpu", ".", "CF", ")", "cpu", ".", "CF", "=", "Operators", ".", "OR", "(", "(", "cpu", ".", "AL", "&", "0xf0", ")", ">", "0x90", ",", "cpu", ".", "CF", ")", "cpu", ".", "AL", "=", "Operators", ".", "ITEBV", "(", "8", ",", "cpu", ".", "CF", ",", "cpu", ".", "AL", "+", "0x60", ",", "cpu", ".", "AL", ")", "\"\"\"\n #old not-symbolic aware version...\n if ((cpu.AL & 0x0f) > 9) or cpu.AF:\n oldAL = cpu.AL\n cpu.AL = cpu.AL + 6\n cpu.CF = Operators.OR(cpu.CF, cpu.AL < oldAL)\n cpu.AF = True\n else:\n cpu.AF = False\n\n if ((cpu.AL & 0xf0) > 0x90) or cpu.CF:\n cpu.AL = cpu.AL + 0x60\n cpu.CF = True\n else:\n cpu.CF = False\n \"\"\"", "cpu", ".", "ZF", "=", "cpu", ".", "AL", "==", "0", "cpu", ".", "SF", "=", "(", "cpu", ".", "AL", "&", "0x80", ")", "!=", "0", "cpu", ".", "PF", "=", "cpu", ".", "_calculate_parity_flag", "(", "cpu", ".", "AL", ")" ]
Decimal adjusts AL after addition. Adjusts the sum of two packed BCD values to create a packed BCD result. The AL register is the implied source and destination operand. If a decimal carry is detected, the CF and AF flags are set accordingly. The CF and AF flags are set if the adjustment of the value results in a decimal carry in either digit of the result. The SF, ZF, and PF flags are set according to the result. This instruction is not valid in 64-bit mode.:: IF (((AL AND 0FH) > 9) or AF = 1) THEN AL = AL + 6; CF = CF OR CarryFromLastAddition; (* CF OR carry from AL = AL + 6 *) AF = 1; ELSE AF = 0; FI; IF ((AL AND F0H) > 90H) or CF = 1) THEN AL = AL + 60H; CF = 1; ELSE CF = 0; FI; :param cpu: current CPU.
[ "Decimal", "adjusts", "AL", "after", "addition", "." ]
python
valid
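The docstring's pseudocode is easiest to verify with concrete packed-BCD bytes. A plain, non-symbolic Python walk-through; here AF/CF are derived from the addition itself, as the hardware ADD preceding DAA would set them:

def add_bcd(a, b):
    # Add two packed-BCD bytes the way ADD followed by DAA would.
    al = (a + b) & 0xFF
    af = ((a & 0x0F) + (b & 0x0F)) > 0x0F  # carry out of the low nibble
    cf = (a + b) > 0xFF                    # carry out of the whole byte
    if (al & 0x0F) > 9 or af:
        old = al
        al = (al + 0x06) & 0xFF
        cf = cf or al < old
    if (al & 0xF0) > 0x90 or cf:
        al = (al + 0x60) & 0xFF
        cf = True
    return al, cf

print(hex(add_bcd(0x38, 0x45)[0]))  # 0x83  (decimal 38 + 45 = 83)
print(hex(add_bcd(0x29, 0x49)[0]))  # 0x78  (decimal 29 + 49 = 78)
al, cf = add_bcd(0x91, 0x92)
print(hex(al), cf)                  # 0x83 True  (91 + 92 = 183, carry set)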
ibm-watson-iot/iot-python
src/wiotp/sdk/api/status/__init__.py
https://github.com/ibm-watson-iot/iot-python/blob/195f05adce3fba4ec997017e41e02ebd85c0c4cc/src/wiotp/sdk/api/status/__init__.py#L40-L51
def serviceStatus(self): """ Retrieve the organization-specific status of each of the services offered by the IBM Watson IoT Platform. In case of failure it throws APIException """ r = self._apiClient.get("api/v0002/service-status") if r.status_code == 200: return ServiceStatus(**r.json()) else: raise ApiException(r)
[ "def", "serviceStatus", "(", "self", ")", ":", "r", "=", "self", ".", "_apiClient", ".", "get", "(", "\"api/v0002/service-status\"", ")", "if", "r", ".", "status_code", "==", "200", ":", "return", "ServiceStatus", "(", "*", "*", "r", ".", "json", "(", ")", ")", "else", ":", "raise", "ApiException", "(", "r", ")" ]
Retrieve the organization-specific status of each of the services offered by the IBM Watson IoT Platform. In case of failure it throws APIException
[ "Retrieve", "the", "organization", "-", "specific", "status", "of", "each", "of", "the", "services", "offered", "by", "the", "IBM", "Watson", "IoT", "Platform", ".", "In", "case", "of", "failure", "it", "throws", "APIException" ]
python
test
loli/medpy
medpy/features/intensity.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/intensity.py#L733-L744
def _substract_hemispheres(active, reference, active_sigma, reference_sigma, voxel_spacing):
    """
    Helper function for `_extract_hemispheric_difference`.
    Smoothes both images and then subtracts the reference from the active image.
    """
    active_kernel = _create_structure_array(active_sigma, voxel_spacing)
    active_smoothed = gaussian_filter(active, sigma = active_kernel)

    reference_kernel = _create_structure_array(reference_sigma, voxel_spacing)
    reference_smoothed = gaussian_filter(reference, sigma = reference_kernel)

    return active_smoothed - reference_smoothed
[ "def", "_substract_hemispheres", "(", "active", ",", "reference", ",", "active_sigma", ",", "reference_sigma", ",", "voxel_spacing", ")", ":", "active_kernel", "=", "_create_structure_array", "(", "active_sigma", ",", "voxel_spacing", ")", "active_smoothed", "=", "gaussian_filter", "(", "active", ",", "sigma", "=", "active_kernel", ")", "reference_kernel", "=", "_create_structure_array", "(", "reference_sigma", ",", "voxel_spacing", ")", "reference_smoothed", "=", "gaussian_filter", "(", "reference", ",", "sigma", "=", "reference_kernel", ")", "return", "active_smoothed", "-", "reference_smoothed" ]
Helper function for `_extract_hemispheric_difference`. Smoothes both images and then subtracts the reference from the active image.
[ "Helper", "function", "for", "_extract_hemispheric_difference", ".", "Smoothes", "both", "images", "and", "then", "substracts", "the", "reference", "from", "the", "active", "image", "." ]
python
train
mrjoes/sockjs-tornado
sockjs/tornado/basehandler.py
https://github.com/mrjoes/sockjs-tornado/blob/bd3a99b407f1181f054b3b1730f438dde375ca1c/sockjs/tornado/basehandler.py#L42-L46
def finish(self, chunk=None): """Tornado `finish` handler""" self._log_disconnect() super(BaseHandler, self).finish(chunk)
[ "def", "finish", "(", "self", ",", "chunk", "=", "None", ")", ":", "self", ".", "_log_disconnect", "(", ")", "super", "(", "BaseHandler", ",", "self", ")", ".", "finish", "(", "chunk", ")" ]
Tornado `finish` handler
[ "Tornado", "finish", "handler" ]
python
train
astorfi/speechpy
speechpy/feature.py
https://github.com/astorfi/speechpy/blob/9e99ae81398e7584e6234db371d6d7b5e8736192/speechpy/feature.py#L102-L153
def mfcc(
        signal,
        sampling_frequency,
        frame_length=0.020,
        frame_stride=0.01,
        num_cepstral=13,
        num_filters=40,
        fft_length=512,
        low_frequency=0,
        high_frequency=None,
        dc_elimination=True):
    """Compute MFCC features from an audio signal.

    Args:
         signal (array): the audio signal from which to compute features.
             Should be an N x 1 array
         sampling_frequency (int): the sampling frequency of the signal
             we are working with.
         frame_length (float): the length of each frame in seconds.
             Default is 0.020s
         frame_stride (float): the step between successive frames in seconds.
             Default is 0.01s
         num_filters (int): the number of filters in the filterbank,
             default 40.
         fft_length (int): number of FFT points. Default is 512.
         low_frequency (float): lowest band edge of mel filters.
             In Hz, default is 0.
         high_frequency (float): highest band edge of mel filters.
             In Hz, default is samplerate/2
         num_cepstral (int): Number of cepstral coefficients.
         dc_elimination (bool): If the first DC component should
             be eliminated or not.

    Returns:
        array: A numpy array of size (num_frames x num_cepstral) containing mfcc features.
    """
    feature, energy = mfe(signal, sampling_frequency=sampling_frequency,
                          frame_length=frame_length, frame_stride=frame_stride,
                          num_filters=num_filters, fft_length=fft_length,
                          low_frequency=low_frequency, high_frequency=high_frequency)
    if len(feature) == 0:
        return np.empty((0, num_cepstral))
    feature = np.log(feature)
    feature = dct(feature, type=2, axis=-1, norm='ortho')[:, :num_cepstral]

    # replace first cepstral coefficient with log of frame energy for DC
    # elimination.
    if dc_elimination:
        feature[:, 0] = np.log(energy)
    return feature
[ "def", "mfcc", "(", "signal", ",", "sampling_frequency", ",", "frame_length", "=", "0.020", ",", "frame_stride", "=", "0.01", ",", "num_cepstral", "=", "13", ",", "num_filters", "=", "40", ",", "fft_length", "=", "512", ",", "low_frequency", "=", "0", ",", "high_frequency", "=", "None", ",", "dc_elimination", "=", "True", ")", ":", "feature", ",", "energy", "=", "mfe", "(", "signal", ",", "sampling_frequency", "=", "sampling_frequency", ",", "frame_length", "=", "frame_length", ",", "frame_stride", "=", "frame_stride", ",", "num_filters", "=", "num_filters", ",", "fft_length", "=", "fft_length", ",", "low_frequency", "=", "low_frequency", ",", "high_frequency", "=", "high_frequency", ")", "if", "len", "(", "feature", ")", "==", "0", ":", "return", "np", ".", "empty", "(", "(", "0", ",", "num_cepstral", ")", ")", "feature", "=", "np", ".", "log", "(", "feature", ")", "feature", "=", "dct", "(", "feature", ",", "type", "=", "2", ",", "axis", "=", "-", "1", ",", "norm", "=", "'ortho'", ")", "[", ":", ",", ":", "num_cepstral", "]", "# replace first cepstral coefficient with log of frame energy for DC", "# elimination.", "if", "dc_elimination", ":", "feature", "[", ":", ",", "0", "]", "=", "np", ".", "log", "(", "energy", ")", "return", "feature" ]
Compute MFCC features from an audio signal.

    Args:
         signal (array): the audio signal from which to compute features.
             Should be an N x 1 array
         sampling_frequency (int): the sampling frequency of the signal
             we are working with.
         frame_length (float): the length of each frame in seconds.
             Default is 0.020s
         frame_stride (float): the step between successive frames in seconds.
             Default is 0.01s
         num_filters (int): the number of filters in the filterbank,
             default 40.
         fft_length (int): number of FFT points. Default is 512.
         low_frequency (float): lowest band edge of mel filters.
             In Hz, default is 0.
         high_frequency (float): highest band edge of mel filters.
             In Hz, default is samplerate/2
         num_cepstral (int): Number of cepstral coefficients.
         dc_elimination (bool): If the first dc component should
             be eliminated or not.

    Returns:
        array: A numpy array of size (num_frames x num_cepstral) containing
        mfcc features.
[ "Compute", "MFCC", "features", "from", "an", "audio", "signal", "." ]
python
train
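A hedged usage sketch for the function above, assuming speechpy is installed and exposes it as speechpy.feature.mfcc (consistent with the path shown); the test tone is arbitrary:

import numpy as np
from speechpy import feature

fs = 16000                            # assumed sampling rate
t = np.linspace(0, 1, fs, endpoint=False)
signal = np.sin(2 * np.pi * 440 * t)  # one second of a 440 Hz tone

mfccs = feature.mfcc(signal, sampling_frequency=fs,
                     frame_length=0.020, frame_stride=0.01,
                     num_cepstral=13)
print(mfccs.shape)                    # (num_frames, 13)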
edx/edx-enterprise
enterprise/migrations/0066_add_system_wide_enterprise_operator_role.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/migrations/0066_add_system_wide_enterprise_operator_role.py#L16-L21
def delete_roles(apps, schema_editor): """Delete the enterprise roles.""" SystemWideEnterpriseRole = apps.get_model('enterprise', 'SystemWideEnterpriseRole') SystemWideEnterpriseRole.objects.filter( name__in=[ENTERPRISE_OPERATOR_ROLE] ).delete()
[ "def", "delete_roles", "(", "apps", ",", "schema_editor", ")", ":", "SystemWideEnterpriseRole", "=", "apps", ".", "get_model", "(", "'enterprise'", ",", "'SystemWideEnterpriseRole'", ")", "SystemWideEnterpriseRole", ".", "objects", ".", "filter", "(", "name__in", "=", "[", "ENTERPRISE_OPERATOR_ROLE", "]", ")", ".", "delete", "(", ")" ]
Delete the enterprise roles.
[ "Delete", "the", "enterprise", "roles", "." ]
python
valid
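Reverse functions like delete_roles are conventionally paired with a forward step in a migrations.RunPython operation. A hedged sketch of that wiring; the create_roles helper, the role-name constant's value, and the dependency here are placeholders, not the project's actual migration:

from django.db import migrations

ENTERPRISE_OPERATOR_ROLE = 'enterprise_openedx_operator'  # assumed value

def create_roles(apps, schema_editor):
    """Forward step: ensure the operator role exists."""
    SystemWideEnterpriseRole = apps.get_model('enterprise', 'SystemWideEnterpriseRole')
    SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_OPERATOR_ROLE)

class Migration(migrations.Migration):
    dependencies = [('enterprise', '0065_placeholder')]  # hypothetical dependency
    operations = [
        migrations.RunPython(create_roles, delete_roles),
    ]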
Cue/scales
src/greplin/scales/__init__.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/__init__.py#L669-L673
def filterCollapsedItems(data): """Return a filtered iteration over a list of items.""" return ((key, value)\ for key, value in six.iteritems(data) \ if not (isinstance(value, StatContainer) and value.isCollapsed()))
[ "def", "filterCollapsedItems", "(", "data", ")", ":", "return", "(", "(", "key", ",", "value", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "data", ")", "if", "not", "(", "isinstance", "(", "value", ",", "StatContainer", ")", "and", "value", ".", "isCollapsed", "(", ")", ")", ")" ]
Return a filtered iteration over a list of items.
[ "Return", "a", "filtered", "iteration", "over", "a", "list", "of", "items", "." ]
python
train
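The same lazy-filtering pattern works outside scales; in this standalone sketch, StatContainer is a minimal stand-in defined only for illustration:

import six

class StatContainer(dict):
    """Stand-in for scales' StatContainer, for demonstration only."""
    def isCollapsed(self):
        return self.get('_collapsed', False)

data = {'a': 1, 'b': StatContainer(_collapsed=True), 'c': StatContainer()}
visible = ((key, value) for key, value in six.iteritems(data)
           if not (isinstance(value, StatContainer) and value.isCollapsed()))
print(sorted(key for key, _ in visible))  # ['a', 'c']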
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L328-L330
def channels_open(self, room_id, **kwargs): """Adds the channel back to the user’s list of channels.""" return self.__call_api_post('channels.open', roomId=room_id, kwargs=kwargs)
[ "def", "channels_open", "(", "self", ",", "room_id", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'channels.open'", ",", "roomId", "=", "room_id", ",", "kwargs", "=", "kwargs", ")" ]
Adds the channel back to the user’s list of channels.
[ "Adds", "the", "channel", "back", "to", "the", "user’s", "list", "of", "channels", "." ]
python
train
dusktreader/flask-praetorian
flask_praetorian/base.py
https://github.com/dusktreader/flask-praetorian/blob/d530cf3ffeffd61bfff1b8c79e8b45e9bfa0db0c/flask_praetorian/base.py#L198-L206
def encrypt_password(self, raw_password): """ Encrypts a plaintext password using the stored passlib password context """ PraetorianError.require_condition( self.pwd_ctx is not None, "Praetorian must be initialized before this method is available", ) return self.pwd_ctx.encrypt(raw_password, scheme=self.hash_scheme)
[ "def", "encrypt_password", "(", "self", ",", "raw_password", ")", ":", "PraetorianError", ".", "require_condition", "(", "self", ".", "pwd_ctx", "is", "not", "None", ",", "\"Praetorian must be initialized before this method is available\"", ",", ")", "return", "self", ".", "pwd_ctx", ".", "encrypt", "(", "raw_password", ",", "scheme", "=", "self", ".", "hash_scheme", ")" ]
Encrypts a plaintext password using the stored passlib password context
[ "Encrypts", "a", "plaintext", "password", "using", "the", "stored", "passlib", "password", "context" ]
python
train
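A hedged sketch of the underlying passlib call, assuming a pbkdf2_sha256 scheme; note that in recent passlib releases CryptContext.encrypt is a legacy alias for CryptContext.hash:

from passlib.context import CryptContext

pwd_ctx = CryptContext(schemes=['pbkdf2_sha256'], deprecated='auto')
hashed = pwd_ctx.hash('hunter2')          # modern spelling of encrypt()
print(pwd_ctx.verify('hunter2', hashed))  # True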
tornadoweb/tornado
tornado/curl_httpclient.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/curl_httpclient.py#L141-L157
def _handle_events(self, fd: int, events: int) -> None: """Called by IOLoop when there is activity on one of our file descriptors. """ action = 0 if events & ioloop.IOLoop.READ: action |= pycurl.CSELECT_IN if events & ioloop.IOLoop.WRITE: action |= pycurl.CSELECT_OUT while True: try: ret, num_handles = self._multi.socket_action(fd, action) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests()
[ "def", "_handle_events", "(", "self", ",", "fd", ":", "int", ",", "events", ":", "int", ")", "->", "None", ":", "action", "=", "0", "if", "events", "&", "ioloop", ".", "IOLoop", ".", "READ", ":", "action", "|=", "pycurl", ".", "CSELECT_IN", "if", "events", "&", "ioloop", ".", "IOLoop", ".", "WRITE", ":", "action", "|=", "pycurl", ".", "CSELECT_OUT", "while", "True", ":", "try", ":", "ret", ",", "num_handles", "=", "self", ".", "_multi", ".", "socket_action", "(", "fd", ",", "action", ")", "except", "pycurl", ".", "error", "as", "e", ":", "ret", "=", "e", ".", "args", "[", "0", "]", "if", "ret", "!=", "pycurl", ".", "E_CALL_MULTI_PERFORM", ":", "break", "self", ".", "_finish_pending_requests", "(", ")" ]
Called by IOLoop when there is activity on one of our file descriptors.
[ "Called", "by", "IOLoop", "when", "there", "is", "activity", "on", "one", "of", "our", "file", "descriptors", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/hash.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/hash.py#L452-L512
def murmur3_64(data: Union[bytes, bytearray], seed: int = 19820125) -> int: """ Pure 64-bit Python implementation of MurmurHash3; see http://stackoverflow.com/questions/13305290/is-there-a-pure-python-implementation-of-murmurhash (plus RNC bugfixes). Args: data: data to hash seed: seed Returns: integer hash """ # noqa m = 0xc6a4a7935bd1e995 r = 47 mask = 2 ** 64 - 1 length = len(data) h = seed ^ ((m * length) & mask) offset = (length // 8) * 8 # RNC: was /, but for Python 3 that gives float; brackets added for clarity for ll in range(0, offset, 8): k = bytes_to_long(data[ll:ll + 8]) k = (k * m) & mask k ^= (k >> r) & mask k = (k * m) & mask h = (h ^ k) h = (h * m) & mask l = length & 7 if l >= 7: h = (h ^ (data[offset + 6] << 48)) if l >= 6: h = (h ^ (data[offset + 5] << 40)) if l >= 5: h = (h ^ (data[offset + 4] << 32)) if l >= 4: h = (h ^ (data[offset + 3] << 24)) if l >= 3: h = (h ^ (data[offset + 2] << 16)) if l >= 2: h = (h ^ (data[offset + 1] << 8)) if l >= 1: h = (h ^ data[offset]) h = (h * m) & mask h ^= (h >> r) & mask h = (h * m) & mask h ^= (h >> r) & mask return h
[ "def", "murmur3_64", "(", "data", ":", "Union", "[", "bytes", ",", "bytearray", "]", ",", "seed", ":", "int", "=", "19820125", ")", "->", "int", ":", "# noqa", "m", "=", "0xc6a4a7935bd1e995", "r", "=", "47", "mask", "=", "2", "**", "64", "-", "1", "length", "=", "len", "(", "data", ")", "h", "=", "seed", "^", "(", "(", "m", "*", "length", ")", "&", "mask", ")", "offset", "=", "(", "length", "//", "8", ")", "*", "8", "# RNC: was /, but for Python 3 that gives float; brackets added for clarity", "for", "ll", "in", "range", "(", "0", ",", "offset", ",", "8", ")", ":", "k", "=", "bytes_to_long", "(", "data", "[", "ll", ":", "ll", "+", "8", "]", ")", "k", "=", "(", "k", "*", "m", ")", "&", "mask", "k", "^=", "(", "k", ">>", "r", ")", "&", "mask", "k", "=", "(", "k", "*", "m", ")", "&", "mask", "h", "=", "(", "h", "^", "k", ")", "h", "=", "(", "h", "*", "m", ")", "&", "mask", "l", "=", "length", "&", "7", "if", "l", ">=", "7", ":", "h", "=", "(", "h", "^", "(", "data", "[", "offset", "+", "6", "]", "<<", "48", ")", ")", "if", "l", ">=", "6", ":", "h", "=", "(", "h", "^", "(", "data", "[", "offset", "+", "5", "]", "<<", "40", ")", ")", "if", "l", ">=", "5", ":", "h", "=", "(", "h", "^", "(", "data", "[", "offset", "+", "4", "]", "<<", "32", ")", ")", "if", "l", ">=", "4", ":", "h", "=", "(", "h", "^", "(", "data", "[", "offset", "+", "3", "]", "<<", "24", ")", ")", "if", "l", ">=", "3", ":", "h", "=", "(", "h", "^", "(", "data", "[", "offset", "+", "2", "]", "<<", "16", ")", ")", "if", "l", ">=", "2", ":", "h", "=", "(", "h", "^", "(", "data", "[", "offset", "+", "1", "]", "<<", "8", ")", ")", "if", "l", ">=", "1", ":", "h", "=", "(", "h", "^", "data", "[", "offset", "]", ")", "h", "=", "(", "h", "*", "m", ")", "&", "mask", "h", "^=", "(", "h", ">>", "r", ")", "&", "mask", "h", "=", "(", "h", "*", "m", ")", "&", "mask", "h", "^=", "(", "h", ">>", "r", ")", "&", "mask", "return", "h" ]
Pure 64-bit Python implementation of MurmurHash3; see http://stackoverflow.com/questions/13305290/is-there-a-pure-python-implementation-of-murmurhash (plus RNC bugfixes). Args: data: data to hash seed: seed Returns: integer hash
[ "Pure", "64", "-", "bit", "Python", "implementation", "of", "MurmurHash3", ";", "see", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "13305290", "/", "is", "-", "there", "-", "a", "-", "pure", "-", "python", "-", "implementation", "-", "of", "-", "murmurhash", "(", "plus", "RNC", "bugfixes", ")", ".", "Args", ":", "data", ":", "data", "to", "hash", "seed", ":", "seed" ]
python
train
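Because the hash function above is pure, it can be exercised directly once a bytes_to_long helper is in scope. Little-endian chunk decoding is an assumption about the library's helper (it matches the usual MurmurHash64A byte layout):

def bytes_to_long(b):
    # Assumed little-endian decoding; the library's own helper may differ.
    return int.from_bytes(b, 'little')

print(hex(murmur3_64(b'hello world')))
# bytes and bytearray inputs hash identically:
print(murmur3_64(b'abc') == murmur3_64(bytearray(b'abc')))  # True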
polysquare/polysquare-generic-file-linter
polysquarelinter/spelling.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/spelling.py#L285-L301
def filter_nonspellcheckable_tokens(line, block_out_regexes=None): """Return line with paths, urls and emails filtered out. Block out other strings of text matching :block_out_regexes: if passed in. """ all_block_out_regexes = [ r"[^\s]*:[^\s]*[/\\][^\s]*", r"[^\s]*[/\\][^\s]*", r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b" ] + (block_out_regexes or list()) for block_regex in all_block_out_regexes: for marker in re.finditer(block_regex, line): spaces = " " * (marker.end() - marker.start()) line = line[:marker.start()] + spaces + line[marker.end():] return line
[ "def", "filter_nonspellcheckable_tokens", "(", "line", ",", "block_out_regexes", "=", "None", ")", ":", "all_block_out_regexes", "=", "[", "r\"[^\\s]*:[^\\s]*[/\\\\][^\\s]*\"", ",", "r\"[^\\s]*[/\\\\][^\\s]*\"", ",", "r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]+\\b\"", "]", "+", "(", "block_out_regexes", "or", "list", "(", ")", ")", "for", "block_regex", "in", "all_block_out_regexes", ":", "for", "marker", "in", "re", ".", "finditer", "(", "block_regex", ",", "line", ")", ":", "spaces", "=", "\" \"", "*", "(", "marker", ".", "end", "(", ")", "-", "marker", ".", "start", "(", ")", ")", "line", "=", "line", "[", ":", "marker", ".", "start", "(", ")", "]", "+", "spaces", "+", "line", "[", "marker", ".", "end", "(", ")", ":", "]", "return", "line" ]
Return line with paths, urls and emails filtered out. Block out other strings of text matching :block_out_regexes: if passed in.
[ "Return", "line", "with", "paths", "urls", "and", "emails", "filtered", "out", "." ]
python
train
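A self-contained illustration of the blank-out technique on two of the built-in patterns; the sample line is invented. Replacing each match with an equal-length run of spaces keeps all later match offsets valid:

import re

line = 'See https://example.com/docs or mail admin@example.com for help'
for pattern in (r'[^\s]*:[^\s]*[/\\][^\s]*',
                r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]+\b'):
    for marker in re.finditer(pattern, line):
        spaces = ' ' * (marker.end() - marker.start())
        line = line[:marker.start()] + spaces + line[marker.end():]
print(repr(line))  # URL and email replaced by spaces; other words intact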
Microsoft/knack
knack/arguments.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/arguments.py#L306-L317
def ignore(self, argument_dest, **kwargs): """ Register an argument with type knack.arguments.ignore_type (hidden/ignored) :param argument_dest: The destination argument to apply the ignore type to :type argument_dest: str """ self._check_stale() if not self._applicable(): return dest_option = ['--__{}'.format(argument_dest.upper())] self.argument(argument_dest, arg_type=ignore_type, options_list=dest_option, **kwargs)
[ "def", "ignore", "(", "self", ",", "argument_dest", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_check_stale", "(", ")", "if", "not", "self", ".", "_applicable", "(", ")", ":", "return", "dest_option", "=", "[", "'--__{}'", ".", "format", "(", "argument_dest", ".", "upper", "(", ")", ")", "]", "self", ".", "argument", "(", "argument_dest", ",", "arg_type", "=", "ignore_type", ",", "options_list", "=", "dest_option", ",", "*", "*", "kwargs", ")" ]
Register an argument with type knack.arguments.ignore_type (hidden/ignored) :param argument_dest: The destination argument to apply the ignore type to :type argument_dest: str
[ "Register", "an", "argument", "with", "type", "knack", ".", "arguments", ".", "ignore_type", "(", "hidden", "/", "ignored", ")" ]
python
train
pgmpy/pgmpy
pgmpy/models/MarkovChain.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/MarkovChain.py#L152-L171
def add_variables_from(self, variables, cards): """ Add several variables to the model at once. Parameters: ----------- variables: array-like iterable object List of variables to be added. cards: array-like iterable object List of cardinalities of the variables to be added. Examples: --------- >>> from pgmpy.models import MarkovChain as MC >>> model = MC() >>> model.add_variables_from(['x', 'y'], [3, 4]) """ for var, card in zip(variables, cards): self.add_variable(var, card)
[ "def", "add_variables_from", "(", "self", ",", "variables", ",", "cards", ")", ":", "for", "var", ",", "card", "in", "zip", "(", "variables", ",", "cards", ")", ":", "self", ".", "add_variable", "(", "var", ",", "card", ")" ]
Add several variables to the model at once. Parameters: ----------- variables: array-like iterable object List of variables to be added. cards: array-like iterable object List of cardinalities of the variables to be added. Examples: --------- >>> from pgmpy.models import MarkovChain as MC >>> model = MC() >>> model.add_variables_from(['x', 'y'], [3, 4])
[ "Add", "several", "variables", "to", "the", "model", "at", "once", "." ]
python
train
Syndace/python-x3dh
x3dh/state.py
https://github.com/Syndace/python-x3dh/blob/a6cec1ae858121b88bef1b178f5cda5e43d5c391/x3dh/state.py#L329-L438
def getSharedSecretActive( self, other_public_bundle, allow_zero_otpks = False ): """ Do the key exchange, as the active party. This involves selecting keys from the passive parties' public bundle. :param other_public_bundle: An instance of PublicBundle, filled with the public data of the passive party. :param allow_zero_otpks: A flag indicating whether bundles with no one-time pre keys are allowed or throw an error. False is the recommended default. :returns: A dictionary containing the shared secret, the shared associated data and the data the passive party needs to finalize the key exchange. The returned structure looks like this:: { "to_other": { # The public key of the active parties' identity key pair "ik": bytes, # The public key of the active parties' ephemeral key pair "ek": bytes, # The public key of the used passive parties' one-time pre key or None "otpk": bytes or None, # The public key of the passive parties' signed pre key pair "spk": bytes }, "ad": bytes, # The shared associated data "sk": bytes # The shared secret } :raises KeyExchangeException: If an error occurs during the key exchange. The exception message will contain (human-readable) details. """ self.__checkSPKTimestamp() other_ik = self.__KeyPair(pub = other_public_bundle.ik) other_spk = { "key": self.__KeyPair(pub = other_public_bundle.spk), "signature": other_public_bundle.spk_signature } other_otpks = [ self.__KeyPair(pub = otpk) for otpk in other_public_bundle.otpks ] if len(other_otpks) == 0 and not allow_zero_otpks: raise KeyExchangeException( "The other public bundle does not contain any OTPKs, which is not " + "allowed." ) other_spk_serialized = self.__PublicKeyEncoder.encodePublicKey( other_spk["key"].pub, self.__curve ) if not self.__XEdDSA(mont_pub = other_ik.pub).verify( other_spk_serialized, other_spk["signature"] ): raise KeyExchangeException( "The signature of this public bundle's spk could not be verifified." ) ek = self.__KeyPair.generate() dh1 = self.__ik.getSharedSecret(other_spk["key"]) dh2 = ek.getSharedSecret(other_ik) dh3 = ek.getSharedSecret(other_spk["key"]) dh4 = b"" otpk = None if len(other_otpks) > 0: otpk_index = ord(os.urandom(1)) % len(other_otpks) otpk = other_otpks[otpk_index] dh4 = ek.getSharedSecret(otpk) sk = self.__kdf(dh1 + dh2 + dh3 + dh4) ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey( self.__ik.pub, self.__curve ) other_ik_pub_serialized = self.__PublicKeyEncoder.encodePublicKey( other_ik.pub, self.__curve ) ad = ik_pub_serialized + other_ik_pub_serialized return { "to_other": { "ik": self.__ik.pub, "ek": ek.pub, "otpk": otpk.pub if otpk else None, "spk": other_spk["key"].pub }, "ad": ad, "sk": sk }
[ "def", "getSharedSecretActive", "(", "self", ",", "other_public_bundle", ",", "allow_zero_otpks", "=", "False", ")", ":", "self", ".", "__checkSPKTimestamp", "(", ")", "other_ik", "=", "self", ".", "__KeyPair", "(", "pub", "=", "other_public_bundle", ".", "ik", ")", "other_spk", "=", "{", "\"key\"", ":", "self", ".", "__KeyPair", "(", "pub", "=", "other_public_bundle", ".", "spk", ")", ",", "\"signature\"", ":", "other_public_bundle", ".", "spk_signature", "}", "other_otpks", "=", "[", "self", ".", "__KeyPair", "(", "pub", "=", "otpk", ")", "for", "otpk", "in", "other_public_bundle", ".", "otpks", "]", "if", "len", "(", "other_otpks", ")", "==", "0", "and", "not", "allow_zero_otpks", ":", "raise", "KeyExchangeException", "(", "\"The other public bundle does not contain any OTPKs, which is not \"", "+", "\"allowed.\"", ")", "other_spk_serialized", "=", "self", ".", "__PublicKeyEncoder", ".", "encodePublicKey", "(", "other_spk", "[", "\"key\"", "]", ".", "pub", ",", "self", ".", "__curve", ")", "if", "not", "self", ".", "__XEdDSA", "(", "mont_pub", "=", "other_ik", ".", "pub", ")", ".", "verify", "(", "other_spk_serialized", ",", "other_spk", "[", "\"signature\"", "]", ")", ":", "raise", "KeyExchangeException", "(", "\"The signature of this public bundle's spk could not be verifified.\"", ")", "ek", "=", "self", ".", "__KeyPair", ".", "generate", "(", ")", "dh1", "=", "self", ".", "__ik", ".", "getSharedSecret", "(", "other_spk", "[", "\"key\"", "]", ")", "dh2", "=", "ek", ".", "getSharedSecret", "(", "other_ik", ")", "dh3", "=", "ek", ".", "getSharedSecret", "(", "other_spk", "[", "\"key\"", "]", ")", "dh4", "=", "b\"\"", "otpk", "=", "None", "if", "len", "(", "other_otpks", ")", ">", "0", ":", "otpk_index", "=", "ord", "(", "os", ".", "urandom", "(", "1", ")", ")", "%", "len", "(", "other_otpks", ")", "otpk", "=", "other_otpks", "[", "otpk_index", "]", "dh4", "=", "ek", ".", "getSharedSecret", "(", "otpk", ")", "sk", "=", "self", ".", "__kdf", "(", "dh1", "+", "dh2", "+", "dh3", "+", "dh4", ")", "ik_pub_serialized", "=", "self", ".", "__PublicKeyEncoder", ".", "encodePublicKey", "(", "self", ".", "__ik", ".", "pub", ",", "self", ".", "__curve", ")", "other_ik_pub_serialized", "=", "self", ".", "__PublicKeyEncoder", ".", "encodePublicKey", "(", "other_ik", ".", "pub", ",", "self", ".", "__curve", ")", "ad", "=", "ik_pub_serialized", "+", "other_ik_pub_serialized", "return", "{", "\"to_other\"", ":", "{", "\"ik\"", ":", "self", ".", "__ik", ".", "pub", ",", "\"ek\"", ":", "ek", ".", "pub", ",", "\"otpk\"", ":", "otpk", ".", "pub", "if", "otpk", "else", "None", ",", "\"spk\"", ":", "other_spk", "[", "\"key\"", "]", ".", "pub", "}", ",", "\"ad\"", ":", "ad", ",", "\"sk\"", ":", "sk", "}" ]
Do the key exchange, as the active party. This involves selecting keys from the passive parties' public bundle. :param other_public_bundle: An instance of PublicBundle, filled with the public data of the passive party. :param allow_zero_otpks: A flag indicating whether bundles with no one-time pre keys are allowed or throw an error. False is the recommended default. :returns: A dictionary containing the shared secret, the shared associated data and the data the passive party needs to finalize the key exchange. The returned structure looks like this:: { "to_other": { # The public key of the active parties' identity key pair "ik": bytes, # The public key of the active parties' ephemeral key pair "ek": bytes, # The public key of the used passive parties' one-time pre key or None "otpk": bytes or None, # The public key of the passive parties' signed pre key pair "spk": bytes }, "ad": bytes, # The shared associated data "sk": bytes # The shared secret } :raises KeyExchangeException: If an error occurs during the key exchange. The exception message will contain (human-readable) details.
[ "Do", "the", "key", "exchange", "as", "the", "active", "party", ".", "This", "involves", "selecting", "keys", "from", "the", "passive", "parties", "public", "bundle", "." ]
python
train
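The four Diffie-Hellman steps above can be sketched with the cryptography package's X25519 primitives. This is an illustrative reduction under assumed choices (curve, SHA-256 HKDF, made-up info label); real X3DH also verifies the signed pre key and uses the protocol's exact KDF input, both omitted here:

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from cryptography.hazmat.primitives.kdf.hkdf import HKDF

# Hypothetical key pairs: the active party holds ik_a/ek_a,
# the passive party holds ik_b/spk_b/otpk_b.
ik_a, ek_a = X25519PrivateKey.generate(), X25519PrivateKey.generate()
ik_b, spk_b, otpk_b = (X25519PrivateKey.generate() for _ in range(3))

dh1 = ik_a.exchange(spk_b.public_key())
dh2 = ek_a.exchange(ik_b.public_key())
dh3 = ek_a.exchange(spk_b.public_key())
dh4 = ek_a.exchange(otpk_b.public_key())

sk = HKDF(algorithm=hashes.SHA256(), length=32, salt=None,
          info=b'X3DH-sketch').derive(dh1 + dh2 + dh3 + dh4)
print(len(sk))  # 32-byte shared secret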
RJT1990/pyflux
pyflux/arma/arimax.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/arimax.py#L298-L354
def _mb_normal_model(self, beta, mini_batch): """ Creates the structure of the model (model matrices, etc) for a mini-batch Normal family ARIMAX model. Here the structure is the same as for _normal_model() but we are going to sample a random choice of data points (of length mini_batch). Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags) """ rand_int = np.random.randint(low=0, high=self.data_length-mini_batch-self.max_lag+1) sample = np.arange(start=rand_int, stop=rand_int+mini_batch) data = self.y[sample] X = self.X[sample, :] Y = data[self.max_lag:] if self.ar != 0: ar_matrix = data[(self.max_lag-1):-1] for i in range(1, self.ar): ar_matrix = np.vstack((ar_matrix, data[(self.max_lag-i-1):-i-1])) else: ar_matrix = np.zeros(data.shape[0]-self.max_lag) # Transform latent variables z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) # Constant and AR terms if self.ar == 0: mu = np.transpose(ar_matrix) elif self.ar == 1: mu = np.transpose(ar_matrix)*z[:-self.family_z_no-self.ma-len(self.X_names)][0] else: mu = np.matmul(np.transpose(ar_matrix),z[:-self.family_z_no-self.ma-len(self.X_names)]) # X terms mu = mu + np.matmul(X[self.integ+self.max_lag:],z[self.ma+self.ar:(self.ma+self.ar+len(self.X_names))]) # MA terms if self.ma != 0: mu = arimax_recursion(z, mu, Y, self.max_lag, Y.shape[0], self.ar, self.ma) return mu, Y
[ "def", "_mb_normal_model", "(", "self", ",", "beta", ",", "mini_batch", ")", ":", "rand_int", "=", "np", ".", "random", ".", "randint", "(", "low", "=", "0", ",", "high", "=", "self", ".", "data_length", "-", "mini_batch", "-", "self", ".", "max_lag", "+", "1", ")", "sample", "=", "np", ".", "arange", "(", "start", "=", "rand_int", ",", "stop", "=", "rand_int", "+", "mini_batch", ")", "data", "=", "self", ".", "y", "[", "sample", "]", "X", "=", "self", ".", "X", "[", "sample", ",", ":", "]", "Y", "=", "data", "[", "self", ".", "max_lag", ":", "]", "if", "self", ".", "ar", "!=", "0", ":", "ar_matrix", "=", "data", "[", "(", "self", ".", "max_lag", "-", "1", ")", ":", "-", "1", "]", "for", "i", "in", "range", "(", "1", ",", "self", ".", "ar", ")", ":", "ar_matrix", "=", "np", ".", "vstack", "(", "(", "ar_matrix", ",", "data", "[", "(", "self", ".", "max_lag", "-", "i", "-", "1", ")", ":", "-", "i", "-", "1", "]", ")", ")", "else", ":", "ar_matrix", "=", "np", ".", "zeros", "(", "data", ".", "shape", "[", "0", "]", "-", "self", ".", "max_lag", ")", "# Transform latent variables", "z", "=", "np", ".", "array", "(", "[", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "transform", "(", "beta", "[", "k", "]", ")", "for", "k", "in", "range", "(", "beta", ".", "shape", "[", "0", "]", ")", "]", ")", "# Constant and AR terms", "if", "self", ".", "ar", "==", "0", ":", "mu", "=", "np", ".", "transpose", "(", "ar_matrix", ")", "elif", "self", ".", "ar", "==", "1", ":", "mu", "=", "np", ".", "transpose", "(", "ar_matrix", ")", "*", "z", "[", ":", "-", "self", ".", "family_z_no", "-", "self", ".", "ma", "-", "len", "(", "self", ".", "X_names", ")", "]", "[", "0", "]", "else", ":", "mu", "=", "np", ".", "matmul", "(", "np", ".", "transpose", "(", "ar_matrix", ")", ",", "z", "[", ":", "-", "self", ".", "family_z_no", "-", "self", ".", "ma", "-", "len", "(", "self", ".", "X_names", ")", "]", ")", "# X terms", "mu", "=", "mu", "+", "np", ".", "matmul", "(", "X", "[", "self", ".", "integ", "+", "self", ".", "max_lag", ":", "]", ",", "z", "[", "self", ".", "ma", "+", "self", ".", "ar", ":", "(", "self", ".", "ma", "+", "self", ".", "ar", "+", "len", "(", "self", ".", "X_names", ")", ")", "]", ")", "# MA terms", "if", "self", ".", "ma", "!=", "0", ":", "mu", "=", "arimax_recursion", "(", "z", ",", "mu", ",", "Y", ",", "self", ".", "max_lag", ",", "Y", ".", "shape", "[", "0", "]", ",", "self", ".", "ar", ",", "self", ".", "ma", ")", "return", "mu", ",", "Y" ]
Creates the structure of the model (model matrices, etc) for a mini-batch Normal family ARIMAX model. Here the structure is the same as for _normal_model() but we are going to sample a random choice of data points (of length mini_batch). Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags)
[ "Creates", "the", "structure", "of", "the", "model", "(", "model", "matrices", "etc", ")", "for", "a", "mini", "-", "batch", "Normal", "family", "ARIMAX", "model", "." ]
python
train
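The contiguous mini-batch draw at the top of the method is a generic pattern; a standalone numpy sketch with invented sizes:

import numpy as np

def draw_contiguous_batch(y, mini_batch, max_lag):
    """Pick a random contiguous slice long enough to build lagged rows."""
    start = np.random.randint(0, len(y) - mini_batch - max_lag + 1)
    return y[start:start + mini_batch]

y = np.arange(100, dtype=float)
print(draw_contiguous_batch(y, mini_batch=20, max_lag=3).shape)  # (20,)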
gwww/elkm1
elkm1_lib/counters.py
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/counters.py#L13-L15
def set(self, value): """(Helper) Set counter to value""" self._elk.send(cx_encode(self._index, value))
[ "def", "set", "(", "self", ",", "value", ")", ":", "self", ".", "_elk", ".", "send", "(", "cx_encode", "(", "self", ".", "_index", ",", "value", ")", ")" ]
(Helper) Set counter to value
[ "(", "Helper", ")", "Set", "counter", "to", "value" ]
python
train
synw/goerr
goerr/__init__.py
https://github.com/synw/goerr/blob/08b3809d6715bffe26899a769d96fa5de8573faf/goerr/__init__.py#L75-L81
def info(self, *args) -> "Err": """ Creates an info message """ error = self._create_err("info", *args) print(self._errmsg(error)) return error
[ "def", "info", "(", "self", ",", "*", "args", ")", "->", "\"Err\"", ":", "error", "=", "self", ".", "_create_err", "(", "\"info\"", ",", "*", "args", ")", "print", "(", "self", ".", "_errmsg", "(", "error", ")", ")", "return", "error" ]
Creates an info message
[ "Creates", "an", "info", "message" ]
python
train
ThomasChiroux/attowiki
src/attowiki/git_tools.py
https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/git_tools.py#L115-L133
def reset_to_last_commit():
    """reset a modified file to its last commit status

    This method does the same as a::

        $ git reset --hard

    Keyword Arguments:
        <none>

    Returns:
        <nothing>
    """
    try:
        repo = Repo()
        gitcmd = repo.git
        gitcmd.reset(hard=True)
    except Exception:
        pass
[ "def", "reset_to_last_commit", "(", ")", ":", "try", ":", "repo", "=", "Repo", "(", ")", "gitcmd", "=", "repo", ".", "git", "gitcmd", ".", "reset", "(", "hard", "=", "True", ")", "except", "Exception", ":", "pass" ]
reset a modified file to its last commit status

    This method does the same as a::

        $ git reset --hard

    Keyword Arguments:
        <none>

    Returns:
        <nothing>
[ "reset", "a", "modified", "file", "to", "his", "last", "commit", "status" ]
python
train
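The equivalent GitPython call, shown without the broad exception guard. This is destructive: it discards every uncommitted change in the working tree:

from git import Repo

repo = Repo('.')           # assumes the cwd is inside a git repository
repo.git.reset(hard=True)  # same as: $ git reset --hard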
Kaggle/kaggle-api
kaggle/api/kaggle_api_extended.py
https://github.com/Kaggle/kaggle-api/blob/65f14b1386470c5784d4753e491478e7537660d9/kaggle/api/kaggle_api_extended.py#L2038-L2051
def kernels_output_cli(self, kernel, kernel_opt=None, path=None, force=False, quiet=False): """ client wrapper for kernels_output, with same arguments. Extra arguments are described below, and see kernels_output for others. Parameters ========== kernel_opt: option from client instead of kernel, if not defined """ kernel = kernel or kernel_opt self.kernels_output(kernel, path, force, quiet)
[ "def", "kernels_output_cli", "(", "self", ",", "kernel", ",", "kernel_opt", "=", "None", ",", "path", "=", "None", ",", "force", "=", "False", ",", "quiet", "=", "False", ")", ":", "kernel", "=", "kernel", "or", "kernel_opt", "self", ".", "kernels_output", "(", "kernel", ",", "path", ",", "force", ",", "quiet", ")" ]
client wrapper for kernels_output, with same arguments. Extra arguments are described below, and see kernels_output for others. Parameters ========== kernel_opt: option from client instead of kernel, if not defined
[ "client", "wrapper", "for", "kernels_output", "with", "same", "arguments", ".", "Extra", "arguments", "are", "described", "below", "and", "see", "kernels_output", "for", "others", ".", "Parameters", "==========", "kernel_opt", ":", "option", "from", "client", "instead", "of", "kernel", "if", "not", "defined" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15405-L15425
def wnexpd(left, right, window): """ Expand each of the intervals of a double precision window. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnexpd_c.html :param left: Amount subtracted from each left endpoint. :type left: float :param right: Amount added to each right endpoint. :type right: float :param window: Window to be expanded. :type window: spiceypy.utils.support_types.SpiceCell :return: Expanded Window. :rtype: spiceypy.utils.support_types.SpiceCell """ assert isinstance(window, stypes.SpiceCell) assert window.dtype == 1 left = ctypes.c_double(left) right = ctypes.c_double(right) libspice.wnexpd_c(left, right, ctypes.byref(window)) return window
[ "def", "wnexpd", "(", "left", ",", "right", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "left", "=", "ctypes", ".", "c_double", "(", "left", ")", "right", "=", "ctypes", ".", "c_double", "(", "right", ")", "libspice", ".", "wnexpd_c", "(", "left", ",", "right", ",", "ctypes", ".", "byref", "(", "window", ")", ")", "return", "window" ]
Expand each of the intervals of a double precision window. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnexpd_c.html :param left: Amount subtracted from each left endpoint. :type left: float :param right: Amount added to each right endpoint. :type right: float :param window: Window to be expanded. :type window: spiceypy.utils.support_types.SpiceCell :return: Expanded Window. :rtype: spiceypy.utils.support_types.SpiceCell
[ "Expand", "each", "of", "the", "intervals", "of", "a", "double", "precision", "window", "." ]
python
train
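A hedged usage sketch, assuming spiceypy is installed: build a double-precision cell, insert two intervals, then widen each by 0.5 on both ends:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

window = stypes.SPICEDOUBLE_CELL(8)  # room for four intervals (8 endpoints)
spice.wninsd(1.0, 3.0, window)
spice.wninsd(7.0, 11.0, window)

expanded = spice.wnexpd(0.5, 0.5, window)
print([expanded[i] for i in range(spice.card(expanded))])
# Expected endpoints: [0.5, 3.5, 6.5, 11.5]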
programa-stic/barf-project
barf/barf.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/barf.py#L128-L138
def _setup_x86_arch(self, arch_mode=None): """Set up x86 architecture. """ if arch_mode is None: arch_mode = self.binary.architecture_mode # Set up architecture information self.name = "x86" self.arch_info = X86ArchitectureInformation(arch_mode) self.disassembler = X86Disassembler(arch_mode) self.ir_translator = X86Translator(arch_mode)
[ "def", "_setup_x86_arch", "(", "self", ",", "arch_mode", "=", "None", ")", ":", "if", "arch_mode", "is", "None", ":", "arch_mode", "=", "self", ".", "binary", ".", "architecture_mode", "# Set up architecture information", "self", ".", "name", "=", "\"x86\"", "self", ".", "arch_info", "=", "X86ArchitectureInformation", "(", "arch_mode", ")", "self", ".", "disassembler", "=", "X86Disassembler", "(", "arch_mode", ")", "self", ".", "ir_translator", "=", "X86Translator", "(", "arch_mode", ")" ]
Set up x86 architecture.
[ "Set", "up", "x86", "architecture", "." ]
python
train
ewels/MultiQC
multiqc/modules/bowtie2/bowtie2.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/bowtie2/bowtie2.py#L206-L263
def bowtie2_alignment_plot (self): """ Make the HighCharts HTML to plot the alignment rates """ half_warning = '' for s_name in self.bowtie2_data: if 'paired_aligned_mate_one_halved' in self.bowtie2_data[s_name] or 'paired_aligned_mate_multi_halved' in self.bowtie2_data[s_name] or 'paired_aligned_mate_none_halved' in self.bowtie2_data[s_name]: half_warning = '<em>Please note that single mate alignment counts are halved to tally with pair counts properly.</em>' description_text = 'This plot shows the number of reads aligning to the reference in different ways.' # Config for the plot config = { 'ylab': '# Reads', 'cpswitch_counts_label': 'Number of Reads' } # Two plots, don't mix SE with PE if self.num_se > 0: sekeys = OrderedDict() sekeys['unpaired_aligned_one'] = { 'color': '#20568f', 'name': 'SE mapped uniquely' } sekeys['unpaired_aligned_multi'] = { 'color': '#f7a35c', 'name': 'SE multimapped' } sekeys['unpaired_aligned_none'] = { 'color': '#981919', 'name': 'SE not aligned' } config['id'] = 'bowtie2_se_plot' config['title'] = 'Bowtie 2: SE Alignment Scores' self.add_section( description = description_text, helptext = ''' There are 3 possible types of alignment: * **SE Mapped uniquely**: Read has only one occurence in the reference genome. * **SE Multimapped**: Read has multiple occurence. * **SE No aligned**: Read has no occurence. ''', plot = bargraph.plot(self.bowtie2_data, sekeys, config) ) if self.num_pe > 0: pekeys = OrderedDict() pekeys['paired_aligned_one'] = { 'color': '#20568f', 'name': 'PE mapped uniquely' } pekeys['paired_aligned_discord_one'] = { 'color': '#5c94ca', 'name': 'PE mapped discordantly uniquely' } pekeys['paired_aligned_mate_one_halved'] = { 'color': '#95ceff', 'name': 'PE one mate mapped uniquely' } pekeys['paired_aligned_multi'] = { 'color': '#f7a35c', 'name': 'PE multimapped' } pekeys['paired_aligned_discord_multi'] = { 'color': '#dce333', 'name': 'PE discordantly multimapped' } pekeys['paired_aligned_mate_multi_halved'] = { 'color': '#ffeb75', 'name': 'PE one mate multimapped' } pekeys['paired_aligned_mate_none_halved'] = { 'color': '#981919', 'name': 'PE neither mate aligned' } config['id'] = 'bowtie2_pe_plot' config['title'] = 'Bowtie 2: PE Alignment Scores' self.add_section( description = "<br>".join([description_text,half_warning]), helptext = ''' There are 6 possible types of alignment: * **PE mapped uniquely**: Pair has only one occurence in the reference genome. * **PE mapped discordantly uniquely**: Pair has only one occurence but not in proper pair. * **PE one mate mapped uniquely**: One read of a pair has one occurence. * **PE multimapped**: Pair has multiple occurence. * **PE one mate multimapped**: One read of a pair has multiple occurence. * **PE neither mate aligned**: Pair has no occurence. ''', plot = bargraph.plot(self.bowtie2_data, pekeys, config) )
[ "def", "bowtie2_alignment_plot", "(", "self", ")", ":", "half_warning", "=", "''", "for", "s_name", "in", "self", ".", "bowtie2_data", ":", "if", "'paired_aligned_mate_one_halved'", "in", "self", ".", "bowtie2_data", "[", "s_name", "]", "or", "'paired_aligned_mate_multi_halved'", "in", "self", ".", "bowtie2_data", "[", "s_name", "]", "or", "'paired_aligned_mate_none_halved'", "in", "self", ".", "bowtie2_data", "[", "s_name", "]", ":", "half_warning", "=", "'<em>Please note that single mate alignment counts are halved to tally with pair counts properly.</em>'", "description_text", "=", "'This plot shows the number of reads aligning to the reference in different ways.'", "# Config for the plot", "config", "=", "{", "'ylab'", ":", "'# Reads'", ",", "'cpswitch_counts_label'", ":", "'Number of Reads'", "}", "# Two plots, don't mix SE with PE", "if", "self", ".", "num_se", ">", "0", ":", "sekeys", "=", "OrderedDict", "(", ")", "sekeys", "[", "'unpaired_aligned_one'", "]", "=", "{", "'color'", ":", "'#20568f'", ",", "'name'", ":", "'SE mapped uniquely'", "}", "sekeys", "[", "'unpaired_aligned_multi'", "]", "=", "{", "'color'", ":", "'#f7a35c'", ",", "'name'", ":", "'SE multimapped'", "}", "sekeys", "[", "'unpaired_aligned_none'", "]", "=", "{", "'color'", ":", "'#981919'", ",", "'name'", ":", "'SE not aligned'", "}", "config", "[", "'id'", "]", "=", "'bowtie2_se_plot'", "config", "[", "'title'", "]", "=", "'Bowtie 2: SE Alignment Scores'", "self", ".", "add_section", "(", "description", "=", "description_text", ",", "helptext", "=", "'''\n There are 3 possible types of alignment:\n * **SE Mapped uniquely**: Read has only one occurence in the reference genome.\n * **SE Multimapped**: Read has multiple occurence.\n * **SE No aligned**: Read has no occurence.\n '''", ",", "plot", "=", "bargraph", ".", "plot", "(", "self", ".", "bowtie2_data", ",", "sekeys", ",", "config", ")", ")", "if", "self", ".", "num_pe", ">", "0", ":", "pekeys", "=", "OrderedDict", "(", ")", "pekeys", "[", "'paired_aligned_one'", "]", "=", "{", "'color'", ":", "'#20568f'", ",", "'name'", ":", "'PE mapped uniquely'", "}", "pekeys", "[", "'paired_aligned_discord_one'", "]", "=", "{", "'color'", ":", "'#5c94ca'", ",", "'name'", ":", "'PE mapped discordantly uniquely'", "}", "pekeys", "[", "'paired_aligned_mate_one_halved'", "]", "=", "{", "'color'", ":", "'#95ceff'", ",", "'name'", ":", "'PE one mate mapped uniquely'", "}", "pekeys", "[", "'paired_aligned_multi'", "]", "=", "{", "'color'", ":", "'#f7a35c'", ",", "'name'", ":", "'PE multimapped'", "}", "pekeys", "[", "'paired_aligned_discord_multi'", "]", "=", "{", "'color'", ":", "'#dce333'", ",", "'name'", ":", "'PE discordantly multimapped'", "}", "pekeys", "[", "'paired_aligned_mate_multi_halved'", "]", "=", "{", "'color'", ":", "'#ffeb75'", ",", "'name'", ":", "'PE one mate multimapped'", "}", "pekeys", "[", "'paired_aligned_mate_none_halved'", "]", "=", "{", "'color'", ":", "'#981919'", ",", "'name'", ":", "'PE neither mate aligned'", "}", "config", "[", "'id'", "]", "=", "'bowtie2_pe_plot'", "config", "[", "'title'", "]", "=", "'Bowtie 2: PE Alignment Scores'", "self", ".", "add_section", "(", "description", "=", "\"<br>\"", ".", "join", "(", "[", "description_text", ",", "half_warning", "]", ")", ",", "helptext", "=", "'''\n There are 6 possible types of alignment:\n * **PE mapped uniquely**: Pair has only one occurence in the reference genome.\n * **PE mapped discordantly uniquely**: Pair has only one occurence but not in proper pair.\n * **PE one mate mapped 
uniquely**: One read of a pair has one occurence.\n * **PE multimapped**: Pair has multiple occurence.\n * **PE one mate multimapped**: One read of a pair has multiple occurence.\n * **PE neither mate aligned**: Pair has no occurence.\n '''", ",", "plot", "=", "bargraph", ".", "plot", "(", "self", ".", "bowtie2_data", ",", "pekeys", ",", "config", ")", ")" ]
Make the HighCharts HTML to plot the alignment rates
[ "Make", "the", "HighCharts", "HTML", "to", "plot", "the", "alignment", "rates" ]
python
train