Dataset schema (per-record columns, with the value ranges observed across the dataset):

    repo              string, 7 to 55 chars
    path              string, 4 to 223 chars
    url               string, 87 to 315 chars
    code              string, 75 to 104k chars
    code_tokens       list of strings
    docstring         string, 1 to 46.9k chars
    docstring_tokens  list of strings
    language          string class, 1 value ("python")
    partition         string class, 3 values (train / valid / test splits)
    avg_line_len      float64, 7.91 to 980
hydraplatform/hydra-base
hydra_base/db/model.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/db/model.py#L860-L882
def add_link(self, name, desc, layout, node_1, node_2):
    """
    Add a link to a network. Links are what effectively define the
    network topology, by associating two already existing nodes.
    """

    existing_link = get_session().query(Link).filter(
        Link.name == name, Link.network_id == self.id).first()
    if existing_link is not None:
        raise HydraError("A link with name %s is already in network %s" % (name, self.id))

    l = Link()
    l.name = name
    l.description = desc
    l.layout = json.dumps(layout) if layout is not None else None
    l.node_a = node_1
    l.node_b = node_2

    get_session().add(l)
    self.links.append(l)

    return l
[ "def", "add_link", "(", "self", ",", "name", ",", "desc", ",", "layout", ",", "node_1", ",", "node_2", ")", ":", "existing_link", "=", "get_session", "(", ")", ".", "query", "(", "Link", ")", ".", "filter", "(", "Link", ".", "name", "==", "name", ",", "Link", ".", "network_id", "==", "self", ".", "id", ")", ".", "first", "(", ")", "if", "existing_link", "is", "not", "None", ":", "raise", "HydraError", "(", "\"A link with name %s is already in network %s\"", "%", "(", "name", ",", "self", ".", "id", ")", ")", "l", "=", "Link", "(", ")", "l", ".", "name", "=", "name", "l", ".", "description", "=", "desc", "l", ".", "layout", "=", "json", ".", "dumps", "(", "layout", ")", "if", "layout", "is", "not", "None", "else", "None", "l", ".", "node_a", "=", "node_1", "l", ".", "node_b", "=", "node_2", "get_session", "(", ")", ".", "add", "(", "l", ")", "self", ".", "links", ".", "append", "(", "l", ")", "return", "l" ]
Add a link to a network. Links are what effectively define the network topology, by associating two already existing nodes.
[ "Add", "a", "link", "to", "a", "network", ".", "Links", "are", "what", "effectively", "define", "the", "network", "topology", "by", "associating", "two", "already", "existing", "nodes", "." ]
python
train
33.26087
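A self-contained sketch of the duplicate-name guard in add_link above, with a plain list standing in for the SQLAlchemy session and Link model (Network and the dict-based link are illustrative stand-ins, not the hydra-base API):

class HydraError(Exception):
    pass

class Network:  # illustrative stand-in, not the hydra-base model
    def __init__(self, network_id):
        self.id = network_id
        self.links = []

    def add_link(self, name, node_a, node_b):
        if any(link["name"] == name for link in self.links):
            raise HydraError("A link with name %s is already in network %s" % (name, self.id))
        link = {"name": name, "node_a": node_a, "node_b": node_b}
        self.links.append(link)
        return link

net = Network(1)
net.add_link("supply", "reservoir", "city")
# net.add_link("supply", "reservoir", "farm") would raise HydraError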
awslabs/aws-sam-cli
samcli/commands/local/lib/sam_api_provider.py
https://github.com/awslabs/aws-sam-cli/blob/c05af5e7378c6f05f7d82ad3f0bca17204177db6/samcli/commands/local/lib/sam_api_provider.py#L301-L314
def _normalize_http_methods(http_method):
    """
    Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all
    supported Http Methods on Api Gateway.

    :param str http_method: Http method
    :yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
    """

    if http_method.upper() == 'ANY':
        for method in SamApiProvider._ANY_HTTP_METHODS:
            yield method.upper()
    else:
        yield http_method.upper()
[ "def", "_normalize_http_methods", "(", "http_method", ")", ":", "if", "http_method", ".", "upper", "(", ")", "==", "'ANY'", ":", "for", "method", "in", "SamApiProvider", ".", "_ANY_HTTP_METHODS", ":", "yield", "method", ".", "upper", "(", ")", "else", ":", "yield", "http_method", ".", "upper", "(", ")" ]
Normalizes Http Methods. Api Gateway allows a Http Methods of ANY. This is a special verb to denote all supported Http Methods on Api Gateway. :param str http_method: Http method :yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
[ "Normalizes", "Http", "Methods", ".", "Api", "Gateway", "allows", "a", "Http", "Methods", "of", "ANY", ".", "This", "is", "a", "special", "verb", "to", "denote", "all", "supported", "Http", "Methods", "on", "Api", "Gateway", "." ]
python
train
39.571429
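The ANY expansion is easy to exercise standalone; a minimal sketch, assuming the verb list below mirrors SamApiProvider._ANY_HTTP_METHODS (the exact contents are an assumption here, not copied from the source):

# Assumed to mirror SamApiProvider._ANY_HTTP_METHODS; not copied from the source.
_ANY_HTTP_METHODS = ["GET", "DELETE", "PUT", "POST", "HEAD", "OPTIONS", "PATCH"]

def normalize_http_methods(http_method):
    if http_method.upper() == 'ANY':
        for method in _ANY_HTTP_METHODS:
            yield method.upper()
    else:
        yield http_method.upper()

print(list(normalize_http_methods("any")))  # all seven verbs
print(list(normalize_http_methods("get")))  # ['GET']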
EUDAT-B2SAFE/B2HANDLE
b2handle/handleclient.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/handleclient.py#L934-L973
def search_handle(self, URL=None, prefix=None, **key_value_pairs):
    '''
    Search for handles containing the specified key with the specified
    value. The search terms are passed on to the reverse lookup servlet
    as-is. The servlet is supposed to be case-insensitive, but if it
    isn't, the wrong case will cause a
    :exc:`~b2handle.handleexceptions.ReverseLookupException`.

    *Note:* If allowed search keys are configured, only these are used.
    If no allowed search keys are specified, all key-value pairs are
    passed on to the reverse lookup servlet, possibly causing a
    :exc:`~b2handle.handleexceptions.ReverseLookupException`.

    Example calls:

    .. code:: python

        list_of_handles = search_handle('http://www.foo.com')
        list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999)
        list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999)

    :param URL: Optional. The URL to search for (reverse lookup). [This
        is NOT the URL of the search servlet!]
    :param prefix: Optional. The Handle prefix to which the search
        should be limited to. If unspecified, the method will search
        across all prefixes present at the server given to the
        constructor.
    :param key_value_pairs: Optional. Several search fields and values
        can be specified as key-value-pairs,
        e.g. CHECKSUM=123456, URL=www.foo.com
    :raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If
        a search field is specified that cannot be used, or if something
        else goes wrong.
    :return: A list of all Handles (strings) that bear the given key
        with given value of given prefix or server. The list may be
        empty and may also contain more than one element.
    '''
    LOGGER.debug('search_handle...')

    list_of_handles = self.__searcher.search_handle(URL=URL, prefix=prefix, **key_value_pairs)
    return list_of_handles
[ "def", "search_handle", "(", "self", ",", "URL", "=", "None", ",", "prefix", "=", "None", ",", "*", "*", "key_value_pairs", ")", ":", "LOGGER", ".", "debug", "(", "'search_handle...'", ")", "list_of_handles", "=", "self", ".", "__searcher", ".", "search_handle", "(", "URL", "=", "URL", ",", "prefix", "=", "prefix", ",", "*", "*", "key_value_pairs", ")", "return", "list_of_handles" ]
Search for handles containing the specified key with the specified value. The search terms are passed on to the reverse lookup servlet as-is. The servlet is supposed to be case-insensitive, but if it isn't, the wrong case will cause a :exc:`~b2handle.handleexceptions.ReverseLookupException`. *Note:* If allowed search keys are configured, only these are used. If no allowed search keys are specified, all key-value pairs are passed on to the reverse lookup servlet, possibly causing a :exc:`~b2handle.handleexceptions.ReverseLookupException`. Example calls: .. code:: python list_of_handles = search_handle('http://www.foo.com') list_of_handles = search_handle('http://www.foo.com', CHECKSUM=99999) list_of_handles = search_handle(URL='http://www.foo.com', CHECKSUM=99999) :param URL: Optional. The URL to search for (reverse lookup). [This is NOT the URL of the search servlet!] :param prefix: Optional. The Handle prefix to which the search should be limited to. If unspecified, the method will search across all prefixes present at the server given to the constructor. :param key_value_pairs: Optional. Several search fields and values can be specified as key-value-pairs, e.g. CHECKSUM=123456, URL=www.foo.com :raise: :exc:`~b2handle.handleexceptions.ReverseLookupException`: If a search field is specified that cannot be used, or if something else goes wrong. :return: A list of all Handles (strings) that bear the given key with given value of given prefix or server. The list may be empty and may also contain more than one element.
[ "Search", "for", "handles", "containing", "the", "specified", "key", "with", "the", "specified", "value", ".", "The", "search", "terms", "are", "passed", "on", "to", "the", "reverse", "lookup", "servlet", "as", "-", "is", ".", "The", "servlet", "is", "supposed", "to", "be", "case", "-", "insensitive", "but", "if", "it", "isn", "t", "the", "wrong", "case", "will", "cause", "a", ":", "exc", ":", "~b2handle", ".", "handleexceptions", ".", "ReverseLookupException", "." ]
python
train
50.975
spyder-ide/spyder
spyder/widgets/github/backend.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/github/backend.py#L205-L220
def _store_token(self, token, remember=False):
    """Store token for future use."""
    if token and remember:
        try:
            keyring.set_password('github', 'token', token)
        except Exception:
            if self._show_msgbox:
                QMessageBox.warning(self.parent_widget,
                                    _('Failed to store token'),
                                    _('It was not possible to securely '
                                      'save your token. You will be '
                                      'prompted for your Github token '
                                      'next time you want to report '
                                      'an issue.'))
            remember = False
    CONF.set('main', 'report_error/remember_token', remember)
[ "def", "_store_token", "(", "self", ",", "token", ",", "remember", "=", "False", ")", ":", "if", "token", "and", "remember", ":", "try", ":", "keyring", ".", "set_password", "(", "'github'", ",", "'token'", ",", "token", ")", "except", "Exception", ":", "if", "self", ".", "_show_msgbox", ":", "QMessageBox", ".", "warning", "(", "self", ".", "parent_widget", ",", "_", "(", "'Failed to store token'", ")", ",", "_", "(", "'It was not possible to securely '", "'save your token. You will be '", "'prompted for your Github token '", "'next time you want to report '", "'an issue.'", ")", ")", "remember", "=", "False", "CONF", ".", "set", "(", "'main'", ",", "'report_error/remember_token'", ",", "remember", ")" ]
Store token for future use.
[ "Store", "token", "for", "future", "use", "." ]
python
train
52.25
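keyring.set_password and keyring.get_password are the standard keyring API, so the store-and-read round trip can be sketched directly; this assumes the keyring package is installed and a usable system backend is available:

import keyring  # requires the keyring package and a usable system backend

def store_token(token, remember=True):
    """Best-effort secure storage; returns whether the token was kept."""
    if token and remember:
        try:
            keyring.set_password("github", "token", token)
        except Exception:
            remember = False  # no backend available; caller should re-prompt
    return remember

if store_token("ghp_example"):  # obviously fake token for illustration
    print(keyring.get_password("github", "token"))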
cloudera/cm_api
python/src/cm_shell/cmps.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_shell/cmps.py#L277-L292
def service_action(self, service, action):
    "Perform given action on service for the selected cluster"
    try:
        service = api.get_cluster(self.cluster).get_service(service)
    except ApiException:
        print("Service not found")
        return None

    if action == "start":
        service.start()
    if action == "restart":
        service.restart()
    if action == "stop":
        service.stop()

    return True
[ "def", "service_action", "(", "self", ",", "service", ",", "action", ")", ":", "try", ":", "service", "=", "api", ".", "get_cluster", "(", "self", ".", "cluster", ")", ".", "get_service", "(", "service", ")", "except", "ApiException", ":", "print", "(", "\"Service not found\"", ")", "return", "None", "if", "action", "==", "\"start\"", ":", "service", ".", "start", "(", ")", "if", "action", "==", "\"restart\"", ":", "service", ".", "restart", "(", ")", "if", "action", "==", "\"stop\"", ":", "service", ".", "stop", "(", ")", "return", "True" ]
Perform given action on service for the selected cluster
[ "Perform", "given", "action", "on", "service", "for", "the", "selected", "cluster" ]
python
train
29.375
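Because action arrives as a single verb, the three independent if tests behave like an if/elif chain; a hedged alternative sketch that makes the valid verbs explicit via a lookup table (service here stands for any object exposing start/restart/stop methods):

def service_action(service, action):
    # Map each verb to the bound method once; unknown verbs fail fast.
    actions = {"start": service.start, "restart": service.restart, "stop": service.stop}
    if action not in actions:
        print("Unknown action: %s" % action)
        return None
    actions[action]()
    return True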
gwastro/pycbc
pycbc/workflow/segment.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/segment.py#L286-L340
def get_cumulative_veto_group_files(workflow, option, cat_files, out_dir,
                                    execute_now=True, tags=None):
    """
    Get the cumulative veto files that define the different backgrounds
    we want to analyze, defined by groups of vetos.

    Parameters
    -----------
    workflow : Workflow object
        Instance of the workflow object
    option : str
        ini file option to use to get the veto groups
    cat_files : FileList of SegFiles
        The category veto files generated by get_veto_segs
    out_dir : path
        Location to store output files
    execute_now : Boolean
        If true outputs are generated at runtime. Else jobs go into the
        workflow and are generated then.
    tags : list of strings
        Used to retrieve subsections of the ini file for
        configuration options.

    Returns
    --------
    seg_files : workflow.core.FileList instance
        The cumulative segment files for each veto group.
    names : list of strings
        The segment names for the corresponding seg_file
    cat_files : workflow.core.FileList instance
        The list of individual category veto files
    """
    if tags is None:
        tags = []
    logging.info("Starting generating vetoes for groups in %s" % (option))
    make_analysis_dir(out_dir)

    cat_sets = parse_cat_ini_opt(workflow.cp.get_opt_tags('workflow-segments',
                                                          option, tags))
    cum_seg_files = FileList()
    names = []
    for cat_set in cat_sets:
        segment_name = "CUMULATIVE_CAT_%s" % (''.join(sorted(cat_set)))
        logging.info('getting information for %s' % segment_name)
        categories = [cat_to_veto_def_cat(c) for c in cat_set]

        cum_seg_files += [get_cumulative_segs(workflow, categories,
                                              cat_files, out_dir,
                                              execute_now=execute_now,
                                              segment_name=segment_name,
                                              tags=tags)]
        names.append(segment_name)

    logging.info("Done generating vetoes for groups in %s" % (option))
    return cum_seg_files, names, cat_files
[ "def", "get_cumulative_veto_group_files", "(", "workflow", ",", "option", ",", "cat_files", ",", "out_dir", ",", "execute_now", "=", "True", ",", "tags", "=", "None", ")", ":", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "logging", ".", "info", "(", "\"Starting generating vetoes for groups in %s\"", "%", "(", "option", ")", ")", "make_analysis_dir", "(", "out_dir", ")", "cat_sets", "=", "parse_cat_ini_opt", "(", "workflow", ".", "cp", ".", "get_opt_tags", "(", "'workflow-segments'", ",", "option", ",", "tags", ")", ")", "cum_seg_files", "=", "FileList", "(", ")", "names", "=", "[", "]", "for", "cat_set", "in", "cat_sets", ":", "segment_name", "=", "\"CUMULATIVE_CAT_%s\"", "%", "(", "''", ".", "join", "(", "sorted", "(", "cat_set", ")", ")", ")", "logging", ".", "info", "(", "'getting information for %s'", "%", "segment_name", ")", "categories", "=", "[", "cat_to_veto_def_cat", "(", "c", ")", "for", "c", "in", "cat_set", "]", "cum_seg_files", "+=", "[", "get_cumulative_segs", "(", "workflow", ",", "categories", ",", "cat_files", ",", "out_dir", ",", "execute_now", "=", "execute_now", ",", "segment_name", "=", "segment_name", ",", "tags", "=", "tags", ")", "]", "names", ".", "append", "(", "segment_name", ")", "logging", ".", "info", "(", "\"Done generating vetoes for groups in %s\"", "%", "(", "option", ")", ")", "return", "cum_seg_files", ",", "names", ",", "cat_files" ]
Get the cumulative veto files that define the different backgrounds we want to analyze, defined by groups of vetos. Parameters ----------- workflow : Workflow object Instance of the workflow object option : str ini file option to use to get the veto groups cat_files : FileList of SegFiles The category veto files generated by get_veto_segs out_dir : path Location to store output files execute_now : Boolean If true outputs are generated at runtime. Else jobs go into the workflow and are generated then. tags : list of strings Used to retrieve subsections of the ini file for configuration options. Returns -------- seg_files : workflow.core.FileList instance The cumulative segment files for each veto group. names : list of strings The segment names for the corresponding seg_file cat_files : workflow.core.FileList instance The list of individual category veto files
[ "Get", "the", "cumulative", "veto", "files", "that", "define", "the", "different", "backgrounds", "we", "want", "to", "analyze", "defined", "by", "groups", "of", "vetos", "." ]
python
train
37.054545
limodou/uliweb
uliweb/lib/werkzeug/wrappers.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wrappers.py#L402-L411
def args(self):
    """The parsed URL parameters. By default an
    :class:`~werkzeug.datastructures.ImmutableMultiDict`
    is returned from this function. This can be changed by setting
    :attr:`parameter_storage_class` to a different type. This might
    be necessary if the order of the form data is important.
    """
    return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
                      self.url_charset, errors=self.encoding_errors,
                      cls=self.parameter_storage_class)
[ "def", "args", "(", "self", ")", ":", "return", "url_decode", "(", "wsgi_get_bytes", "(", "self", ".", "environ", ".", "get", "(", "'QUERY_STRING'", ",", "''", ")", ")", ",", "self", ".", "url_charset", ",", "errors", "=", "self", ".", "encoding_errors", ",", "cls", "=", "self", ".", "parameter_storage_class", ")" ]
The parsed URL parameters. By default an :class:`~werkzeug.datastructures.ImmutableMultiDict` is returned from this function. This can be changed by setting :attr:`parameter_storage_class` to a different type. This might be necessary if the order of the form data is important.
[ "The", "parsed", "URL", "parameters", ".", "By", "default", "an", ":", "class", ":", "~werkzeug", ".", "datastructures", ".", "ImmutableMultiDict", "is", "returned", "from", "this", "function", ".", "This", "can", "be", "changed", "by", "setting", ":", "attr", ":", "parameter_storage_class", "to", "a", "different", "type", ".", "This", "might", "be", "necessary", "if", "the", "order", "of", "the", "form", "data", "is", "important", "." ]
python
train
55.5
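Since this is Werkzeug's stock request wrapper, the property can be exercised with the library's own test helpers; note that the returned ImmutableMultiDict preserves repeated keys:

from werkzeug.test import create_environ
from werkzeug.wrappers import Request

request = Request(create_environ("/search?q=net&q=work&page=2"))
print(request.args.getlist("q"))  # ['net', 'work']
print(request.args.get("page"))   # '2'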
AtteqCom/zsl
src/zsl/task/task_decorator.py
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/task/task_decorator.py#L490-L505
def forbid_web_access(f):
    """
    Forbids running task using http request.

    :param f: Callable
    :return Callable
    """

    @wraps(f)
    def wrapper_fn(*args, **kwargs):
        if isinstance(JobContext.get_current_context(), WebJobContext):
            raise ForbiddenError('Access forbidden from web.')
        return f(*args, **kwargs)

    return wrapper_fn
[ "def", "forbid_web_access", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapper_fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "JobContext", ".", "get_current_context", "(", ")", ",", "WebJobContext", ")", ":", "raise", "ForbiddenError", "(", "'Access forbidden from web.'", ")", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper_fn" ]
Forbids running task using http request. :param f: Callable :return Callable
[ "Forbids", "running", "task", "using", "http", "request", "." ]
python
train
22.5625
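A minimal standalone sketch of the same guard-decorator pattern; the module-level _context dict is a hypothetical stand-in for zsl's JobContext machinery:

from functools import wraps

class ForbiddenError(Exception):
    pass

_context = {"web": False}  # hypothetical stand-in for zsl's JobContext

def forbid_web_access(f):
    @wraps(f)
    def wrapper_fn(*args, **kwargs):
        if _context["web"]:
            raise ForbiddenError('Access forbidden from web.')
        return f(*args, **kwargs)
    return wrapper_fn

@forbid_web_access
def nightly_cleanup():
    return "done"

print(nightly_cleanup())  # 'done' while not in a web context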
pypa/setuptools
setuptools/command/easy_install.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/command/easy_install.py#L2145-L2149
def get_header(cls, script_text="", executable=None):
    """Create a #! line, getting options (if any) from script_text"""
    cmd = cls.command_spec_class.best().from_param(executable)
    cmd.install_options(script_text)
    return cmd.as_header()
[ "def", "get_header", "(", "cls", ",", "script_text", "=", "\"\"", ",", "executable", "=", "None", ")", ":", "cmd", "=", "cls", ".", "command_spec_class", ".", "best", "(", ")", ".", "from_param", "(", "executable", ")", "cmd", ".", "install_options", "(", "script_text", ")", "return", "cmd", ".", "as_header", "(", ")" ]
Create a #! line, getting options (if any) from script_text
[ "Create", "a", "#!", "line", "getting", "options", "(", "if", "any", ")", "from", "script_text" ]
python
train
52.4
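A rough approximation of what the header builder produces; real setuptools also resolves the best command spec and quotes or rewrites awkward interpreter paths, which this sketch skips:

import sys

def get_header(executable=None):
    # Simplified: real setuptools resolves a command spec and handles
    # quoting and over-long interpreter paths.
    exe = executable or sys.executable
    return "#!%s\n" % exe

print(get_header(), end="")  # e.g. '#!/usr/bin/python3'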
Jaymon/captain
captain/__main__.py
https://github.com/Jaymon/captain/blob/4297f32961d423a10d0f053bc252e29fbe939a47/captain/__main__.py#L15-L65
def main(path):
    '''scan path directory and any subdirectories for valid captain scripts'''
    basepath = os.path.abspath(os.path.expanduser(str(path)))
    echo.h2("Available scripts in {}".format(basepath))
    echo.br()

    for root_dir, dirs, files in os.walk(basepath, topdown=True):
        for f in fnmatch.filter(files, '*.py'):
            try:
                filepath = os.path.join(root_dir, f)

                # super edge case, this makes sure the python script won't start
                # an interactive console session which would cause the session
                # to start and not allow the for loop to complete
                with open(filepath, encoding="UTF-8") as fp:
                    body = fp.read()
                    is_console = "InteractiveConsole" in body
                    is_console = is_console or "code" in body
                    is_console = is_console and "interact(" in body
                    if is_console:
                        continue

                s = captain.Script(filepath)
                if s.can_run_from_cli():
                    rel_filepath = s.call_path(basepath)
                    p = s.parser
                    echo.h3(rel_filepath)

                    desc = p.description
                    if desc:
                        echo.indent(desc, indent=(" " * 4))

                    subcommands = s.subcommands
                    if subcommands:
                        echo.br()
                        echo.indent("Subcommands:", indent=(" " * 4))
                        for sc in subcommands.keys():
                            echo.indent(sc, indent=(" " * 6))

                    echo.br()

            except captain.ParseError:
                pass

            except Exception as e:
                #echo.exception(e)
                #echo.err("Failed to parse {} because {}", f, e.message)
                echo.err("Failed to parse {}", f)
                echo.verbose(str(e))  # exceptions have no .message attribute on Python 3
                echo.br()
[ "def", "main", "(", "path", ")", ":", "basepath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "str", "(", "path", ")", ")", ")", "echo", ".", "h2", "(", "\"Available scripts in {}\"", ".", "format", "(", "basepath", ")", ")", "echo", ".", "br", "(", ")", "for", "root_dir", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "basepath", ",", "topdown", "=", "True", ")", ":", "for", "f", "in", "fnmatch", ".", "filter", "(", "files", ",", "'*.py'", ")", ":", "try", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "root_dir", ",", "f", ")", "# super edge case, this makes sure the python script won't start", "# an interactive console session which would cause the session", "# to start and not allow the for loop to complete", "with", "open", "(", "filepath", ",", "encoding", "=", "\"UTF-8\"", ")", "as", "fp", ":", "body", "=", "fp", ".", "read", "(", ")", "is_console", "=", "\"InteractiveConsole\"", "in", "body", "is_console", "=", "is_console", "or", "\"code\"", "in", "body", "is_console", "=", "is_console", "and", "\"interact(\"", "in", "body", "if", "is_console", ":", "continue", "s", "=", "captain", ".", "Script", "(", "filepath", ")", "if", "s", ".", "can_run_from_cli", "(", ")", ":", "rel_filepath", "=", "s", ".", "call_path", "(", "basepath", ")", "p", "=", "s", ".", "parser", "echo", ".", "h3", "(", "rel_filepath", ")", "desc", "=", "p", ".", "description", "if", "desc", ":", "echo", ".", "indent", "(", "desc", ",", "indent", "=", "(", "\" \"", "*", "4", ")", ")", "subcommands", "=", "s", ".", "subcommands", "if", "subcommands", ":", "echo", ".", "br", "(", ")", "echo", ".", "indent", "(", "\"Subcommands:\"", ",", "indent", "=", "(", "\" \"", "*", "4", ")", ")", "for", "sc", "in", "subcommands", ".", "keys", "(", ")", ":", "echo", ".", "indent", "(", "sc", ",", "indent", "=", "(", "\" \"", "*", "6", ")", ")", "echo", ".", "br", "(", ")", "except", "captain", ".", "ParseError", ":", "pass", "except", "Exception", "as", "e", ":", "#echo.exception(e)", "#echo.err(\"Failed to parse {} because {}\", f, e.message)", "echo", ".", "err", "(", "\"Failed to parse {}\"", ",", "f", ")", "echo", ".", "verbose", "(", "e", ".", "message", ")", "echo", ".", "br", "(", ")" ]
scan path directory and any subdirectories for valid captain scripts
[ "scan", "path", "directory", "and", "any", "subdirectories", "for", "valid", "captain", "scripts" ]
python
valid
38.313725
cisco-sas/kitty
kitty/model/low_level/container.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/container.py#L280-L292
def get_info(self):
    '''
    Get info regarding the current fuzzed enclosed node

    :return: info dictionary
    '''
    field = self._current_field()
    if field:
        info = field.get_info()
        info['path'] = '%s/%s' % (self.name if self.name else '<no name>', info['path'])
    else:
        info = super(Container, self).get_info()
    return info
[ "def", "get_info", "(", "self", ")", ":", "field", "=", "self", ".", "_current_field", "(", ")", "if", "field", ":", "info", "=", "field", ".", "get_info", "(", ")", "info", "[", "'path'", "]", "=", "'%s/%s'", "%", "(", "self", ".", "name", "if", "self", ".", "name", "else", "'<no name>'", ",", "info", "[", "'path'", "]", ")", "else", ":", "info", "=", "super", "(", "Container", ",", "self", ")", ".", "get_info", "(", ")", "return", "info" ]
Get info regarding the current fuzzed enclosed node :return: info dictionary
[ "Get", "info", "regarding", "the", "current", "fuzzed", "enclosed", "node" ]
python
train
30.538462
pydata/xarray
xarray/core/dataarray.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataarray.py#L817-L828
def isel(self, indexers=None, drop=False, **indexers_kwargs):
    """Return a new DataArray whose dataset is given by integer indexing
    along the specified dimension(s).

    See Also
    --------
    Dataset.isel
    DataArray.sel
    """
    indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'isel')
    ds = self._to_temp_dataset().isel(drop=drop, indexers=indexers)
    return self._from_temp_dataset(ds)
[ "def", "isel", "(", "self", ",", "indexers", "=", "None", ",", "drop", "=", "False", ",", "*", "*", "indexers_kwargs", ")", ":", "indexers", "=", "either_dict_or_kwargs", "(", "indexers", ",", "indexers_kwargs", ",", "'isel'", ")", "ds", "=", "self", ".", "_to_temp_dataset", "(", ")", ".", "isel", "(", "drop", "=", "drop", ",", "indexers", "=", "indexers", ")", "return", "self", ".", "_from_temp_dataset", "(", "ds", ")" ]
Return a new DataArray whose dataset is given by integer indexing along the specified dimension(s). See Also -------- Dataset.isel DataArray.sel
[ "Return", "a", "new", "DataArray", "whose", "dataset", "is", "given", "by", "integer", "indexing", "along", "the", "specified", "dimension", "(", "s", ")", "." ]
python
train
37.5
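isel is public xarray API, so the behavior is easy to demonstrate directly:

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(12).reshape(3, 4), dims=("x", "y"))
print(da.isel(x=1))                 # the row at integer position 1
print(da.isel(x=1, y=slice(0, 2)))  # same row, first two columns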
grantmcconnaughey/Lintly
lintly/patch.py
https://github.com/grantmcconnaughey/Lintly/blob/73c1ee36740ac5bb2a32d3f24fca2a27f4d4e466/lintly/patch.py#L26-L70
def changed_lines(self):
    """
    A list of dicts in the format:
        {
            'file_name': str,
            'content': str,
            'line_number': int,
            'position': int
        }
    """
    lines = []

    file_name = ''
    line_number = 0
    patch_position = -1
    found_first_information_line = False

    for i, content in enumerate(self.body.splitlines()):
        range_information_match = RANGE_INFORMATION_LINE.search(content)
        file_name_line_match = FILE_NAME_LINE.search(content)

        if file_name_line_match:
            file_name = file_name_line_match.group('file_name')
            found_first_information_line = False
        elif range_information_match:
            line_number = int(range_information_match.group('line_number'))
            if not found_first_information_line:
                # This is the first information line. Set patch position to 1 and start counting
                patch_position = 0
                found_first_information_line = True
        elif MODIFIED_LINE.search(content):
            line = {
                'file_name': file_name,
                'content': content,
                'line_number': line_number,
                'position': patch_position
            }
            lines.append(line)
            line_number += 1
        elif NOT_REMOVED_OR_NEWLINE_WARNING.search(content) or content == '':
            line_number += 1

        patch_position += 1

    return lines
[ "def", "changed_lines", "(", "self", ")", ":", "lines", "=", "[", "]", "file_name", "=", "''", "line_number", "=", "0", "patch_position", "=", "-", "1", "found_first_information_line", "=", "False", "for", "i", ",", "content", "in", "enumerate", "(", "self", ".", "body", ".", "splitlines", "(", ")", ")", ":", "range_information_match", "=", "RANGE_INFORMATION_LINE", ".", "search", "(", "content", ")", "file_name_line_match", "=", "FILE_NAME_LINE", ".", "search", "(", "content", ")", "if", "file_name_line_match", ":", "file_name", "=", "file_name_line_match", ".", "group", "(", "'file_name'", ")", "found_first_information_line", "=", "False", "elif", "range_information_match", ":", "line_number", "=", "int", "(", "range_information_match", ".", "group", "(", "'line_number'", ")", ")", "if", "not", "found_first_information_line", ":", "# This is the first information line. Set patch position to 1 and start counting", "patch_position", "=", "0", "found_first_information_line", "=", "True", "elif", "MODIFIED_LINE", ".", "search", "(", "content", ")", ":", "line", "=", "{", "'file_name'", ":", "file_name", ",", "'content'", ":", "content", ",", "'line_number'", ":", "line_number", ",", "'position'", ":", "patch_position", "}", "lines", ".", "append", "(", "line", ")", "line_number", "+=", "1", "elif", "NOT_REMOVED_OR_NEWLINE_WARNING", ".", "search", "(", "content", ")", "or", "content", "==", "''", ":", "line_number", "+=", "1", "patch_position", "+=", "1", "return", "lines" ]
A list of dicts in the format: { 'file_name': str, 'content': str, 'line_number': int, 'position': int }
[ "A", "list", "of", "dicts", "in", "the", "format", ":", "{", "file_name", ":", "str", "content", ":", "str", "line_number", ":", "int", "position", ":", "int", "}" ]
python
train
35.288889
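A compact sketch of the same idea on a unified diff, tracking only new-file line numbers for added lines; the regex and bookkeeping here are simplified relative to Lintly's parser:

import re

HUNK_HEADER = re.compile(r"^@@ -\d+(?:,\d+)? \+(\d+)")

def added_lines(patch_body):
    """Yield (new_file_line_number, content) for each added line."""
    line_number = 0
    for raw in patch_body.splitlines():
        header = HUNK_HEADER.match(raw)
        if header:
            line_number = int(header.group(1))
        elif raw.startswith("+") and not raw.startswith("+++"):
            yield line_number, raw[1:]
            line_number += 1
        elif not raw.startswith("-"):
            line_number += 1

demo = "@@ -1,2 +1,3 @@\n unchanged\n+added line\n unchanged"
print(list(added_lines(demo)))  # [(2, 'added line')]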
googleapis/google-cloud-python
dns/google/cloud/dns/zone.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/zone.py#L138-L148
def description(self, value):
    """Update description of the zone.

    :type value: str
    :param value: (Optional) new description

    :raises: ValueError for invalid value types.
    """
    if not isinstance(value, six.string_types) and value is not None:
        raise ValueError("Pass a string, or None")
    self._properties["description"] = value
[ "def", "description", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "and", "value", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Pass a string, or None\"", ")", "self", ".", "_properties", "[", "\"description\"", "]", "=", "value" ]
Update description of the zone. :type value: str :param value: (Optional) new description :raises: ValueError for invalid value types.
[ "Update", "description", "of", "the", "zone", "." ]
python
train
34.545455
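The same validate-in-a-setter pattern, reduced to a standalone class; this sketch uses plain str where the original uses six.string_types for Python 2/3 compatibility:

class Zone:  # standalone sketch, not the google-cloud-dns class
    def __init__(self):
        self._properties = {}

    @property
    def description(self):
        return self._properties.get("description")

    @description.setter
    def description(self, value):
        if not isinstance(value, str) and value is not None:
            raise ValueError("Pass a string, or None")
        self._properties["description"] = value

zone = Zone()
zone.description = "internal zone"
# zone.description = 42 would raise ValueError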
klavinslab/coral
coral/analysis/_structure/nupack.py
https://github.com/klavinslab/coral/blob/17f59591211562a59a051f474cd6cecba4829df9/coral/analysis/_structure/nupack.py#L216-L290
def pairs_multi(self, strands, cutoff=0.001, permutation=None, temp=37.0,
                pseudo=False, material=None, dangles='some', sodium=1.0,
                magnesium=0.0):
    '''Compute the pair probabilities for an ordered complex of strands.
    Runs the \'pairs\' command.

    :param strands: List of strands to use as inputs to pairs -multi.
    :type strands: list
    :param permutation: The circular permutation of strands to test in
                        complex. e.g. to test in the order that was input
                        for 4 strands, the permutation would be [1,2,3,4].
                        If set to None, defaults to the order of the
                        input strands.
    :type permutation: list
    :param temp: Temperature setting for the computation. Negative values
                 are not allowed.
    :type temp: float
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :param material: The material setting to use in the computation. If
                     set to None (the default), the material type is
                     inferred from the strands. Other settings available:
                     'dna' for DNA parameters, 'rna' for RNA (1995)
                     parameters, and 'rna1999' for the RNA 1999 parameters.
    :type material: str
    :param dangles: How to treat dangles in the computation. From the
                    user guide: For \'none\': Dangle energies are ignored.
                    For \'some\': \'A dangle energy is incorporated for
                    each unpaired base flanking a duplex\'. For 'all': all
                    dangle energy is considered.
    :type dangles: str
    :param sodium: Sodium concentration in solution (molar), only applies
                   to DNA.
    :type sodium: float
    :param magnesium: Magnesium concentration in solution (molar), only
                      applies to DNA.
    :type magnesium: float
    :param cutoff: Only probabilities above this cutoff appear in the
                   output.
    :type cutoff: float
    :returns: Two probability matrices: The probability matrix as in the
              pairs method (but with a dimension equal to the sum of the
              lengths of the sequences in the permutation), and a similar
              probability matrix where multiple strands of the same
              species are considered to be indistinguishable.
    :rtype: list
    '''
    # Set the material (will be used to set command material flag)
    material = self._set_material(strands, material, multi=True)

    # Set up command flags
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
                                   magnesium, multi=True)

    # Set up the input file and run the command. Note: no STDOUT
    if permutation is None:
        permutation = range(1, len(strands) + 1)
    lines = self._multi_lines(strands, permutation)
    self._run('pairs', cmd_args, lines)

    # Read the output from file
    N = sum([len(s) for s in strands])
    matrices = []
    for mat_type in ['ppairs', 'epairs']:
        data = self._read_tempfile('pairs.' + mat_type)
        probs = re.search('\n\n\d*\n(.*)', data, flags=re.DOTALL).group(1)
        lines = probs.split('\n')
        # Remove the last line (empty)
        lines.pop()
        pairlist = [line.split('\t') for line in lines]
        prob_matrix = self._pairs_to_np(pairlist, N)
        matrices.append(prob_matrix)

    return matrices
[ "def", "pairs_multi", "(", "self", ",", "strands", ",", "cutoff", "=", "0.001", ",", "permutation", "=", "None", ",", "temp", "=", "37.0", ",", "pseudo", "=", "False", ",", "material", "=", "None", ",", "dangles", "=", "'some'", ",", "sodium", "=", "1.0", ",", "magnesium", "=", "0.0", ")", ":", "# Set the material (will be used to set command material flag)", "material", "=", "self", ".", "_set_material", "(", "strands", ",", "material", ",", "multi", "=", "True", ")", "# Set up command flags", "cmd_args", "=", "self", ".", "_prep_cmd_args", "(", "temp", ",", "dangles", ",", "material", ",", "pseudo", ",", "sodium", ",", "magnesium", ",", "multi", "=", "True", ")", "# Set up the input file and run the command. Note: no STDOUT", "if", "permutation", "is", "None", ":", "permutation", "=", "range", "(", "1", ",", "len", "(", "strands", ")", "+", "1", ")", "lines", "=", "self", ".", "_multi_lines", "(", "strands", ",", "permutation", ")", "self", ".", "_run", "(", "'pairs'", ",", "cmd_args", ",", "lines", ")", "# Read the output from file", "N", "=", "sum", "(", "[", "len", "(", "s", ")", "for", "s", "in", "strands", "]", ")", "matrices", "=", "[", "]", "for", "mat_type", "in", "[", "'ppairs'", ",", "'epairs'", "]", ":", "data", "=", "self", ".", "_read_tempfile", "(", "'pairs.'", "+", "mat_type", ")", "probs", "=", "re", ".", "search", "(", "'\\n\\n\\d*\\n(.*)'", ",", "data", ",", "flags", "=", "re", ".", "DOTALL", ")", ".", "group", "(", "1", ")", "lines", "=", "probs", ".", "split", "(", "'\\n'", ")", "# Remove the last line (empty)", "lines", ".", "pop", "(", ")", "pairlist", "=", "[", "line", ".", "split", "(", "'\\t'", ")", "for", "line", "in", "lines", "]", "prob_matrix", "=", "self", ".", "_pairs_to_np", "(", "pairlist", ",", "N", ")", "matrices", ".", "append", "(", "prob_matrix", ")", "return", "matrices" ]
Compute the pair probabilities for an ordered complex of strands. Runs the \'pairs\' command. :param strands: List of strands to use as inputs to pairs -multi. :type strands: list :param permutation: The circular permutation of strands to test in complex. e.g. to test in the order that was input for 4 strands, the permutation would be [1,2,3,4]. If set to None, defaults to the order of the input strands. :type permutation: list :param temp: Temperature setting for the computation. Negative values are not allowed. :type temp: float :param pseudo: Enable pseudoknots. :type pseudo: bool :param material: The material setting to use in the computation. If set to None (the default), the material type is inferred from the strands. Other settings available: 'dna' for DNA parameters, 'rna' for RNA (1995) parameters, and 'rna1999' for the RNA 1999 parameters. :type material: str :param dangles: How to treat dangles in the computation. From the user guide: For \'none\': Dangle energies are ignored. For \'some\': \'A dangle energy is incorporated for each unpaired base flanking a duplex\'. For 'all': all dangle energy is considered. :type dangles: str :param sodium: Sodium concentration in solution (molar), only applies to DNA. :type sodium: float :param magnesium: Magnesium concentration in solution (molar), only applies to DNA> :type magnesium: float :param cutoff: Only probabilities above this cutoff appear in the output. :type cutoff: float :returns: Two probability matrices: The probability matrix as in the pairs method (but with a dimension equal to the sum of the lengths of the sequences in the permutation), and a similar probability matrix where multiple strands of the same species are considered to be indistinguishable. :rtype: list
[ "Compute", "the", "pair", "probabilities", "for", "an", "ordered", "complex", "of", "strands", ".", "Runs", "the", "\\", "pairs", "\\", "command", "." ]
python
train
49.093333
shaiguitar/snowclient.py
snowclient/snowrecord.py
https://github.com/shaiguitar/snowclient.py/blob/6bb513576d3b37612a7a4da225140d134f3e1c82/snowclient/snowrecord.py#L10-L17
def tablename_from_link(klass, link):
    """
    Helper method for URL's that look like
    /api/now/v1/table/FOO/sys_id etc.
    """
    arr = link.split("/")
    i = arr.index("table")
    tn = arr[i+1]
    return tn
[ "def", "tablename_from_link", "(", "klass", ",", "link", ")", ":", "arr", "=", "link", ".", "split", "(", "\"/\"", ")", "i", "=", "arr", ".", "index", "(", "\"table\"", ")", "tn", "=", "arr", "[", "i", "+", "1", "]", "return", "tn" ]
Helper method for URL's that look like /api/now/v1/table/FOO/sys_id etc.
[ "Helper", "method", "for", "URL", "s", "that", "look", "like", "/", "api", "/", "now", "/", "v1", "/", "table", "/", "FOO", "/", "sys_id", "etc", "." ]
python
train
29.5
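The path parsing is simple to demonstrate standalone; the sys_id below is made up:

def tablename_from_link(link):
    arr = link.split("/")
    return arr[arr.index("table") + 1]

print(tablename_from_link("/api/now/v1/table/incident/abc123"))  # 'incident' (made-up sys_id)
# Links without a 'table' segment raise ValueError from list.index.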
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vcs.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vcs.py#L34-L43
def local_node_swbd_number(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    local_node = ET.SubElement(config, "local-node",
                               xmlns="urn:brocade.com:mgmt:brocade-vcs")
    swbd_number = ET.SubElement(local_node, "swbd-number")
    swbd_number.text = kwargs.pop('swbd_number')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "local_node_swbd_number", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "local_node", "=", "ET", ".", "SubElement", "(", "config", ",", "\"local-node\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-vcs\"", ")", "swbd_number", "=", "ET", ".", "SubElement", "(", "local_node", ",", "\"swbd-number\"", ")", "swbd_number", ".", "text", "=", "kwargs", ".", "pop", "(", "'swbd_number'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
42.1
materialsproject/pymatgen
pymatgen/analysis/structure_prediction/substitutor.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_prediction/substitutor.py#L69-L161
def pred_from_structures(self, target_species, structures_list,
                         remove_duplicates=True, remove_existing=False):
    """
    performs a structure prediction targeting compounds containing all of
    the target_species, based on a list of structures (those structures
    can for instance come from a database like the ICSD). It will return
    all the structures formed by ionic substitutions with a probability
    higher than the threshold

    Notes:
    If the default probability model is used, input structures must
    be oxidation state decorated. See AutoOxiStateDecorationTransformation

    This method does not change the number of species in a structure. i.e.
    if the number of target species is 3, only input structures containing
    3 species will be considered.

    Args:
        target_species: a list of species with oxidation states
            e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)]
        structures_list: a list of dictionary of the form
            {'structure':Structure object, 'id':some id where it comes from}
            the id can for instance refer to an ICSD id.
        remove_duplicates: if True, the duplicates in the predicted
            structures will be removed
        remove_existing: if True, the predicted structures that already
            exist in the structures_list will be removed

    Returns:
        a list of TransformedStructure objects.
    """
    target_species = get_el_sp(target_species)
    result = []
    transmuter = StandardTransmuter([])
    if len(list(set(target_species) & set(self.get_allowed_species()))) \
            != len(target_species):
        raise ValueError("the species in target_species are not allowed "
                         + "for the probability model you are using")

    for permut in itertools.permutations(target_species):
        for s in structures_list:
            # check if: species are in the domain,
            # and the probability of subst. is above the threshold
            els = s['structure'].composition.elements
            if len(els) == len(permut) and \
                    len(list(set(els) & set(self.get_allowed_species()))) == \
                    len(els) and self._sp.cond_prob_list(permut, els) > \
                    self._threshold:

                clean_subst = {els[i]: permut[i]
                               for i in range(0, len(els))
                               if els[i] != permut[i]}

                if len(clean_subst) == 0:
                    continue

                transf = SubstitutionTransformation(clean_subst)

                if Substitutor._is_charge_balanced(
                        transf.apply_transformation(s['structure'])):
                    ts = TransformedStructure(
                        s['structure'], [transf],
                        history=[{"source": s['id']}],
                        other_parameters={
                            'type': 'structure_prediction',
                            'proba': self._sp.cond_prob_list(permut, els)}
                    )
                    result.append(ts)
                    transmuter.append_transformed_structures([ts])

    if remove_duplicates:
        transmuter.apply_filter(RemoveDuplicatesFilter(
            symprec=self._symprec))
    if remove_existing:
        # Make the list of structures from structures_list that corresponds to the
        # target species
        chemsys = list(set([sp.symbol for sp in target_species]))
        structures_list_target = [st['structure'] for st in structures_list
                                  if Substitutor._is_from_chemical_system(
                                      chemsys, st['structure'])]
        transmuter.apply_filter(RemoveExistingFilter(structures_list_target,
                                                     symprec=self._symprec))
    return transmuter.transformed_structures
[ "def", "pred_from_structures", "(", "self", ",", "target_species", ",", "structures_list", ",", "remove_duplicates", "=", "True", ",", "remove_existing", "=", "False", ")", ":", "target_species", "=", "get_el_sp", "(", "target_species", ")", "result", "=", "[", "]", "transmuter", "=", "StandardTransmuter", "(", "[", "]", ")", "if", "len", "(", "list", "(", "set", "(", "target_species", ")", "&", "set", "(", "self", ".", "get_allowed_species", "(", ")", ")", ")", ")", "!=", "len", "(", "target_species", ")", ":", "raise", "ValueError", "(", "\"the species in target_species are not allowed \"", "+", "\"for the probability model you are using\"", ")", "for", "permut", "in", "itertools", ".", "permutations", "(", "target_species", ")", ":", "for", "s", "in", "structures_list", ":", "# check if: species are in the domain,", "# and the probability of subst. is above the threshold", "els", "=", "s", "[", "'structure'", "]", ".", "composition", ".", "elements", "if", "len", "(", "els", ")", "==", "len", "(", "permut", ")", "and", "len", "(", "list", "(", "set", "(", "els", ")", "&", "set", "(", "self", ".", "get_allowed_species", "(", ")", ")", ")", ")", "==", "len", "(", "els", ")", "and", "self", ".", "_sp", ".", "cond_prob_list", "(", "permut", ",", "els", ")", ">", "self", ".", "_threshold", ":", "clean_subst", "=", "{", "els", "[", "i", "]", ":", "permut", "[", "i", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "els", ")", ")", "if", "els", "[", "i", "]", "!=", "permut", "[", "i", "]", "}", "if", "len", "(", "clean_subst", ")", "==", "0", ":", "continue", "transf", "=", "SubstitutionTransformation", "(", "clean_subst", ")", "if", "Substitutor", ".", "_is_charge_balanced", "(", "transf", ".", "apply_transformation", "(", "s", "[", "'structure'", "]", ")", ")", ":", "ts", "=", "TransformedStructure", "(", "s", "[", "'structure'", "]", ",", "[", "transf", "]", ",", "history", "=", "[", "{", "\"source\"", ":", "s", "[", "'id'", "]", "}", "]", ",", "other_parameters", "=", "{", "'type'", ":", "'structure_prediction'", ",", "'proba'", ":", "self", ".", "_sp", ".", "cond_prob_list", "(", "permut", ",", "els", ")", "}", ")", "result", ".", "append", "(", "ts", ")", "transmuter", ".", "append_transformed_structures", "(", "[", "ts", "]", ")", "if", "remove_duplicates", ":", "transmuter", ".", "apply_filter", "(", "RemoveDuplicatesFilter", "(", "symprec", "=", "self", ".", "_symprec", ")", ")", "if", "remove_existing", ":", "# Make the list of structures from structures_list that corresponds to the", "# target species", "chemsys", "=", "list", "(", "set", "(", "[", "sp", ".", "symbol", "for", "sp", "in", "target_species", "]", ")", ")", "structures_list_target", "=", "[", "st", "[", "'structure'", "]", "for", "st", "in", "structures_list", "if", "Substitutor", ".", "_is_from_chemical_system", "(", "chemsys", ",", "st", "[", "'structure'", "]", ")", "]", "transmuter", ".", "apply_filter", "(", "RemoveExistingFilter", "(", "structures_list_target", ",", "symprec", "=", "self", ".", "_symprec", ")", ")", "return", "transmuter", ".", "transformed_structures" ]
performs a structure prediction targeting compounds containing all of the target_species, based on a list of structure (those structures can for instance come from a database like the ICSD). It will return all the structures formed by ionic substitutions with a probability higher than the threshold Notes: If the default probability model is used, input structures must be oxidation state decorated. See AutoOxiStateDecorationTransformation This method does not change the number of species in a structure. i.e if the number of target species is 3, only input structures containing 3 species will be considered. Args: target_species: a list of species with oxidation states e.g., [Specie('Li',1),Specie('Ni',2), Specie('O',-2)] structures_list: a list of dictionnary of the form {'structure':Structure object ,'id':some id where it comes from} the id can for instance refer to an ICSD id. remove_duplicates: if True, the duplicates in the predicted structures will be removed remove_existing: if True, the predicted structures that already exist in the structures_list will be removed Returns: a list of TransformedStructure objects.
[ "performs", "a", "structure", "prediction", "targeting", "compounds", "containing", "all", "of", "the", "target_species", "based", "on", "a", "list", "of", "structure", "(", "those", "structures", "can", "for", "instance", "come", "from", "a", "database", "like", "the", "ICSD", ")", ".", "It", "will", "return", "all", "the", "structures", "formed", "by", "ionic", "substitutions", "with", "a", "probability", "higher", "than", "the", "threshold" ]
python
train
46.204301
kata198/indexedredis
IndexedRedis/__init__.py
https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/__init__.py#L538-L558
def getUpdatedFields(self, cascadeObjects=False):
    '''
    getUpdatedFields - See changed fields.

    @param cascadeObjects <bool> default False, if True will check if
      any foreign linked objects themselves have unsaved changes
      (recursively). Otherwise, will just check if the pk has changed.

    @return - a dictionary of fieldName : tuple(old, new).

      fieldName may be a string or may implement IRField (which
      implements string, and can be used just like a string)
    '''
    updatedFields = {}
    for thisField in self.FIELDS:
        thisVal = object.__getattribute__(self, thisField)
        if self._origData.get(thisField, '') != thisVal:
            updatedFields[thisField] = (self._origData[thisField], thisVal)

        if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase) and thisVal.objHasUnsavedChanges():
            updatedFields[thisField] = (self._origData[thisField], thisVal)

    return updatedFields
[ "def", "getUpdatedFields", "(", "self", ",", "cascadeObjects", "=", "False", ")", ":", "updatedFields", "=", "{", "}", "for", "thisField", "in", "self", ".", "FIELDS", ":", "thisVal", "=", "object", ".", "__getattribute__", "(", "self", ",", "thisField", ")", "if", "self", ".", "_origData", ".", "get", "(", "thisField", ",", "''", ")", "!=", "thisVal", ":", "updatedFields", "[", "thisField", "]", "=", "(", "self", ".", "_origData", "[", "thisField", "]", ",", "thisVal", ")", "if", "cascadeObjects", "is", "True", "and", "issubclass", "(", "thisField", ".", "__class__", ",", "IRForeignLinkFieldBase", ")", "and", "thisVal", ".", "objHasUnsavedChanges", "(", ")", ":", "updatedFields", "[", "thisField", "]", "=", "(", "self", ".", "_origData", "[", "thisField", "]", ",", "thisVal", ")", "return", "updatedFields" ]
getUpdatedFields - See changed fields. @param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively). Otherwise, will just check if the pk has changed. @return - a dictionary of fieldName : tuple(old, new). fieldName may be a string or may implement IRField (which implements string, and can be used just like a string)
[ "getUpdatedFields", "-", "See", "changed", "fields", ".", "@param", "cascadeObjects", "<bool", ">", "default", "False", "if", "True", "will", "check", "if", "any", "foreign", "linked", "objects", "themselves", "have", "unsaved", "changes", "(", "recursively", ")", ".", "Otherwise", "will", "just", "check", "if", "the", "pk", "has", "changed", "." ]
python
valid
43.333333
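Stripped of the ORM details, the core is a field-by-field old/new comparison; a minimal sketch ignoring the cascadeObjects option:

def updated_fields(orig_data, current_data):
    # Field-by-field old/new comparison; the cascadeObjects option is ignored.
    return {
        field: (orig_data.get(field, ''), value)
        for field, value in current_data.items()
        if orig_data.get(field, '') != value
    }

print(updated_fields({"name": "a", "size": 1}, {"name": "b", "size": 1}))
# {'name': ('a', 'b')}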
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L1864-L1897
def public_ip_address_get(name, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Get details about a specific public IP address.

    :param name: The name of the public IP address to query.

    :param resource_group: The resource group name assigned to the
        public IP address.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup

    '''
    expand = kwargs.get('expand')

    netconn = __utils__['azurearm.get_client']('network', **kwargs)

    try:
        pub_ip = netconn.public_ip_addresses.get(
            public_ip_address_name=name,
            resource_group_name=resource_group,
            expand=expand
        )
        result = pub_ip.as_dict()
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}

    return result
[ "def", "public_ip_address_get", "(", "name", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "expand", "=", "kwargs", ".", "get", "(", "'expand'", ")", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "pub_ip", "=", "netconn", ".", "public_ip_addresses", ".", "get", "(", "public_ip_address_name", "=", "name", ",", "resource_group_name", "=", "resource_group", ",", "expand", "=", "expand", ")", "result", "=", "pub_ip", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
.. versionadded:: 2019.2.0 Get details about a specific public IP address. :param name: The name of the public IP address to query. :param resource_group: The resource group name assigned to the public IP address. CLI Example: .. code-block:: bash salt-call azurearm_network.public_ip_address_get test-pub-ip testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
26.235294
nickmckay/LiPD-utilities
Python/lipd/timeseries.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/timeseries.py#L196-L236
def _extract_pc(d, root, pc, whichtables):
    """
    Extract all data from a PaleoData dictionary.

    :param dict d: PaleoData dictionary
    :param dict root: Time series root data
    :param str pc: paleoData or chronData
    :param str whichtables: all, meas, summ, or ens
    :return list _ts: Time series
    """
    logger_ts.info("enter extract_pc")
    _ts = []
    try:
        # For each table in pc
        for k, v in d[pc].items():
            if whichtables == "all" or whichtables == "meas":
                for _table_name1, _table_data1 in v["measurementTable"].items():
                    _ts = _extract_table(_table_data1, copy.deepcopy(root), pc, _ts, "meas")
            if whichtables != "meas":
                if "model" in v:
                    for _table_name1, _table_data1 in v["model"].items():
                        # get the method info for this model. This will be paired to all summ and ens table data
                        _method = _extract_method(_table_data1["method"])
                        if whichtables == "all" or whichtables == "summ":
                            if "summaryTable" in _table_data1:
                                for _table_name2, _table_data2 in _table_data1["summaryTable"].items():
                                    # take a copy of this tso root
                                    _tso = copy.deepcopy(root)
                                    # add in the method details
                                    _tso.update(_method)
                                    # add in the table details
                                    _ts = _extract_table(_table_data2, _tso, pc, _ts, "summ")
                        if whichtables == "all" or whichtables == "ens":
                            if "ensembleTable" in _table_data1:
                                for _table_name2, _table_data2 in _table_data1["ensembleTable"].items():
                                    _tso = copy.deepcopy(root)
                                    _tso.update(_method)
                                    _ts = _extract_table(_table_data2, _tso, pc, _ts, "ens")
    except Exception as e:
        logger_ts.warn("extract_pc: Exception: {}".format(e))

    return _ts
[ "def", "_extract_pc", "(", "d", ",", "root", ",", "pc", ",", "whichtables", ")", ":", "logger_ts", ".", "info", "(", "\"enter extract_pc\"", ")", "_ts", "=", "[", "]", "try", ":", "# For each table in pc", "for", "k", ",", "v", "in", "d", "[", "pc", "]", ".", "items", "(", ")", ":", "if", "whichtables", "==", "\"all\"", "or", "whichtables", "==", "\"meas\"", ":", "for", "_table_name1", ",", "_table_data1", "in", "v", "[", "\"measurementTable\"", "]", ".", "items", "(", ")", ":", "_ts", "=", "_extract_table", "(", "_table_data1", ",", "copy", ".", "deepcopy", "(", "root", ")", ",", "pc", ",", "_ts", ",", "\"meas\"", ")", "if", "whichtables", "!=", "\"meas\"", ":", "if", "\"model\"", "in", "v", ":", "for", "_table_name1", ",", "_table_data1", "in", "v", "[", "\"model\"", "]", ".", "items", "(", ")", ":", "# get the method info for this model. This will be paired to all summ and ens table data", "_method", "=", "_extract_method", "(", "_table_data1", "[", "\"method\"", "]", ")", "if", "whichtables", "==", "\"all\"", "or", "whichtables", "==", "\"summ\"", ":", "if", "\"summaryTable\"", "in", "_table_data1", ":", "for", "_table_name2", ",", "_table_data2", "in", "_table_data1", "[", "\"summaryTable\"", "]", ".", "items", "(", ")", ":", "# take a copy of this tso root", "_tso", "=", "copy", ".", "deepcopy", "(", "root", ")", "# add in the method details", "_tso", ".", "update", "(", "_method", ")", "# add in the table details", "_ts", "=", "_extract_table", "(", "_table_data2", ",", "_tso", ",", "pc", ",", "_ts", ",", "\"summ\"", ")", "if", "whichtables", "==", "\"all\"", "or", "whichtables", "==", "\"ens\"", ":", "if", "\"ensembleTable\"", "in", "_table_data1", ":", "for", "_table_name2", ",", "_table_data2", "in", "_table_data1", "[", "\"ensembleTable\"", "]", ".", "items", "(", ")", ":", "_tso", "=", "copy", ".", "deepcopy", "(", "root", ")", "_tso", ".", "update", "(", "_method", ")", "_ts", "=", "_extract_table", "(", "_table_data2", ",", "_tso", ",", "pc", ",", "_ts", ",", "\"ens\"", ")", "except", "Exception", "as", "e", ":", "logger_ts", ".", "warn", "(", "\"extract_pc: Exception: {}\"", ".", "format", "(", "e", ")", ")", "return", "_ts" ]
Extract all data from a PaleoData dictionary. :param dict d: PaleoData dictionary :param dict root: Time series root data :param str pc: paleoData or chronData :param str whichtables: all, meas, summ, or ens :return list _ts: Time series
[ "Extract", "all", "data", "from", "a", "PaleoData", "dictionary", ".", ":", "param", "dict", "d", ":", "PaleoData", "dictionary", ":", "param", "dict", "root", ":", "Time", "series", "root", "data", ":", "param", "str", "pc", ":", "paleoData", "or", "chronData", ":", "param", "str", "whichtables", ":", "all", "meas", "summ", "or", "ens", ":", "return", "list", "_ts", ":", "Time", "series" ]
python
train
53.268293
mfcloud/python-zvm-sdk
zvmsdk/utils.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/zvmsdk/utils.py#L365-L379
def wrap_invalid_resp_data_error(function):
    """Catch exceptions when using zvm client response data."""

    @functools.wraps(function)
    def decorated_function(*arg, **kwargs):
        try:
            return function(*arg, **kwargs)
        except (ValueError, TypeError, IndexError, AttributeError,
                KeyError) as err:
            msg = ('Invalid smt response data. Error: %s' % six.text_type(err))
            LOG.error(msg)
            raise exception.SDKInternalError(msg=msg)

    return decorated_function
[ "def", "wrap_invalid_resp_data_error", "(", "function", ")", ":", "@", "functools", ".", "wraps", "(", "function", ")", "def", "decorated_function", "(", "*", "arg", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "function", "(", "*", "arg", ",", "*", "*", "kwargs", ")", "except", "(", "ValueError", ",", "TypeError", ",", "IndexError", ",", "AttributeError", ",", "KeyError", ")", "as", "err", ":", "msg", "=", "(", "'Invalid smt response data. Error: %s'", "%", "six", ".", "text_type", "(", "err", ")", ")", "LOG", ".", "error", "(", "msg", ")", "raise", "exception", ".", "SDKInternalError", "(", "msg", "=", "msg", ")", "return", "decorated_function" ]
Catch exceptions when using zvm client response data.
[ "Catch", "exceptions", "when", "using", "zvm", "client", "response", "data", "." ]
python
train
35.866667
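The same wrap-and-translate decorator pattern, made standalone with a local exception class in place of zvmsdk's exception.SDKInternalError:

import functools

class SDKInternalError(Exception):
    pass  # local stand-in for zvmsdk's exception.SDKInternalError

def wrap_invalid_resp_data_error(function):
    @functools.wraps(function)
    def decorated_function(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except (ValueError, TypeError, IndexError, AttributeError, KeyError) as err:
            raise SDKInternalError('Invalid smt response data. Error: %s' % err)
    return decorated_function

@wrap_invalid_resp_data_error
def first_entry(resp):
    return resp["data"][0]

print(first_entry({"data": [42]}))  # 42
# first_entry({}) raises SDKInternalError instead of a bare KeyError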
ChrisCummins/labm8
jsonutil.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/jsonutil.py#L75-L87
def write_file(path, data, format=True): """ Write JSON data to file. Arguments: path (str): Destination. data (dict or list): JSON serializable data. format (bool, optional): Pretty-print JSON data. """ if format: fs.write_file(path, format_json(data)) else: fs.write_file(path, json.dumps(data))
[ "def", "write_file", "(", "path", ",", "data", ",", "format", "=", "True", ")", ":", "if", "format", ":", "fs", ".", "write_file", "(", "path", ",", "format_json", "(", "data", ")", ")", "else", ":", "fs", ".", "write_file", "(", "path", ",", "json", ".", "dumps", "(", "data", ")", ")" ]
Write JSON data to file. Arguments: path (str): Destination. data (dict or list): JSON serializable data. format (bool, optional): Pretty-print JSON data.
[ "Write", "JSON", "data", "to", "file", "." ]
python
train
26.923077
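A short sketch of the two code paths, assuming labm8's fs.write_file and format_json are in scope as in the module above:
write_file('out.json', {'a': 1})                     # pretty-printed via format_json
write_file('out.min.json', {'a': 1}, format=False)   # compact json.dumps output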
VIVelev/PyDojoML
dojo/base/preprocessor.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/base/preprocessor.py#L32-L50
def get_params(self, *keys): """Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary """ if len(keys) == 0: return vars(self) else: return [vars(self)[k] for k in keys]
[ "def", "get_params", "(", "self", ",", "*", "keys", ")", ":", "if", "len", "(", "keys", ")", "==", "0", ":", "return", "vars", "(", "self", ")", "else", ":", "return", "[", "vars", "(", "self", ")", "[", "k", "]", "for", "k", "in", "keys", "]" ]
Returns the specified parameters for the current preprocessor. Parameters: ----------- keys : variable sized list, containing the names of the requested parameters Returns: -------- values : list or dictionary, if any `keys` are specified those named parameters' values are returned, otherwise all parameters are returned as a dictionary
[ "Returns", "the", "specified", "parameters", "for", "the", "current", "preprocessor", "." ]
python
train
29.842105
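A usage sketch, assuming a hypothetical preprocessor instance `scaler` with attributes `mean` and `std`:
scaler.get_params()               # -> {'mean': ..., 'std': ...} (all parameters as a dict)
scaler.get_params('mean', 'std')  # -> [mean_value, std_value] (list, in key order)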
NikolayDachev/jadm
lib/tabulate-0.7.2/tabulate.py
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/tabulate-0.7.2/tabulate.py#L466-L537
def _normalize_tabular_data(tabular_data, headers): """Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * list of named tuples (usually used with headers="keys") * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". """ if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = tabular_data.keys() rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed elif hasattr(tabular_data, "index"): # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0) keys = tabular_data.keys() vals = tabular_data.values # values matrix doesn't need to be transposed names = tabular_data.index rows = [[v]+list(row) for v,row in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") if headers == "keys": headers = list(map(_text_type,keys)) # headers should be strings else: # it's a usual an iterable of iterables, or a NumPy array rows = list(tabular_data) if (headers == "keys" and hasattr(tabular_data, "dtype") and getattr(tabular_data.dtype, "names")): # numpy record array headers = tabular_data.dtype.names elif (headers == "keys" and len(rows) > 0 and isinstance(rows[0], tuple) and hasattr(rows[0], "_fields")): # namedtuple headers = list(map(_text_type, rows[0]._fields)) elif headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: headers = list(map(_text_type, rows[0])) # headers should be strings rows = rows[1:] headers = list(headers) rows = list(map(list,rows)) # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [""]*(ncols - nhs) + headers return rows, headers
[ "def", "_normalize_tabular_data", "(", "tabular_data", ",", "headers", ")", ":", "if", "hasattr", "(", "tabular_data", ",", "\"keys\"", ")", "and", "hasattr", "(", "tabular_data", ",", "\"values\"", ")", ":", "# dict-like and pandas.DataFrame?", "if", "hasattr", "(", "tabular_data", ".", "values", ",", "\"__call__\"", ")", ":", "# likely a conventional dict", "keys", "=", "tabular_data", ".", "keys", "(", ")", "rows", "=", "list", "(", "izip_longest", "(", "*", "tabular_data", ".", "values", "(", ")", ")", ")", "# columns have to be transposed", "elif", "hasattr", "(", "tabular_data", ",", "\"index\"", ")", ":", "# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)", "keys", "=", "tabular_data", ".", "keys", "(", ")", "vals", "=", "tabular_data", ".", "values", "# values matrix doesn't need to be transposed", "names", "=", "tabular_data", ".", "index", "rows", "=", "[", "[", "v", "]", "+", "list", "(", "row", ")", "for", "v", ",", "row", "in", "zip", "(", "names", ",", "vals", ")", "]", "else", ":", "raise", "ValueError", "(", "\"tabular data doesn't appear to be a dict or a DataFrame\"", ")", "if", "headers", "==", "\"keys\"", ":", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "keys", ")", ")", "# headers should be strings", "else", ":", "# it's a usual an iterable of iterables, or a NumPy array", "rows", "=", "list", "(", "tabular_data", ")", "if", "(", "headers", "==", "\"keys\"", "and", "hasattr", "(", "tabular_data", ",", "\"dtype\"", ")", "and", "getattr", "(", "tabular_data", ".", "dtype", ",", "\"names\"", ")", ")", ":", "# numpy record array", "headers", "=", "tabular_data", ".", "dtype", ".", "names", "elif", "(", "headers", "==", "\"keys\"", "and", "len", "(", "rows", ")", ">", "0", "and", "isinstance", "(", "rows", "[", "0", "]", ",", "tuple", ")", "and", "hasattr", "(", "rows", "[", "0", "]", ",", "\"_fields\"", ")", ")", ":", "# namedtuple", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "rows", "[", "0", "]", ".", "_fields", ")", ")", "elif", "headers", "==", "\"keys\"", "and", "len", "(", "rows", ")", ">", "0", ":", "# keys are column indices", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "range", "(", "len", "(", "rows", "[", "0", "]", ")", ")", ")", ")", "# take headers from the first row if necessary", "if", "headers", "==", "\"firstrow\"", "and", "len", "(", "rows", ")", ">", "0", ":", "headers", "=", "list", "(", "map", "(", "_text_type", ",", "rows", "[", "0", "]", ")", ")", "# headers should be strings", "rows", "=", "rows", "[", "1", ":", "]", "headers", "=", "list", "(", "headers", ")", "rows", "=", "list", "(", "map", "(", "list", ",", "rows", ")", ")", "# pad with empty headers for initial columns if necessary", "if", "headers", "and", "len", "(", "rows", ")", ">", "0", ":", "nhs", "=", "len", "(", "headers", ")", "ncols", "=", "len", "(", "rows", "[", "0", "]", ")", "if", "nhs", "<", "ncols", ":", "headers", "=", "[", "\"\"", "]", "*", "(", "ncols", "-", "nhs", ")", "+", "headers", "return", "rows", ",", "headers" ]
Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * list of named tuples (usually used with headers="keys") * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys".
[ "Transform", "a", "supported", "data", "type", "to", "a", "list", "of", "lists", "and", "a", "list", "of", "headers", "." ]
python
train
37.25
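Two illustrative calls, assuming the module-level helpers above are in scope; the results follow directly from the dict and firstrow branches:
rows, headers = _normalize_tabular_data({'x': [1, 2], 'y': [3, 4]}, 'keys')
# rows -> [[1, 3], [2, 4]] (columns transposed), headers -> ['x', 'y']
rows, headers = _normalize_tabular_data([['x', 'y'], [1, 3]], 'firstrow')
# rows -> [[1, 3]], headers -> ['x', 'y'] (first row consumed as headers)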
vmware/pyvmomi
pyVmomi/VmomiSupport.py
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVmomi/VmomiSupport.py#L1702-L1710
def _GetActualName(name): """ Note: Must be holding the _lazyLock """ if _allowCapitalizedNames: name = UncapitalizeVmodlName(name) for defMap in _dataDefMap, _managedDefMap, _enumDefMap: dic = defMap.get(name) if dic: return dic[0] return None
[ "def", "_GetActualName", "(", "name", ")", ":", "if", "_allowCapitalizedNames", ":", "name", "=", "UncapitalizeVmodlName", "(", "name", ")", "for", "defMap", "in", "_dataDefMap", ",", "_managedDefMap", ",", "_enumDefMap", ":", "dic", "=", "defMap", ".", "get", "(", "name", ")", "if", "dic", ":", "return", "dic", "[", "0", "]", "return", "None" ]
Note: Must be holding the _lazyLock
[ "Note", ":", "Must", "be", "holding", "the", "_lazyLock" ]
python
train
30.555556
ebroecker/canmatrix
src/canmatrix/formats/arxml.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/formats/arxml.py#L1349-L1361
def get_element_desc(element, ar_tree, ns): # type: (_Element, _DocRoot, str) -> str """Get element description from XML.""" desc = get_child(element, "DESC", ar_tree, ns) txt = get_child(desc, 'L-2[@L="DE"]', ar_tree, ns) if txt is None: txt = get_child(desc, 'L-2[@L="EN"]', ar_tree, ns) if txt is None: txt = get_child(desc, 'L-2', ar_tree, ns) if txt is not None: return txt.text else: return ""
[ "def", "get_element_desc", "(", "element", ",", "ar_tree", ",", "ns", ")", ":", "# type: (_Element, _DocRoot, str) -> str", "desc", "=", "get_child", "(", "element", ",", "\"DESC\"", ",", "ar_tree", ",", "ns", ")", "txt", "=", "get_child", "(", "desc", ",", "'L-2[@L=\"DE\"]'", ",", "ar_tree", ",", "ns", ")", "if", "txt", "is", "None", ":", "txt", "=", "get_child", "(", "desc", ",", "'L-2[@L=\"EN\"]'", ",", "ar_tree", ",", "ns", ")", "if", "txt", "is", "None", ":", "txt", "=", "get_child", "(", "desc", ",", "'L-2'", ",", "ar_tree", ",", "ns", ")", "if", "txt", "is", "not", "None", ":", "return", "txt", ".", "text", "else", ":", "return", "\"\"" ]
Get element description from XML.
[ "Get", "element", "description", "from", "XML", "." ]
python
train
34.692308
materialsproject/pymatgen
pymatgen/analysis/chemenv/coordination_environments/structure_environments.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/chemenv/coordination_environments/structure_environments.py#L936-L963
def as_dict(self): """ Bson-serializable dict representation of the StructureEnvironments object. :return: Bson-serializable dict representation of the StructureEnvironments object. """ ce_list_dict = [{str(cn): [ce.as_dict() if ce is not None else None for ce in ce_dict[cn]] for cn in ce_dict} if ce_dict is not None else None for ce_dict in self.ce_list] nbs_sets_dict = [{str(cn): [nb_set.as_dict() for nb_set in nb_sets] for cn, nb_sets in site_nbs_sets.items()} if site_nbs_sets is not None else None for site_nbs_sets in self.neighbors_sets] info_dict = {key: val for key, val in self.info.items() if key not in ['sites_info']} info_dict['sites_info'] = [{'nb_sets_info': {str(cn): {str(inb_set): nb_set_info for inb_set, nb_set_info in cn_sets.items()} for cn, cn_sets in site_info['nb_sets_info'].items()}, 'time': site_info['time']} if 'nb_sets_info' in site_info else {} for site_info in self.info['sites_info']] return {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "voronoi": self.voronoi.as_dict(), "valences": self.valences, "sites_map": self.sites_map, "equivalent_sites": [[ps.as_dict() for ps in psl] for psl in self.equivalent_sites], "ce_list": ce_list_dict, "structure": self.structure.as_dict(), "neighbors_sets": nbs_sets_dict, "info": info_dict}
[ "def", "as_dict", "(", "self", ")", ":", "ce_list_dict", "=", "[", "{", "str", "(", "cn", ")", ":", "[", "ce", ".", "as_dict", "(", ")", "if", "ce", "is", "not", "None", "else", "None", "for", "ce", "in", "ce_dict", "[", "cn", "]", "]", "for", "cn", "in", "ce_dict", "}", "if", "ce_dict", "is", "not", "None", "else", "None", "for", "ce_dict", "in", "self", ".", "ce_list", "]", "nbs_sets_dict", "=", "[", "{", "str", "(", "cn", ")", ":", "[", "nb_set", ".", "as_dict", "(", ")", "for", "nb_set", "in", "nb_sets", "]", "for", "cn", ",", "nb_sets", "in", "site_nbs_sets", ".", "items", "(", ")", "}", "if", "site_nbs_sets", "is", "not", "None", "else", "None", "for", "site_nbs_sets", "in", "self", ".", "neighbors_sets", "]", "info_dict", "=", "{", "key", ":", "val", "for", "key", ",", "val", "in", "self", ".", "info", ".", "items", "(", ")", "if", "key", "not", "in", "[", "'sites_info'", "]", "}", "info_dict", "[", "'sites_info'", "]", "=", "[", "{", "'nb_sets_info'", ":", "{", "str", "(", "cn", ")", ":", "{", "str", "(", "inb_set", ")", ":", "nb_set_info", "for", "inb_set", ",", "nb_set_info", "in", "cn_sets", ".", "items", "(", ")", "}", "for", "cn", ",", "cn_sets", "in", "site_info", "[", "'nb_sets_info'", "]", ".", "items", "(", ")", "}", ",", "'time'", ":", "site_info", "[", "'time'", "]", "}", "if", "'nb_sets_info'", "in", "site_info", "else", "{", "}", "for", "site_info", "in", "self", ".", "info", "[", "'sites_info'", "]", "]", "return", "{", "\"@module\"", ":", "self", ".", "__class__", ".", "__module__", ",", "\"@class\"", ":", "self", ".", "__class__", ".", "__name__", ",", "\"voronoi\"", ":", "self", ".", "voronoi", ".", "as_dict", "(", ")", ",", "\"valences\"", ":", "self", ".", "valences", ",", "\"sites_map\"", ":", "self", ".", "sites_map", ",", "\"equivalent_sites\"", ":", "[", "[", "ps", ".", "as_dict", "(", ")", "for", "ps", "in", "psl", "]", "for", "psl", "in", "self", ".", "equivalent_sites", "]", ",", "\"ce_list\"", ":", "ce_list_dict", ",", "\"structure\"", ":", "self", ".", "structure", ".", "as_dict", "(", ")", ",", "\"neighbors_sets\"", ":", "nbs_sets_dict", ",", "\"info\"", ":", "info_dict", "}" ]
Bson-serializable dict representation of the StructureEnvironments object. :return: Bson-serializable dict representation of the StructureEnvironments object.
[ "Bson", "-", "serializable", "dict", "representation", "of", "the", "StructureEnvironments", "object", ".", ":", "return", ":", "Bson", "-", "serializable", "dict", "representation", "of", "the", "StructureEnvironments", "object", "." ]
python
train
63.357143
UDST/urbansim
urbansim/models/dcm.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1119-L1156
def summed_probabilities(self, choosers, alternatives): """ Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together. """ if len(alternatives) == 0 or len(choosers) == 0: return pd.Series() logger.debug( 'start: calculate summed probabilities in LCM group {}'.format( self.name)) probs = [] for name, df in self._iter_groups(choosers): probs.append( self.models[name].summed_probabilities(df, alternatives)) add = tz.curry(pd.Series.add, fill_value=0) probs = tz.reduce(add, probs) logger.debug( 'finish: calculate summed probabilities in LCM group {}'.format( self.name)) return probs
[ "def", "summed_probabilities", "(", "self", ",", "choosers", ",", "alternatives", ")", ":", "if", "len", "(", "alternatives", ")", "==", "0", "or", "len", "(", "choosers", ")", "==", "0", ":", "return", "pd", ".", "Series", "(", ")", "logger", ".", "debug", "(", "'start: calculate summed probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "probs", "=", "[", "]", "for", "name", ",", "df", "in", "self", ".", "_iter_groups", "(", "choosers", ")", ":", "probs", ".", "append", "(", "self", ".", "models", "[", "name", "]", ".", "summed_probabilities", "(", "df", ",", "alternatives", ")", ")", "add", "=", "tz", ".", "curry", "(", "pd", ".", "Series", ".", "add", ",", "fill_value", "=", "0", ")", "probs", "=", "tz", ".", "reduce", "(", "add", ",", "probs", ")", "logger", ".", "debug", "(", "'finish: calculate summed probabilities in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "probs" ]
Returns the sum of probabilities for alternatives across all chooser segments. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. Returns ------- probs : pandas.Series Summed probabilities from each segment added together.
[ "Returns", "the", "sum", "of", "probabilities", "for", "alternatives", "across", "all", "chooser", "segments", "." ]
python
train
32.342105
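A usage sketch; `lcm_group`, `households`, and `buildings` are hypothetical, and choosers must carry the group's segmentation_col:
probs = lcm_group.summed_probabilities(households, buildings)
# a pandas.Series indexed by alternative, with each segment's
# probabilities added together (missing entries filled with 0)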
theislab/scanpy
scanpy/readwrite.py
https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/readwrite.py#L622-L637
def get_used_files(): """Get files used by processes with name scanpy.""" import psutil loop_over_scanpy_processes = (proc for proc in psutil.process_iter() if proc.name() == 'scanpy') filenames = [] for proc in loop_over_scanpy_processes: try: flist = proc.open_files() for nt in flist: filenames.append(nt.path) # This catches a race condition where a process ends # before we can examine its files except psutil.NoSuchProcess as err: pass return set(filenames)
[ "def", "get_used_files", "(", ")", ":", "import", "psutil", "loop_over_scanpy_processes", "=", "(", "proc", "for", "proc", "in", "psutil", ".", "process_iter", "(", ")", "if", "proc", ".", "name", "(", ")", "==", "'scanpy'", ")", "filenames", "=", "[", "]", "for", "proc", "in", "loop_over_scanpy_processes", ":", "try", ":", "flist", "=", "proc", ".", "open_files", "(", ")", "for", "nt", "in", "flist", ":", "filenames", ".", "append", "(", "nt", ".", "path", ")", "# This catches a race condition where a process ends", "# before we can examine its files", "except", "psutil", ".", "NoSuchProcess", "as", "err", ":", "pass", "return", "set", "(", "filenames", ")" ]
Get files used by processes with name scanpy.
[ "Get", "files", "used", "by", "processes", "with", "name", "scanpy", "." ]
python
train
36.875
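A sketch of how the result might be used; wait_until_free is a hypothetical back-off helper, not part of scanpy:
if filename in get_used_files():
    # another process named 'scanpy' currently has this file open
    wait_until_free(filename)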
estnltk/estnltk
estnltk/text.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/text.py#L1180-L1184
def spellcheck_results(self): """The list of True/False values denoting the correct spelling of words.""" if not self.is_tagged(WORDS): self.tokenize_words() return vabamorf.spellcheck(self.word_texts, suggestions=True)
[ "def", "spellcheck_results", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "WORDS", ")", ":", "self", ".", "tokenize_words", "(", ")", "return", "vabamorf", ".", "spellcheck", "(", "self", ".", "word_texts", ",", "suggestions", "=", "True", ")" ]
The list of True/False values denoting the correct spelling of words.
[ "The", "list", "of", "True", "/", "False", "values", "denoting", "the", "correct", "spelling", "of", "words", "." ]
python
train
50.2
CitrineInformatics/python-citrination-client
citrination_client/data/client.py
https://github.com/CitrineInformatics/python-citrination-client/blob/409984fc65ce101a620f069263f155303492465c/citrination_client/data/client.py#L136-L151
def get_ingest_status(self, dataset_id): """ Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state this endpoint will return error/failure. If any files are still processing, will return processing. :param dataset_id: Dataset identifier :return: Status of dataset ingestion as a string """ failure_message = "Failed to create dataset ingest status for dataset {}".format(dataset_id) response = self._get_success_json( self._get('v1/datasets/' + str(dataset_id) + '/ingest-status', failure_message=failure_message))['data'] if 'status' in response: return response['status'] return ''
[ "def", "get_ingest_status", "(", "self", ",", "dataset_id", ")", ":", "failure_message", "=", "\"Failed to create dataset ingest status for dataset {}\"", ".", "format", "(", "dataset_id", ")", "response", "=", "self", ".", "_get_success_json", "(", "self", ".", "_get", "(", "'v1/datasets/'", "+", "str", "(", "dataset_id", ")", "+", "'/ingest-status'", ",", "failure_message", "=", "failure_message", ")", ")", "[", "'data'", "]", "if", "'status'", "in", "response", ":", "return", "response", "[", "'status'", "]", "return", "''" ]
Returns the current status of dataset ingestion. If any file uploaded to a dataset is in an error/failure state this endpoint will return error/failure. If any files are still processing, will return processing. :param dataset_id: Dataset identifier :return: Status of dataset ingestion as a string
[ "Returns", "the", "current", "status", "of", "dataset", "ingestion", ".", "If", "any", "file", "uploaded", "to", "a", "dataset", "is", "in", "an", "error", "/", "failure", "state", "this", "endpoint", "will", "return", "error", "/", "failure", ".", "If", "any", "files", "are", "still", "processing", "will", "return", "processing", "." ]
python
valid
47.625
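A polling sketch; `client` stands in for a data client instance, the dataset id is made up, and the status strings follow the docstring above:
import time
status = client.get_ingest_status(dataset_id=42)
while status == 'processing':
    time.sleep(10)   # an empty string means no status was reported
    status = client.get_ingest_status(dataset_id=42)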
jrief/django-sass-processor
sass_processor/management/commands/compilescss.py
https://github.com/jrief/django-sass-processor/blob/3ca746258432b1428daee9a2b2f7e05a1e327492/sass_processor/management/commands/compilescss.py#L180-L197
def find_sources(self): """ Look for Python sources available for the current configuration. """ app_configs = apps.get_app_configs() for app_config in app_configs: ignore_dirs = [] for root, dirs, files in os.walk(app_config.path): if [True for idir in ignore_dirs if root.startswith(idir)]: continue if '__init__.py' not in files: ignore_dirs.append(root) continue for filename in files: basename, ext = os.path.splitext(filename) if ext != '.py': continue yield os.path.abspath(os.path.join(root, filename))
[ "def", "find_sources", "(", "self", ")", ":", "app_configs", "=", "apps", ".", "get_app_configs", "(", ")", "for", "app_config", "in", "app_configs", ":", "ignore_dirs", "=", "[", "]", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "app_config", ".", "path", ")", ":", "if", "[", "True", "for", "idir", "in", "ignore_dirs", "if", "root", ".", "startswith", "(", "idir", ")", "]", ":", "continue", "if", "'__init__.py'", "not", "in", "files", ":", "ignore_dirs", ".", "append", "(", "root", ")", "continue", "for", "filename", "in", "files", ":", "basename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "if", "ext", "!=", "'.py'", ":", "continue", "yield", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "filename", ")", ")" ]
Look for Python sources available for the current configuration.
[ "Look", "for", "Python", "sources", "available", "for", "the", "current", "configuration", "." ]
python
train
41.611111
statueofmike/rtsp
scripts/others/rts2.py
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/others/rts2.py#L221-L233
def recvRtspReply(self): """Receive RTSP reply from the server.""" while True: reply = self.rtspSocket.recv(1024) if reply: self.parseRtspReply(reply) # Close the RTSP socket upon requesting Teardown if self.requestSent == self.TEARDOWN: self.rtspSocket.shutdown(socket.SHUT_RDWR) self.rtspSocket.close() break
[ "def", "recvRtspReply", "(", "self", ")", ":", "while", "True", ":", "reply", "=", "self", ".", "rtspSocket", ".", "recv", "(", "1024", ")", "if", "reply", ":", "self", ".", "parseRtspReply", "(", "reply", ")", "# Close the RTSP socket upon requesting Teardown", "if", "self", ".", "requestSent", "==", "self", ".", "TEARDOWN", ":", "self", ".", "rtspSocket", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")", "self", ".", "rtspSocket", ".", "close", "(", ")", "break" ]
Receive RTSP reply from the server.
[ "Receive", "RTSP", "reply", "from", "the", "server", "." ]
python
train
28
akesterson/dpath-python
dpath/util.py
https://github.com/akesterson/dpath-python/blob/2d9117c5fc6870d546aadefb5bf3ab194f4c7411/dpath/util.py#L35-L47
def new(obj, path, value, separator="/"): """ Set the element at the terminus of path to value, and create it if it does not exist (as opposed to 'set' that can only change existing keys). path will NOT be treated like a glob. If it has globbing characters in it, they will become part of the resulting keys """ pathlist = __safe_path__(path, separator) pathobj = dpath.path.path_types(obj, pathlist) return dpath.path.set(obj, pathobj, value, create_missing=True)
[ "def", "new", "(", "obj", ",", "path", ",", "value", ",", "separator", "=", "\"/\"", ")", ":", "pathlist", "=", "__safe_path__", "(", "path", ",", "separator", ")", "pathobj", "=", "dpath", ".", "path", ".", "path_types", "(", "obj", ",", "pathlist", ")", "return", "dpath", ".", "path", ".", "set", "(", "obj", ",", "pathobj", ",", "value", ",", "create_missing", "=", "True", ")" ]
Set the element at the terminus of path to value, and create it if it does not exist (as opposed to 'set' that can only change existing keys). path will NOT be treated like a glob. If it has globbing characters in it, they will become part of the resulting keys
[ "Set", "the", "element", "at", "the", "terminus", "of", "path", "to", "value", "and", "create", "it", "if", "it", "does", "not", "exist", "(", "as", "opposed", "to", "set", "that", "can", "only", "change", "existing", "keys", ")", "." ]
python
train
38.230769
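A minimal example of the create-missing behavior:
obj = {}
new(obj, '/a/b/c', 42)   # intermediate dicts are created along the path
# obj -> {'a': {'b': {'c': 42}}}; glob characters would be kept literally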
jelmer/python-fastimport
fastimport/parser.py
https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/parser.py#L587-L600
def _path_pair(self, s): """Parse two paths separated by a space.""" # TODO: handle a space in the first path if s.startswith(b'"'): parts = s[1:].split(b'" ', 1) else: parts = s.split(b' ', 1) if len(parts) != 2: self.abort(errors.BadFormat, '?', '?', s) elif parts[1].startswith(b'"') and parts[1].endswith(b'"'): parts[1] = parts[1][1:-1] elif parts[1].startswith(b'"') or parts[1].endswith(b'"'): self.abort(errors.BadFormat, '?', '?', s) return [_unquote_c_string(s) for s in parts]
[ "def", "_path_pair", "(", "self", ",", "s", ")", ":", "# TODO: handle a space in the first path", "if", "s", ".", "startswith", "(", "b'\"'", ")", ":", "parts", "=", "s", "[", "1", ":", "]", ".", "split", "(", "b'\" '", ",", "1", ")", "else", ":", "parts", "=", "s", ".", "split", "(", "b' '", ",", "1", ")", "if", "len", "(", "parts", ")", "!=", "2", ":", "self", ".", "abort", "(", "errors", ".", "BadFormat", ",", "'?'", ",", "'?'", ",", "s", ")", "elif", "parts", "[", "1", "]", ".", "startswith", "(", "b'\"'", ")", "and", "parts", "[", "1", "]", ".", "endswith", "(", "b'\"'", ")", ":", "parts", "[", "1", "]", "=", "parts", "[", "1", "]", "[", "1", ":", "-", "1", "]", "elif", "parts", "[", "1", "]", ".", "startswith", "(", "b'\"'", ")", "or", "parts", "[", "1", "]", ".", "endswith", "(", "b'\"'", ")", ":", "self", ".", "abort", "(", "errors", ".", "BadFormat", ",", "'?'", ",", "'?'", ",", "s", ")", "return", "[", "_unquote_c_string", "(", "s", ")", "for", "s", "in", "parts", "]" ]
Parse two paths separated by a space.
[ "Parse", "two", "paths", "separated", "by", "a", "space", "." ]
python
train
42.714286
jedie/DragonPy
dragonpy/utils/srecord_utils.py
https://github.com/jedie/DragonPy/blob/6659e5b5133aab26979a498ee7453495773a4f6c/dragonpy/utils/srecord_utils.py#L161-L172
def parse_srec(srec): """ Extract the data portion of a given S-Record (without checksum) Returns: the record type, the length of the data section, the write address, the data itself and the checksum """ record_type = srec[0:2] data_len = srec[2:4] addr_len = __ADDR_LEN.get(record_type) * 2 addr = srec[4:4 + addr_len] data = srec[4 + addr_len:len(srec)-2] checksum = srec[len(srec) - 2:] return record_type, data_len, addr, data, checksum
[ "def", "parse_srec", "(", "srec", ")", ":", "record_type", "=", "srec", "[", "0", ":", "2", "]", "data_len", "=", "srec", "[", "2", ":", "4", "]", "addr_len", "=", "__ADDR_LEN", ".", "get", "(", "record_type", ")", "*", "2", "addr", "=", "srec", "[", "4", ":", "4", "+", "addr_len", "]", "data", "=", "srec", "[", "4", "+", "addr_len", ":", "len", "(", "srec", ")", "-", "2", "]", "checksum", "=", "srec", "[", "len", "(", "srec", ")", "-", "2", ":", "]", "return", "record_type", ",", "data_len", ",", "addr", ",", "data", ",", "checksum" ]
Extract the data portion of a given S-Record (without checksum) Returns: the record type, the length of the data section, the write address, the data itself and the checksum
[ "Extract", "the", "data", "portion", "of", "a", "given", "S", "-", "Record", "(", "without", "checksum", ")", "Returns", ":", "the", "record", "type", "the", "lenght", "of", "the", "data", "section", "the", "write", "address", "the", "data", "itself", "and", "the", "checksum" ]
python
train
40.083333
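A worked example on a standard S1 record, assuming __ADDR_LEN maps 'S1' to a 2-byte address (so addr_len becomes 4 hex digits):
rec = 'S1131000285F245F2212226A000424290008237C2A'
rtype, dlen, addr, data, csum = parse_srec(rec)
# rtype='S1', dlen='13', addr='1000', csum='2A',
# data='285F245F2212226A000424290008237C'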
thebigmunch/google-music
src/google_music/clients/mobileclient.py
https://github.com/thebigmunch/google-music/blob/d8a94dab462a1f063fbc1152187a73dc2f0e2a85/src/google_music/clients/mobileclient.py#L1512-L1525
def situations(self, *, tz_offset=None): """Get a listing of situations. Parameters: tz_offset (int, Optional): A time zone offset from UTC in seconds. """ response = self._call( mc_calls.ListenNowSituations, tz_offset ) situation_list = response.body.get('situations', []) return situation_list
[ "def", "situations", "(", "self", ",", "*", ",", "tz_offset", "=", "None", ")", ":", "response", "=", "self", ".", "_call", "(", "mc_calls", ".", "ListenNowSituations", ",", "tz_offset", ")", "situation_list", "=", "response", ".", "body", ".", "get", "(", "'situations'", ",", "[", "]", ")", "return", "situation_list" ]
Get a listing of situations. Parameters: tz_offset (int, Optional): A time zone offset from UTC in seconds.
[ "Get", "a", "listing", "of", "situations", "." ]
python
train
22
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L1974-L2010
def argument_types(self): """Retrieve a container for the non-variadic arguments for this type. The returned object is iterable and indexable. Each item in the container is a Type instance. """ class ArgumentsIterator(collections.Sequence): def __init__(self, parent): self.parent = parent self.length = None def __len__(self): if self.length is None: self.length = conf.lib.clang_getNumArgTypes(self.parent) return self.length def __getitem__(self, key): # FIXME Support slice objects. if not isinstance(key, int): raise TypeError("Must supply a non-negative int.") if key < 0: raise IndexError("Only non-negative indexes are accepted.") if key >= len(self): raise IndexError("Index greater than container length: " "%d > %d" % ( key, len(self) )) result = conf.lib.clang_getArgType(self.parent, key) if result.kind == TypeKind.INVALID: raise IndexError("Argument could not be retrieved.") return result assert self.kind == TypeKind.FUNCTIONPROTO return ArgumentsIterator(self)
[ "def", "argument_types", "(", "self", ")", ":", "class", "ArgumentsIterator", "(", "collections", ".", "Sequence", ")", ":", "def", "__init__", "(", "self", ",", "parent", ")", ":", "self", ".", "parent", "=", "parent", "self", ".", "length", "=", "None", "def", "__len__", "(", "self", ")", ":", "if", "self", ".", "length", "is", "None", ":", "self", ".", "length", "=", "conf", ".", "lib", ".", "clang_getNumArgTypes", "(", "self", ".", "parent", ")", "return", "self", ".", "length", "def", "__getitem__", "(", "self", ",", "key", ")", ":", "# FIXME Support slice objects.", "if", "not", "isinstance", "(", "key", ",", "int", ")", ":", "raise", "TypeError", "(", "\"Must supply a non-negative int.\"", ")", "if", "key", "<", "0", ":", "raise", "IndexError", "(", "\"Only non-negative indexes are accepted.\"", ")", "if", "key", ">=", "len", "(", "self", ")", ":", "raise", "IndexError", "(", "\"Index greater than container length: \"", "\"%d > %d\"", "%", "(", "key", ",", "len", "(", "self", ")", ")", ")", "result", "=", "conf", ".", "lib", ".", "clang_getArgType", "(", "self", ".", "parent", ",", "key", ")", "if", "result", ".", "kind", "==", "TypeKind", ".", "INVALID", ":", "raise", "IndexError", "(", "\"Argument could not be retrieved.\"", ")", "return", "result", "assert", "self", ".", "kind", "==", "TypeKind", ".", "FUNCTIONPROTO", "return", "ArgumentsIterator", "(", "self", ")" ]
Retrieve a container for the non-variadic arguments for this type. The returned object is iterable and indexable. Each item in the container is a Type instance.
[ "Retrieve", "a", "container", "for", "the", "non", "-", "variadic", "arguments", "for", "this", "type", "." ]
python
train
36.648649
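A usage sketch for a function prototype such as int f(int, double), assuming `t` is a Type instance of kind FUNCTIONPROTO:
for arg in t.argument_types():   # iterable and indexable
    print(arg.spelling)          # 'int', 'double'
first = t.argument_types()[0]    # IndexError past len(), TypeError on non-int keys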
apache/incubator-mxnet
python/mxnet/context.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/context.py#L244-L259
def num_gpus(): """Query CUDA for the number of GPUs present. Raises ------ Will raise an exception on any CUDA error. Returns ------- count : int The number of GPUs. """ count = ctypes.c_int() check_call(_LIB.MXGetGPUCount(ctypes.byref(count))) return count.value
[ "def", "num_gpus", "(", ")", ":", "count", "=", "ctypes", ".", "c_int", "(", ")", "check_call", "(", "_LIB", ".", "MXGetGPUCount", "(", "ctypes", ".", "byref", "(", "count", ")", ")", ")", "return", "count", ".", "value" ]
Query CUDA for the number of GPUs present. Raises ------ Will raise an exception on any CUDA error. Returns ------- count : int The number of GPUs.
[ "Query", "CUDA", "for", "the", "number", "of", "GPUs", "present", "." ]
python
train
19
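A common device-selection sketch built on this call:
import mxnet as mx
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()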
bhmm/bhmm
bhmm/api.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/api.py#L97-L127
def gaussian_hmm(pi, P, means, sigmas): """ Initializes a 1D-Gaussian HMM Parameters ---------- pi : ndarray(nstates, ) Initial distribution. P : ndarray(nstates,nstates) Hidden transition matrix means : ndarray(nstates, ) Means of Gaussian output distributions sigmas : ndarray(nstates, ) Standard deviations of Gaussian output distributions stationary : bool, optional, default=True If True: initial distribution is equal to stationary distribution of transition matrix reversible : bool, optional, default=True If True: transition matrix will fulfill detailed balance constraints. """ from bhmm.hmm.gaussian_hmm import GaussianHMM from bhmm.output_models.gaussian import GaussianOutputModel # count states nstates = _np.array(P).shape[0] # initialize output model output_model = GaussianOutputModel(nstates, means, sigmas) # initialize general HMM from bhmm.hmm.generic_hmm import HMM as _HMM ghmm = _HMM(pi, P, output_model) # turn it into a Gaussian HMM ghmm = GaussianHMM(ghmm) return ghmm
[ "def", "gaussian_hmm", "(", "pi", ",", "P", ",", "means", ",", "sigmas", ")", ":", "from", "bhmm", ".", "hmm", ".", "gaussian_hmm", "import", "GaussianHMM", "from", "bhmm", ".", "output_models", ".", "gaussian", "import", "GaussianOutputModel", "# count states", "nstates", "=", "_np", ".", "array", "(", "P", ")", ".", "shape", "[", "0", "]", "# initialize output model", "output_model", "=", "GaussianOutputModel", "(", "nstates", ",", "means", ",", "sigmas", ")", "# initialize general HMM", "from", "bhmm", ".", "hmm", ".", "generic_hmm", "import", "HMM", "as", "_HMM", "ghmm", "=", "_HMM", "(", "pi", ",", "P", ",", "output_model", ")", "# turn it into a Gaussian HMM", "ghmm", "=", "GaussianHMM", "(", "ghmm", ")", "return", "ghmm" ]
Initializes a 1D-Gaussian HMM Parameters ---------- pi : ndarray(nstates, ) Initial distribution. P : ndarray(nstates,nstates) Hidden transition matrix means : ndarray(nstates, ) Means of Gaussian output distributions sigmas : ndarray(nstates, ) Standard deviations of Gaussian output distributions stationary : bool, optional, default=True If True: initial distribution is equal to stationary distribution of transition matrix reversible : bool, optional, default=True If True: transition matrix will fulfill detailed balance constraints.
[ "Initializes", "a", "1D", "-", "Gaussian", "HMM" ]
python
train
35.580645
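A two-state sketch with made-up parameters:
import numpy as np
pi = np.array([0.5, 0.5])
P = np.array([[0.9, 0.1], [0.1, 0.9]])
ghmm = gaussian_hmm(pi, P, means=[-1.0, 1.0], sigmas=[0.5, 0.5])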
gwastro/pycbc
pycbc/conversions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/conversions.py#L1274-L1313
def nltides_coefs(amplitude, n, m1, m2): """Calculate the coefficients needed to compute the shift in t(f) and phi(f) due to non-linear tides. Parameters ---------- amplitude: float Amplitude of effect n: float Growth dependence of effect m1: float Mass of component 1 m2: float Mass of component 2 Returns ------- f_ref : float Reference frequency used to define A and n t_of_f_factor: float The constant factor needed to compute t(f) phi_of_f_factor: float The constant factor needed to compute phi(f) """ # Use 100.0 Hz as a reference frequency f_ref = 100.0 # Calculate chirp mass mc = mchirp_from_mass1_mass2(m1, m2) mc *= lal.lal.MSUN_SI # Calculate constants in phasing a = (96./5.) * \ (lal.lal.G_SI * lal.lal.PI * mc * f_ref / lal.lal.C_SI**3.)**(5./3.) b = 6. * amplitude t_of_f_factor = -1./(lal.lal.PI*f_ref) * b/(a*a * (n-4.)) phi_of_f_factor = -2.*b / (a*a * (n-3.)) return f_ref, t_of_f_factor, phi_of_f_factor
[ "def", "nltides_coefs", "(", "amplitude", ",", "n", ",", "m1", ",", "m2", ")", ":", "# Use 100.0 Hz as a reference frequency", "f_ref", "=", "100.0", "# Calculate chirp mass", "mc", "=", "mchirp_from_mass1_mass2", "(", "m1", ",", "m2", ")", "mc", "*=", "lal", ".", "lal", ".", "MSUN_SI", "# Calculate constants in phasing", "a", "=", "(", "96.", "/", "5.", ")", "*", "(", "lal", ".", "lal", ".", "G_SI", "*", "lal", ".", "lal", ".", "PI", "*", "mc", "*", "f_ref", "/", "lal", ".", "lal", ".", "C_SI", "**", "3.", ")", "**", "(", "5.", "/", "3.", ")", "b", "=", "6.", "*", "amplitude", "t_of_f_factor", "=", "-", "1.", "/", "(", "lal", ".", "lal", ".", "PI", "*", "f_ref", ")", "*", "b", "/", "(", "a", "*", "a", "*", "(", "n", "-", "4.", ")", ")", "phi_of_f_factor", "=", "-", "2.", "*", "b", "/", "(", "a", "*", "a", "*", "(", "n", "-", "3.", ")", ")", "return", "f_ref", ",", "t_of_f_factor", ",", "phi_of_f_factor" ]
Calculate the coefficients needed to compute the shift in t(f) and phi(f) due to non-linear tides. Parameters ---------- amplitude: float Amplitude of effect n: float Growth dependence of effect m1: float Mass of component 1 m2: float Mass of component 2 Returns ------- f_ref : float Reference frequency used to define A and n t_of_f_factor: float The constant factor needed to compute t(f) phi_of_f_factor: float The constant factor needed to compute phi(f)
[ "Calculate", "the", "coefficents", "needed", "to", "compute", "the", "shift", "in", "t", "(", "f", ")", "and", "phi", "(", "f", ")", "due", "to", "non", "-", "linear", "tides", "." ]
python
train
26.375
angr/angr
angr/exploration_techniques/tracer.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/exploration_techniques/tracer.py#L459-L470
def _grab_concretization_results(cls, state): """ Grabs the concretized result so we can add the constraint ourselves. """ # only grab ones that match the constrained addrs if cls._should_add_constraints(state): addr = state.inspect.address_concretization_expr result = state.inspect.address_concretization_result if result is None: l.warning("addr concretization result is None") return state.preconstrainer.address_concretization.append((addr, result))
[ "def", "_grab_concretization_results", "(", "cls", ",", "state", ")", ":", "# only grab ones that match the constrained addrs", "if", "cls", ".", "_should_add_constraints", "(", "state", ")", ":", "addr", "=", "state", ".", "inspect", ".", "address_concretization_expr", "result", "=", "state", ".", "inspect", ".", "address_concretization_result", "if", "result", "is", "None", ":", "l", ".", "warning", "(", "\"addr concretization result is None\"", ")", "return", "state", ".", "preconstrainer", ".", "address_concretization", ".", "append", "(", "(", "addr", ",", "result", ")", ")" ]
Grabs the concretized result so we can add the constraint ourselves.
[ "Grabs", "the", "concretized", "result", "so", "we", "can", "add", "the", "constraint", "ourselves", "." ]
python
train
46.916667
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2052-L2063
def subtract(self, other, numPartitions=None): """ Return each value in C{self} that is not contained in C{other}. >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)]) >>> y = sc.parallelize([("a", 3), ("c", None)]) >>> sorted(x.subtract(y).collect()) [('a', 1), ('b', 4), ('b', 5)] """ # note: here 'True' is just a placeholder rdd = other.map(lambda x: (x, True)) return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
[ "def", "subtract", "(", "self", ",", "other", ",", "numPartitions", "=", "None", ")", ":", "# note: here 'True' is just a placeholder", "rdd", "=", "other", ".", "map", "(", "lambda", "x", ":", "(", "x", ",", "True", ")", ")", "return", "self", ".", "map", "(", "lambda", "x", ":", "(", "x", ",", "True", ")", ")", ".", "subtractByKey", "(", "rdd", ",", "numPartitions", ")", ".", "keys", "(", ")" ]
Return each value in C{self} that is not contained in C{other}. >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)]) >>> y = sc.parallelize([("a", 3), ("c", None)]) >>> sorted(x.subtract(y).collect()) [('a', 1), ('b', 4), ('b', 5)]
[ "Return", "each", "value", "in", "C", "{", "self", "}", "that", "is", "not", "contained", "in", "C", "{", "other", "}", "." ]
python
train
43.75
jobovy/galpy
galpy/df/quasiisothermaldf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/quasiisothermaldf.py#L2530-L2532
def _vmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o): """Internal function that is the integrand for the vmomentsurface mass integration""" return vR**n*vT**m*vz**o*df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False)*numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.)
[ "def", "_vmomentsurfaceMCIntegrand", "(", "vz", ",", "vR", ",", "vT", ",", "R", ",", "z", ",", "df", ",", "sigmaR1", ",", "gamma", ",", "sigmaz1", ",", "mvT", ",", "n", ",", "m", ",", "o", ")", ":", "return", "vR", "**", "n", "*", "vT", "**", "m", "*", "vz", "**", "o", "*", "df", "(", "R", ",", "vR", "*", "sigmaR1", ",", "vT", "*", "sigmaR1", "*", "gamma", ",", "z", ",", "vz", "*", "sigmaz1", ",", "use_physical", "=", "False", ")", "*", "numpy", ".", "exp", "(", "vR", "**", "2.", "/", "2.", "+", "(", "vT", "-", "mvT", ")", "**", "2.", "/", "2.", "+", "vz", "**", "2.", "/", "2.", ")" ]
Internal function that is the integrand for the vmomentsurface mass integration
[ "Internal", "function", "that", "is", "the", "integrand", "for", "the", "vmomentsurface", "mass", "integration" ]
python
train
103.333333
Tanganelli/CoAPthon3
coapthon/messages/response.py
https://github.com/Tanganelli/CoAPthon3/blob/985763bfe2eb9e00f49ec100c5b8877c2ed7d531/coapthon/messages/response.py#L55-L66
def location_query(self): """ Return the Location-Query of the response. :rtype : String :return: the Location-Query option """ value = [] for option in self.options: if option.number == defines.OptionRegistry.LOCATION_QUERY.number: value.append(option.value) return value
[ "def", "location_query", "(", "self", ")", ":", "value", "=", "[", "]", "for", "option", "in", "self", ".", "options", ":", "if", "option", ".", "number", "==", "defines", ".", "OptionRegistry", ".", "LOCATION_QUERY", ".", "number", ":", "value", ".", "append", "(", "option", ".", "value", ")", "return", "value" ]
Return the Location-Query of the response. :rtype : String :return: the Location-Query option
[ "Return", "the", "Location", "-", "Query", "of", "the", "response", "." ]
python
train
29.5
rmorshea/spectate
spectate/core.py
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/core.py#L74-L117
def callback(self, name, before=None, after=None): """Add a callback pair to this spectator. You can specify, with keywords, whether each callback should be triggered before and/or after a given method is called - hereafter referred to as "beforebacks" and "afterbacks" respectively. Parameters ---------- name: str The name of the method to which callbacks should respond. before: None or callable A callable of the form ``before(obj, call)`` where ``obj`` is the instance which called a watched method, and ``call`` is a :class:`Data` containing the name of the called method, along with its positional and keyword arguments under the attributes "name", "args", and "kwargs" respectively. after: None or callable A callable of the form ``after(obj, answer)`` where ``obj`` is the instance which called a watched method, and ``answer`` is a :class:`Data` containing the name of the called method, along with the value it returned, and data ``before`` may have returned under the attributes "name", "value", and "before" respectively. """ if isinstance(name, (list, tuple)): for name in name: self.callback(name, before, after) else: if not isinstance(getattr(self.subclass, name), MethodSpectator): raise ValueError("No method specator for '%s'" % name) if before is None and after is None: raise ValueError("No pre or post '%s' callbacks were given" % name) elif before is not None and not callable(before): raise ValueError("Expected a callable, not %r." % before) elif after is not None and not callable(after): raise ValueError("Expected a callable, not %r." % after) elif before is None and after is None: raise ValueError("No callbacks were given.") if name in self._callback_registry: callback_list = self._callback_registry[name] else: callback_list = [] self._callback_registry[name] = callback_list callback_list.append((before, after))
[ "def", "callback", "(", "self", ",", "name", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "if", "isinstance", "(", "name", ",", "(", "list", ",", "tuple", ")", ")", ":", "for", "name", "in", "name", ":", "self", ".", "callback", "(", "name", ",", "before", ",", "after", ")", "else", ":", "if", "not", "isinstance", "(", "getattr", "(", "self", ".", "subclass", ",", "name", ")", ",", "MethodSpectator", ")", ":", "raise", "ValueError", "(", "\"No method specator for '%s'\"", "%", "name", ")", "if", "before", "is", "None", "and", "after", "is", "None", ":", "raise", "ValueError", "(", "\"No pre or post '%s' callbacks were given\"", "%", "name", ")", "elif", "before", "is", "not", "None", "and", "not", "callable", "(", "before", ")", ":", "raise", "ValueError", "(", "\"Expected a callable, not %r.\"", "%", "before", ")", "elif", "after", "is", "not", "None", "and", "not", "callable", "(", "after", ")", ":", "raise", "ValueError", "(", "\"Expected a callable, not %r.\"", "%", "after", ")", "elif", "before", "is", "None", "and", "after", "is", "None", ":", "raise", "ValueError", "(", "\"No callbacks were given.\"", ")", "if", "name", "in", "self", ".", "_callback_registry", ":", "callback_list", "=", "self", ".", "_callback_registry", "[", "name", "]", "else", ":", "callback_list", "=", "[", "]", "self", ".", "_callback_registry", "[", "name", "]", "=", "callback_list", "callback_list", ".", "append", "(", "(", "before", ",", "after", ")", ")" ]
Add a callback pair to this spectator. You can specify, with keywords, whether each callback should be triggered before and/or after a given method is called - hereafter referred to as "beforebacks" and "afterbacks" respectively. Parameters ---------- name: str The name of the method to which callbacks should respond. before: None or callable A callable of the form ``before(obj, call)`` where ``obj`` is the instance which called a watched method, and ``call`` is a :class:`Data` containing the name of the called method, along with its positional and keyword arguments under the attributes "name", "args", and "kwargs" respectively. after: None or callable A callable of the form ``after(obj, answer)`` where ``obj`` is the instance which called a watched method, and ``answer`` is a :class:`Data` containing the name of the called method, along with the value it returned, and data ``before`` may have returned under the attributes "name", "value", and "before" respectively.
[ "Add", "a", "callback", "pair", "to", "this", "spectator", "." ]
python
train
51.954545
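A sketch pairing a beforeback and an afterback on a watched method, following the documented signatures; `spectator` stands in for a Spectator instance and the printed payload is illustrative:
def before_append(obj, call):
    return call.args                 # handed to the afterback as answer.before
def after_append(obj, answer):
    print('appended', answer.before)
spectator.callback('append', before=before_append, after=after_append)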
jobovy/galpy
galpy/util/multi.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/multi.py#L82-L126
def run_tasks(procs, err_q, out_q, num): """ A function that executes populated processes and processes the resultant array. Checks error queue for any exceptions. :param procs: list of Process objects :param out_q: thread-safe output queue :param err_q: thread-safe queue to populate on exception :param num : length of resultant array """ # function to terminate processes that are still running. die = (lambda vals : [val.terminate() for val in vals if val.exitcode is None]) try: for proc in procs: proc.start() for proc in procs: proc.join() except Exception as e: # kill all slave processes on ctrl-C try: die(procs) finally: raise e if not err_q.empty(): # kill all on any exception from any one slave try: die(procs) finally: raise err_q.get() # Processes finish in arbitrary order. Process IDs double # as index in the resultant array. results=[None]*num; while not out_q.empty(): idx, result = out_q.get() results[idx] = result # Remove extra dimension added by array_split return list(numpy.concatenate(results))
[ "def", "run_tasks", "(", "procs", ",", "err_q", ",", "out_q", ",", "num", ")", ":", "# function to terminate processes that are still running.", "die", "=", "(", "lambda", "vals", ":", "[", "val", ".", "terminate", "(", ")", "for", "val", "in", "vals", "if", "val", ".", "exitcode", "is", "None", "]", ")", "try", ":", "for", "proc", "in", "procs", ":", "proc", ".", "start", "(", ")", "for", "proc", "in", "procs", ":", "proc", ".", "join", "(", ")", "except", "Exception", "as", "e", ":", "# kill all slave processes on ctrl-C", "try", ":", "die", "(", "procs", ")", "finally", ":", "raise", "e", "if", "not", "err_q", ".", "empty", "(", ")", ":", "# kill all on any exception from any one slave", "try", ":", "die", "(", "procs", ")", "finally", ":", "raise", "err_q", ".", "get", "(", ")", "# Processes finish in arbitrary order. Process IDs double", "# as index in the resultant array.", "results", "=", "[", "None", "]", "*", "num", "while", "not", "out_q", ".", "empty", "(", ")", ":", "idx", ",", "result", "=", "out_q", ".", "get", "(", ")", "results", "[", "idx", "]", "=", "result", "# Remove extra dimension added by array_split", "return", "list", "(", "numpy", ".", "concatenate", "(", "results", ")", ")" ]
A function that executes populated processes and processes the resultant array. Checks error queue for any exceptions. :param procs: list of Process objects :param out_q: thread-safe output queue :param err_q: thread-safe queue to populate on exception :param num : length of resultant array
[ "A", "function", "that", "executes", "populated", "processes", "and", "processes", "the", "resultant", "array", ".", "Checks", "error", "queue", "for", "any", "exceptions", "." ]
python
train
24.755556
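A sketch of the worker contract the function expects; procs, err_q, and out_q come from the caller as in the docstring:
# each worker must put (chunk_index, chunk_result) on out_q, and push any
# exception onto err_q before exiting; `num` is the number of submitted chunks
results = run_tasks(procs, err_q, out_q, num=len(procs))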
pywbem/pywbem
pywbem_mock/_wbemconnection_mock.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_wbemconnection_mock.py#L846-L905
def add_method_callback(self, classname, methodname, method_callback, namespace=None,): """ Register a callback function for a CIM method that will be called when the CIM method is invoked via `InvokeMethod`. If the namespace does not exist, :exc:`~pywbem.CIMError` with status CIM_ERR_INVALID_NAMESPACE is raised. Parameters: classname (:term:`string`): The CIM class name for which the callback function is registered. The faked `InvokeMethod` implementation uses this information to look up the callback function from its parameters. For method invocations on a target instance, this must be the class name of the creation class of the target instance. For method invocations on a target class, this must be the class name of the target class. methodname (:term:`string`): The CIM method name for which the callback function is registered. The faked `InvokeMethod` implementation uses this information to look up the callback function from its parameters. method_callback (:func:`~pywbem_mock.method_callback_interface`): The callback function. namespace (:term:`string`): The CIM namespace for which the callback function is registered. If `None`, the callback function is registered for the default namespace of the connection. The faked `InvokeMethod` implementation uses this information to look up the callback function from its parameters. Raises: ValueError: Duplicate method specification. :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does not exist. """ if namespace is None: namespace = self.default_namespace # Validate namespace method_repo = self._get_method_repo(namespace) if classname not in method_repo: method_repo[classname] = NocaseDict() if methodname in method_repo[classname]: raise ValueError("Duplicate method specification") method_repo[classname][methodname] = method_callback
[ "def", "add_method_callback", "(", "self", ",", "classname", ",", "methodname", ",", "method_callback", ",", "namespace", "=", "None", ",", ")", ":", "if", "namespace", "is", "None", ":", "namespace", "=", "self", ".", "default_namespace", "# Validate namespace", "method_repo", "=", "self", ".", "_get_method_repo", "(", "namespace", ")", "if", "classname", "not", "in", "method_repo", ":", "method_repo", "[", "classname", "]", "=", "NocaseDict", "(", ")", "if", "methodname", "in", "method_repo", "[", "classname", "]", ":", "raise", "ValueError", "(", "\"Duplicate method specification\"", ")", "method_repo", "[", "classname", "]", "[", "methodname", "]", "=", "method_callback" ]
Register a callback function for a CIM method that will be called when the CIM method is invoked via `InvokeMethod`. If the namespace does not exist, :exc:`~pywbem.CIMError` with status CIM_ERR_INVALID_NAMESPACE is raised. Parameters: classname (:term:`string`): The CIM class name for which the callback function is registered. The faked `InvokeMethod` implementation uses this information to look up the callback function from its parameters. For method invocations on a target instance, this must be the class name of the creation class of the target instance. For method invocations on a target class, this must be the class name of the target class. methodname (:term:`string`): The CIM method name for which the callback function is registered. The faked `InvokeMethod` implementation uses this information to look up the callback function from its parameters. method_callback (:func:`~pywbem_mock.method_callback_interface`): The callback function. namespace (:term:`string`): The CIM namespace for which the callback function is registered. If `None`, the callback function is registered for the default namespace of the connection. The faked `InvokeMethod` implementation uses this information to look up the callback function from its parameters. Raises: ValueError: Duplicate method specification. :exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does not exist.
[ "Register", "a", "callback", "function", "for", "a", "CIM", "method", "that", "will", "be", "called", "when", "the", "CIM", "method", "is", "invoked", "via", "InvokeMethod", "." ]
python
train
37.033333
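A registration sketch; my_callback stands in for a function satisfying method_callback_interface (its exact signature is defined there, not here):
conn.add_method_callback('CIM_Foo', 'Method1', my_callback)   # default namespace
conn.add_method_callback('CIM_Foo', 'Method1', my_callback,
                         namespace='root/custom')             # explicit namespace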
razor-x/scipy-data_fitting
scipy_data_fitting/fit.py
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/fit.py#L167-L189
def limits(self): """ Limits to use for the independent variable whenever creating a linspace, plot, etc. Defaults to `(-x, x)` where `x` is the largest absolute value of the data corresponding to the independent variable. If no such values are negative, defaults to `(0, x)` instead. """ if not hasattr(self, '_limits'): xmax = max(abs(self.data.array[0])) xmin = min(self.data.array[0]) x_error = self.data.error[0] if isinstance(x_error, numpy.ndarray): if x_error.ndim == 0: xmax = xmax + x_error if xmin < 0: self._limits = (-xmax, xmax) else: self._limits = (0, xmax) return self._limits
[ "def", "limits", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_limits'", ")", ":", "xmax", "=", "max", "(", "abs", "(", "self", ".", "data", ".", "array", "[", "0", "]", ")", ")", "xmin", "=", "min", "(", "self", ".", "data", ".", "array", "[", "0", "]", ")", "x_error", "=", "self", ".", "data", ".", "error", "[", "0", "]", "if", "isinstance", "(", "x_error", ",", "numpy", ".", "ndarray", ")", ":", "if", "x_error", ".", "ndim", "==", "0", ":", "xmax", "=", "xmax", "+", "x_error", "if", "xmin", "<", "0", ":", "self", ".", "_limits", "=", "(", "-", "xmax", ",", "xmax", ")", "else", ":", "self", ".", "_limits", "=", "(", "0", ",", "xmax", ")", "return", "self", ".", "_limits" ]
Limits to use for the independent variable whenever creating a linspace, plot, etc. Defaults to `(-x, x)` where `x` is the largest absolute value of the data corresponding to the independent variable. If no such values are negative, defaults to `(0, x)` instead.
[ "Limits", "to", "use", "for", "the", "independent", "variable", "whenever", "creating", "a", "linespace", "plot", "etc", "." ]
python
train
33.391304
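The `limits` property above is a cached computation; stripped of the caching and the `Fit` object, the same rule is easy to check in isolation. A minimal sketch (the `plot_limits` helper and its inputs are hypothetical, not part of scipy-data_fitting's API):

```python
import numpy

def plot_limits(x, x_error=0.0):
    """Widen the max by a scalar error term, then pick a symmetric
    range if any data is negative, else a zero-based one."""
    x = numpy.asarray(x)
    xmax = abs(x).max()
    xmin = x.min()
    if isinstance(x_error, numpy.ndarray) and x_error.ndim == 0:
        xmax = xmax + x_error
    return (-xmax, xmax) if xmin < 0 else (0, xmax)

print(plot_limits([-2.0, 1.0, 3.0]))  # (-3.0, 3.0)
print(plot_limits([0.5, 1.0, 3.0]))   # (0, 3.0)
```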
Xion/taipan
taipan/objective/base.py
https://github.com/Xion/taipan/blob/f333f0287c8bd0915182c7d5308e5f05ef0cca78/taipan/objective/base.py#L140-L172
def _get_override_base(self, override_wrapper): """Retrieve the override base class from the :class:`_OverriddenMethod` wrapper. """ base = override_wrapper.modifier.base if not base: return None if is_class(base): return base # resolve the (possibly qualified) class name if '.' in base: # repeatedly try to import the first N-1, N-2, etc. dot-separated # parts of the qualified name; this way we can handle all names # including `package.module.Class.InnerClass` dot_parts = base.split('.') for i in xrange(len(dot_parts) - 1, 1, -1): # n-1 -> 1 module_name = '.'.join(dot_parts[:i]) class_name = '.'.join(dot_parts[i:]) try: module = __import__(module_name, fromlist=[dot_parts[i]]) break except ImportError: pass else: # couldn't resolve class name, return it verbatim return base else: class_name = base module_name = override_wrapper.method.__module__ module = sys.modules[module_name] return getattr(module, class_name)
[ "def", "_get_override_base", "(", "self", ",", "override_wrapper", ")", ":", "base", "=", "override_wrapper", ".", "modifier", ".", "base", "if", "not", "base", ":", "return", "None", "if", "is_class", "(", "base", ")", ":", "return", "base", "# resolve the (possibly qualified) class name", "if", "'.'", "in", "base", ":", "# repeatedly try to import the first N-1, N-2, etc. dot-separated", "# parts of the qualified name; this way we can handle all names", "# including `package.module.Class.InnerClass`", "dot_parts", "=", "base", ".", "split", "(", "'.'", ")", "for", "i", "in", "xrange", "(", "len", "(", "dot_parts", ")", "-", "1", ",", "1", ",", "-", "1", ")", ":", "# n-1 -> 1", "module_name", "=", "'.'", ".", "join", "(", "dot_parts", "[", ":", "i", "]", ")", "class_name", "=", "'.'", ".", "join", "(", "dot_parts", "[", "i", ":", "]", ")", "try", ":", "module", "=", "__import__", "(", "module_name", ",", "fromlist", "=", "[", "dot_parts", "[", "i", "]", "]", ")", "break", "except", "ImportError", ":", "pass", "else", ":", "# couldn't resolve class name, return it verbatim", "return", "base", "else", ":", "class_name", "=", "base", "module_name", "=", "override_wrapper", ".", "method", ".", "__module__", "module", "=", "sys", ".", "modules", "[", "module_name", "]", "return", "getattr", "(", "module", ",", "class_name", ")" ]
Retrieve the override base class from the :class:`_OverriddenMethod` wrapper.
[ "Retrieve", "the", "override", "base", "class", "from", "the", ":", "class", ":", "_OverriddenMethod", "wrapper", "." ]
python
train
38.333333
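The interesting part of `_get_override_base` is the name-resolution loop: it tries ever-shorter module prefixes of a dotted name until an import succeeds, so `package.module.Class.InnerClass` resolves even though only `package.module` is importable. A standalone sketch of that strategy (`resolve_qualified_name` is an illustrative helper, not taipan's API; it uses Python 3's `range` where the original uses `xrange`):

```python
def resolve_qualified_name(name):
    """Try progressively shorter module prefixes of a dotted name,
    then walk the remaining parts as attributes."""
    parts = name.split('.')
    for i in range(len(parts) - 1, 0, -1):
        try:
            module = __import__('.'.join(parts[:i]), fromlist=[parts[i - 1]])
        except ImportError:
            continue
        obj = module
        for attr in parts[i:]:
            obj = getattr(obj, attr)
        return obj
    raise ImportError('cannot resolve %r' % name)

print(resolve_qualified_name('os.path.join'))  # <function join at ...>
```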
nickoala/telepot
telepot/__init__.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/__init__.py#L728-L731
def sendChatAction(self, chat_id, action): """ See: https://core.telegram.org/bots/api#sendchataction """ p = _strip(locals()) return self._api_request('sendChatAction', _rectify(p))
[ "def", "sendChatAction", "(", "self", ",", "chat_id", ",", "action", ")", ":", "p", "=", "_strip", "(", "locals", "(", ")", ")", "return", "self", ".", "_api_request", "(", "'sendChatAction'", ",", "_rectify", "(", "p", ")", ")" ]
See: https://core.telegram.org/bots/api#sendchataction
[ "See", ":", "https", ":", "//", "core", ".", "telegram", ".", "org", "/", "bots", "/", "api#sendchataction" ]
python
train
50.75
nutechsoftware/alarmdecoder
alarmdecoder/decoder.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L356-L377
def fault_zone(self, zone, simulate_wire_problem=False): """ Faults a zone if we are emulating a zone expander. :param zone: zone to fault :type zone: int :param simulate_wire_problem: Whether or not to simulate a wire fault :type simulate_wire_problem: bool """ # Allow ourselves to also be passed an address/channel combination # for zone expanders. # # Format (expander index, channel) if isinstance(zone, tuple): expander_idx, channel = zone zone = self._zonetracker.expander_to_zone(expander_idx, channel) status = 2 if simulate_wire_problem else 1 self.send("L{0:02}{1}\r".format(zone, status))
[ "def", "fault_zone", "(", "self", ",", "zone", ",", "simulate_wire_problem", "=", "False", ")", ":", "# Allow ourselves to also be passed an address/channel combination", "# for zone expanders.", "#", "# Format (expander index, channel)", "if", "isinstance", "(", "zone", ",", "tuple", ")", ":", "expander_idx", ",", "channel", "=", "zone", "zone", "=", "self", ".", "_zonetracker", ".", "expander_to_zone", "(", "expander_idx", ",", "channel", ")", "status", "=", "2", "if", "simulate_wire_problem", "else", "1", "self", ".", "send", "(", "\"L{0:02}{1}\\r\"", ".", "format", "(", "zone", ",", "status", ")", ")" ]
Faults a zone if we are emulating a zone expander. :param zone: zone to fault :type zone: int :param simulate_wire_problem: Whether or not to simulate a wire fault :type simulate_wire_problem: bool
[ "Faults", "a", "zone", "if", "we", "are", "emulating", "a", "zone", "expander", "." ]
python
train
32.727273
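The panel command built by `fault_zone` is just `L`, a zero-padded zone number, and a status digit (1 = fault, 2 = simulated wire fault). A quick check of the wire format, using the same format string as the method (no device needed):

```python
def zone_command(zone, simulate_wire_problem=False):
    # 1 = faulted, 2 = simulated wire fault, as in fault_zone() above
    status = 2 if simulate_wire_problem else 1
    return "L{0:02}{1}\r".format(zone, status)

assert zone_command(5) == "L051\r"
assert zone_command(12, simulate_wire_problem=True) == "L122\r"
```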
aetros/aetros-cli
aetros/backend.py
https://github.com/aetros/aetros-cli/blob/a2a1f38d6af1660e1e2680c7d413ec2aef45faab/aetros/backend.py#L1311-L1327
def load_job_from_ref(self): """ Loads the job.json into self.job """ if not self.job_id: raise Exception('Job not loaded yet. Use load(id) first.') if not os.path.exists(self.git.work_tree + '/aetros/job.json'): raise Exception('Could not load aetros/job.json from git repository. Make sure you have created the job correctly.') with open(self.git.work_tree + '/aetros/job.json') as f: self.job = simplejson.loads(f.read(), object_pairs_hook=collections.OrderedDict) if not self.job: raise Exception('Could not parse aetros/job.json from git repository. Make sure you have created the job correctly.') self.logger.debug('job: ' + str(self.job))
[ "def", "load_job_from_ref", "(", "self", ")", ":", "if", "not", "self", ".", "job_id", ":", "raise", "Exception", "(", "'Job not loaded yet. Use load(id) first.'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "git", ".", "work_tree", "+", "'/aetros/job.json'", ")", ":", "raise", "Exception", "(", "'Could not load aetros/job.json from git repository. Make sure you have created the job correctly.'", ")", "with", "open", "(", "self", ".", "git", ".", "work_tree", "+", "'/aetros/job.json'", ")", "as", "f", ":", "self", ".", "job", "=", "simplejson", ".", "loads", "(", "f", ".", "read", "(", ")", ",", "object_pairs_hook", "=", "collections", ".", "OrderedDict", ")", "if", "not", "self", ".", "job", ":", "raise", "Exception", "(", "'Could not parse aetros/job.json from git repository. Make sure you have created the job correctly.'", ")", "self", ".", "logger", ".", "debug", "(", "'job: '", "+", "str", "(", "self", ".", "job", ")", ")" ]
Loads the job.json into self.job
[ "Loads", "the", "job", ".", "json", "into", "self", ".", "job" ]
python
train
43.823529
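The `object_pairs_hook=collections.OrderedDict` argument is what keeps the keys of `aetros/job.json` in file order; the stdlib `json` module accepts the same hook, so the behavior is easy to demonstrate without simplejson:

```python
import json
import collections

raw = '{"b": 1, "a": 2}'
job = json.loads(raw, object_pairs_hook=collections.OrderedDict)
print(list(job))  # ['b', 'a'] -- key order follows the document
```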
paylogic/pip-accel
pip_accel/__init__.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/__init__.py#L414-L506
def get_pip_requirement_set(self, arguments, use_remote_index, use_wheels=False): """ Get the unpacked requirement(s) specified by the caller by running pip. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_remote_index: A boolean indicating whether pip is allowed to connect to the main package index (http://pypi.python.org by default). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :returns: A :class:`pip.req.RequirementSet` object created by pip. :raises: Any exceptions raised by pip. """ # Compose the pip command line arguments. This is where a lot of the # core logic of pip-accel is hidden and it uses some esoteric features # of pip so this method is heavily commented. command_line = [] # Use `--download' to instruct pip to download requirement(s) into # pip-accel's local source distribution index directory. This has the # following documented side effects (see `pip install --help'): # 1. It disables the installation of requirements (without using the # `--no-install' option which is deprecated and slated for removal # in pip 7.x). # 2. It ignores requirements that are already installed (because # pip-accel doesn't actually need to re-install requirements that # are already installed we will have to work around this later, but # that seems fairly simple to do). command_line.append('--download=%s' % self.config.source_index) # Use `--find-links' to point pip at pip-accel's local source # distribution index directory. This ensures that source distribution # archives are never downloaded more than once (regardless of the HTTP # cache that was introduced in pip 6.x). command_line.append('--find-links=%s' % create_file_url(self.config.source_index)) # Use `--no-binary=:all:' to ignore wheel distributions by default in # order to preserve backwards compatibility with callers that expect a # requirement set consisting only of source distributions that can be # converted to `dumb binary distributions'. if not use_wheels and self.arguments_allow_wheels(arguments): command_line.append('--no-binary=:all:') # Use `--no-index' to force pip to only consider source distribution # archives contained in pip-accel's local source distribution index # directory. This enables pip-accel to ask pip "Can the local source # distribution index satisfy all requirements in the given requirement # set?" which enables pip-accel to keep pip off the internet unless # absolutely necessary :-). if not use_remote_index: command_line.append('--no-index') # Use `--no-clean' to instruct pip to unpack the source distribution # archives and *not* clean up the unpacked source distributions # afterwards. This enables pip-accel to replace pip's installation # logic with cached binary distribution archives. command_line.append('--no-clean') # Use `--build-directory' to instruct pip to unpack the source # distribution archives to a temporary directory managed by pip-accel. # We will clean up the build directory when we're done using the # unpacked source distributions. command_line.append('--build-directory=%s' % self.build_directory) # Append the user's `pip install ...' arguments to the command line # that we just assembled. command_line.extend(arguments) logger.info("Executing command: pip install %s", ' '.join(command_line)) # Clear the build directory to prevent PreviousBuildDirError exceptions. self.clear_build_directory() # During the pip 6.x upgrade pip-accel switched to using `pip install # --download' which can produce an interactive prompt as described in # issue 51 [1]. The documented way [2] to get rid of this interactive # prompt is pip's --exists-action option, but due to what is most # likely a bug in pip this doesn't actually work. The environment # variable $PIP_EXISTS_ACTION does work however, so if the user didn't # set it we will set a reasonable default for them. # [1] https://github.com/paylogic/pip-accel/issues/51 # [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option os.environ.setdefault('PIP_EXISTS_ACTION', 'w') # Initialize and run the `pip install' command. command = InstallCommand() opts, args = command.parse_args(command_line) if not opts.ignore_installed: # If the user didn't supply the -I, --ignore-installed option we # will forcefully disable the option. Refer to the documentation of # the AttributeOverrides class for further details. opts = AttributeOverrides(opts, ignore_installed=False) requirement_set = command.run(opts, args) # Make sure the output of pip and pip-accel are not intermingled. sys.stdout.flush() if requirement_set is None: raise NothingToDoError(""" pip didn't generate a requirement set, most likely you specified an empty requirements file? """) else: return self.transform_pip_requirement_set(requirement_set)
[ "def", "get_pip_requirement_set", "(", "self", ",", "arguments", ",", "use_remote_index", ",", "use_wheels", "=", "False", ")", ":", "# Compose the pip command line arguments. This is where a lot of the", "# core logic of pip-accel is hidden and it uses some esoteric features", "# of pip so this method is heavily commented.", "command_line", "=", "[", "]", "# Use `--download' to instruct pip to download requirement(s) into", "# pip-accel's local source distribution index directory. This has the", "# following documented side effects (see `pip install --help'):", "# 1. It disables the installation of requirements (without using the", "# `--no-install' option which is deprecated and slated for removal", "# in pip 7.x).", "# 2. It ignores requirements that are already installed (because", "# pip-accel doesn't actually need to re-install requirements that", "# are already installed we will have work around this later, but", "# that seems fairly simple to do).", "command_line", ".", "append", "(", "'--download=%s'", "%", "self", ".", "config", ".", "source_index", ")", "# Use `--find-links' to point pip at pip-accel's local source", "# distribution index directory. This ensures that source distribution", "# archives are never downloaded more than once (regardless of the HTTP", "# cache that was introduced in pip 6.x).", "command_line", ".", "append", "(", "'--find-links=%s'", "%", "create_file_url", "(", "self", ".", "config", ".", "source_index", ")", ")", "# Use `--no-binary=:all:' to ignore wheel distributions by default in", "# order to preserve backwards compatibility with callers that expect a", "# requirement set consisting only of source distributions that can be", "# converted to `dumb binary distributions'.", "if", "not", "use_wheels", "and", "self", ".", "arguments_allow_wheels", "(", "arguments", ")", ":", "command_line", ".", "append", "(", "'--no-binary=:all:'", ")", "# Use `--no-index' to force pip to only consider source distribution", "# archives contained in pip-accel's local source distribution index", "# directory. This enables pip-accel to ask pip \"Can the local source", "# distribution index satisfy all requirements in the given requirement", "# set?\" which enables pip-accel to keep pip off the internet unless", "# absolutely necessary :-).", "if", "not", "use_remote_index", ":", "command_line", ".", "append", "(", "'--no-index'", ")", "# Use `--no-clean' to instruct pip to unpack the source distribution", "# archives and *not* clean up the unpacked source distributions", "# afterwards. This enables pip-accel to replace pip's installation", "# logic with cached binary distribution archives.", "command_line", ".", "append", "(", "'--no-clean'", ")", "# Use `--build-directory' to instruct pip to unpack the source", "# distribution archives to a temporary directory managed by pip-accel.", "# We will clean up the build directory when we're done using the", "# unpacked source distributions.", "command_line", ".", "append", "(", "'--build-directory=%s'", "%", "self", ".", "build_directory", ")", "# Append the user's `pip install ...' 
arguments to the command line", "# that we just assembled.", "command_line", ".", "extend", "(", "arguments", ")", "logger", ".", "info", "(", "\"Executing command: pip install %s\"", ",", "' '", ".", "join", "(", "command_line", ")", ")", "# Clear the build directory to prevent PreviousBuildDirError exceptions.", "self", ".", "clear_build_directory", "(", ")", "# During the pip 6.x upgrade pip-accel switched to using `pip install", "# --download' which can produce an interactive prompt as described in", "# issue 51 [1]. The documented way [2] to get rid of this interactive", "# prompt is pip's --exists-action option, but due to what is most", "# likely a bug in pip this doesn't actually work. The environment", "# variable $PIP_EXISTS_ACTION does work however, so if the user didn't", "# set it we will set a reasonable default for them.", "# [1] https://github.com/paylogic/pip-accel/issues/51", "# [2] https://pip.pypa.io/en/latest/reference/pip.html#exists-action-option", "os", ".", "environ", ".", "setdefault", "(", "'PIP_EXISTS_ACTION'", ",", "'w'", ")", "# Initialize and run the `pip install' command.", "command", "=", "InstallCommand", "(", ")", "opts", ",", "args", "=", "command", ".", "parse_args", "(", "command_line", ")", "if", "not", "opts", ".", "ignore_installed", ":", "# If the user didn't supply the -I, --ignore-installed option we", "# will forcefully disable the option. Refer to the documentation of", "# the AttributeOverrides class for further details.", "opts", "=", "AttributeOverrides", "(", "opts", ",", "ignore_installed", "=", "False", ")", "requirement_set", "=", "command", ".", "run", "(", "opts", ",", "args", ")", "# Make sure the output of pip and pip-accel are not intermingled.", "sys", ".", "stdout", ".", "flush", "(", ")", "if", "requirement_set", "is", "None", ":", "raise", "NothingToDoError", "(", "\"\"\"\n pip didn't generate a requirement set, most likely you\n specified an empty requirements file?\n \"\"\"", ")", "else", ":", "return", "self", ".", "transform_pip_requirement_set", "(", "requirement_set", ")" ]
Get the unpacked requirement(s) specified by the caller by running pip. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_remote_index: A boolean indicating whether pip is allowed to connect to the main package index (http://pypi.python.org by default). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :returns: A :class:`pip.req.RequirementSet` object created by pip. :raises: Any exceptions raised by pip.
[ "Get", "the", "unpacked", "requirement", "(", "s", ")", "specified", "by", "the", "caller", "by", "running", "pip", "." ]
python
train
61.537634
ClericPy/torequests
torequests/dummy.py
https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/dummy.py#L287-L290
def done_tasks(self): """Return tasks in the loop whose state is not pending.""" tasks = [task for task in self.all_tasks if task._state != NewTask._PENDING] return tasks
[ "def", "done_tasks", "(", "self", ")", ":", "tasks", "=", "[", "task", "for", "task", "in", "self", ".", "all_tasks", "if", "task", ".", "_state", "!=", "NewTask", ".", "_PENDING", "]", "return", "tasks" ]
Return tasks in the loop whose state is not pending.
[ "Return", "tasks", "in", "loop", "which", "its", "state", "is", "not", "pending", "." ]
python
train
47.75
Jajcus/pyxmpp2
pyxmpp2/xmppstringprep.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/xmppstringprep.py#L173-L180
def prohibit(self, data): """Checks for prohibited characters.""" for char in data: for lookup in self.prohibited: if lookup(char): raise StringprepError("Prohibited character: {0!r}" .format(char)) return data
[ "def", "prohibit", "(", "self", ",", "data", ")", ":", "for", "char", "in", "data", ":", "for", "lookup", "in", "self", ".", "prohibited", ":", "if", "lookup", "(", "char", ")", ":", "raise", "StringprepError", "(", "\"Prohibited character: {0!r}\"", ".", "format", "(", "char", ")", ")", "return", "data" ]
Checks for prohibited characters.
[ "Checks", "for", "prohibited", "characters", "." ]
python
valid
42.375
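Each entry in `self.prohibited` is a predicate over a single character, so the whole check is a nested any-match. A minimal sketch of the same pattern with ad-hoc predicates (the character set below is illustrative, not an actual stringprep table):

```python
def prohibit(data, prohibited):
    """Raise on the first character any predicate flags."""
    for char in data:
        for lookup in prohibited:
            if lookup(char):
                raise ValueError("Prohibited character: {0!r}".format(char))
    return data

checks = [str.isspace, lambda c: c in '"&\'/:<>@']
print(prohibit("alice", checks))      # 'alice'
# prohibit("ali ce", checks)          # -> ValueError: ' '
```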
google/grr
grr/server/grr_response_server/hunts/implementation.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/hunts/implementation.py#L465-L508
def _CallFlowRelational(self, flow_name=None, args=None, runner_args=None, client_id=None, **kwargs): """Creates a new flow and sends its responses to a state. This creates a new flow. The flow may send back many responses which will be queued by the framework until the flow terminates. The final status message will cause the entire transaction to be committed to the specified state. Args: flow_name: The name of the flow to invoke. args: Flow arguments. runner_args: Flow runner arguments. client_id: If given, the flow is started for this client. **kwargs: Arguments for the child flow. Returns: The URN of the child flow which was created. Raises: RuntimeError: In case of no cpu quota left to start more clients. """ if isinstance(client_id, rdfvalue.RDFURN): client_id = client_id.Basename() if flow_name is None and runner_args is not None: flow_name = runner_args.flow_name flow_cls = registry.FlowRegistry.FlowClassByName(flow_name) flow_id = flow.StartFlow( client_id=client_id, creator=self.hunt_obj.creator, cpu_limit=self._GetSubFlowCPULimit(), network_bytes_limit=self._GetSubFlowNetworkLimit(), flow_cls=flow_cls, flow_args=args, parent_hunt_id=self.hunt_obj.urn.Basename(), **kwargs) return rdfvalue.RDFURN(client_id).Add("flows").Add(flow_id)
[ "def", "_CallFlowRelational", "(", "self", ",", "flow_name", "=", "None", ",", "args", "=", "None", ",", "runner_args", "=", "None", ",", "client_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "client_id", ",", "rdfvalue", ".", "RDFURN", ")", ":", "client_id", "=", "client_id", ".", "Basename", "(", ")", "if", "flow_name", "is", "None", "and", "runner_args", "is", "not", "None", ":", "flow_name", "=", "runner_args", ".", "flow_name", "flow_cls", "=", "registry", ".", "FlowRegistry", ".", "FlowClassByName", "(", "flow_name", ")", "flow_id", "=", "flow", ".", "StartFlow", "(", "client_id", "=", "client_id", ",", "creator", "=", "self", ".", "hunt_obj", ".", "creator", ",", "cpu_limit", "=", "self", ".", "_GetSubFlowCPULimit", "(", ")", ",", "network_bytes_limit", "=", "self", ".", "_GetSubFlowNetworkLimit", "(", ")", ",", "flow_cls", "=", "flow_cls", ",", "flow_args", "=", "args", ",", "parent_hunt_id", "=", "self", ".", "hunt_obj", ".", "urn", ".", "Basename", "(", ")", ",", "*", "*", "kwargs", ")", "return", "rdfvalue", ".", "RDFURN", "(", "client_id", ")", ".", "Add", "(", "\"flows\"", ")", ".", "Add", "(", "flow_id", ")" ]
Creates a new flow and sends its responses to a state. This creates a new flow. The flow may send back many responses which will be queued by the framework until the flow terminates. The final status message will cause the entire transaction to be committed to the specified state. Args: flow_name: The name of the flow to invoke. args: Flow arguments. runner_args: Flow runner arguments. client_id: If given, the flow is started for this client. **kwargs: Arguments for the child flow. Returns: The URN of the child flow which was created. Raises: RuntimeError: In case of no cpu quota left to start more clients.
[ "Creates", "a", "new", "flow", "and", "send", "its", "responses", "to", "a", "state", "." ]
python
train
34.818182
SBRG/ssbio
ssbio/protein/sequence/properties/cctop.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/properties/cctop.py#L39-L58
def cctop_save_xml(jobid, outpath): """Save the CCTOP results file in XML format. Args: jobid (str): Job ID obtained when job was submitted outpath (str): Path to output filename Returns: str: Path to output filename """ status = cctop_check_status(jobid=jobid) if status == 'Finished': result = 'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'.format(jobid) result_text = requests.post(result) with open(outpath, 'w') as f: f.write(result_text.text) return outpath else: raise ConnectionRefusedError('CCTOP job incomplete, status is "{}"'.format(status))
[ "def", "cctop_save_xml", "(", "jobid", ",", "outpath", ")", ":", "status", "=", "cctop_check_status", "(", "jobid", "=", "jobid", ")", "if", "status", "==", "'Finished'", ":", "result", "=", "'http://cctop.enzim.ttk.mta.hu/php/result.php?jobId={}'", ".", "format", "(", "jobid", ")", "result_text", "=", "requests", ".", "post", "(", "result", ")", "with", "open", "(", "outpath", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "result_text", ".", "text", ")", "return", "outpath", "else", ":", "raise", "ConnectionRefusedError", "(", "'CCTOP job incomplete, status is \"{}\"'", ".", "format", "(", "status", ")", ")" ]
Save the CCTOP results file in XML format. Args: jobid (str): Job ID obtained when job was submitted outpath (str): Path to output filename Returns: str: Path to output filename
[ "Save", "the", "CCTOP", "results", "file", "in", "XML", "format", "." ]
python
train
32.5
andrewramsay/sk8-drivers
pysk8/pysk8/util.py
https://github.com/andrewramsay/sk8-drivers/blob/67347a71762fb421f5ae65a595def5c7879e8b0c/pysk8/pysk8/util.py#L14-L32
def fmt_addr_raw(addr, reverse=True): """Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence. Args: addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format. reverse (bool): True if the byte ordering should be reversed in the output. Returns: A bytearray containing the converted address. """ addr = addr.replace(':', '') raw_addr = [int(addr[i:i+2], 16) for i in range(0, len(addr), 2)] if reverse: raw_addr.reverse() # for Python 2, this needs to be a string instead of a bytearray if sys.version_info[0] == 2: return str(bytearray(raw_addr)) return bytearray(raw_addr)
[ "def", "fmt_addr_raw", "(", "addr", ",", "reverse", "=", "True", ")", ":", "addr", "=", "addr", ".", "replace", "(", "':'", ",", "''", ")", "raw_addr", "=", "[", "int", "(", "addr", "[", "i", ":", "i", "+", "2", "]", ",", "16", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "addr", ")", ",", "2", ")", "]", "if", "reverse", ":", "raw_addr", ".", "reverse", "(", ")", "# for Python 2, this needs to be a string instead of a bytearray", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "return", "str", "(", "bytearray", "(", "raw_addr", ")", ")", "return", "bytearray", "(", "raw_addr", ")" ]
Given a string containing a xx:xx:xx:xx:xx:xx address, return as a byte sequence. Args: addr (str): Bluetooth address in xx:xx:xx:xx:xx:xx format. reverse (bool): True if the byte ordering should be reversed in the output. Returns: A bytearray containing the converted address.
[ "Given", "a", "string", "containing", "a", "xx", ":", "xx", ":", "xx", ":", "xx", ":", "xx", ":", "xx", "address", "return", "as", "a", "byte", "sequence", "." ]
python
train
35.052632
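On Python 3 the function reduces to a hex parse plus an optional reverse; tracing it by hand on a sample address shows the byte-order flip:

```python
addr = 'AA:BB:CC:DD:EE:FF'
hex_str = addr.replace(':', '')
raw = bytearray(int(hex_str[i:i + 2], 16) for i in range(0, len(hex_str), 2))
raw.reverse()  # the reverse=True default
print(raw)     # bytearray(b'\xff\xee\xdd\xcc\xbb\xaa')
```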
spotify/luigi
luigi/contrib/hive.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hive.py#L335-L352
def prepare_outputs(self, job): """ Called before job is started. If output is a `FileSystemTarget`, create parent directories so the hive command won't fail """ outputs = flatten(job.output()) for o in outputs: if isinstance(o, FileSystemTarget): parent_dir = os.path.dirname(o.path) if parent_dir and not o.fs.exists(parent_dir): logger.info("Creating parent directory %r", parent_dir) try: # there is a possible race condition # which needs to be handled here o.fs.mkdir(parent_dir) except FileAlreadyExists: pass
[ "def", "prepare_outputs", "(", "self", ",", "job", ")", ":", "outputs", "=", "flatten", "(", "job", ".", "output", "(", ")", ")", "for", "o", "in", "outputs", ":", "if", "isinstance", "(", "o", ",", "FileSystemTarget", ")", ":", "parent_dir", "=", "os", ".", "path", ".", "dirname", "(", "o", ".", "path", ")", "if", "parent_dir", "and", "not", "o", ".", "fs", ".", "exists", "(", "parent_dir", ")", ":", "logger", ".", "info", "(", "\"Creating parent directory %r\"", ",", "parent_dir", ")", "try", ":", "# there is a possible race condition", "# which needs to be handled here", "o", ".", "fs", ".", "mkdir", "(", "parent_dir", ")", "except", "FileAlreadyExists", ":", "pass" ]
Called before job is started. If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
[ "Called", "before", "job", "is", "started", "." ]
python
train
41.555556
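The try/except around `mkdir` is the standard EAFP answer to the check-then-create race: another worker can create the directory between `exists()` and `mkdir()`. With the local filesystem and Python 3 the same intent is one call; a sketch (stdlib only, not luigi's filesystem abstraction):

```python
import os

def ensure_parent_dir(path):
    """Create the parent directory of `path`, tolerating a
    concurrent creator."""
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
```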
openstack/horizon
openstack_dashboard/dashboards/project/instances/utils.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/dashboards/project/instances/utils.py#L73-L80
def availability_zone_list(request): """Utility method to retrieve a list of availability zones.""" try: return api.nova.availability_zone_list(request) except Exception: exceptions.handle(request, _('Unable to retrieve Nova availability zones.')) return []
[ "def", "availability_zone_list", "(", "request", ")", ":", "try", ":", "return", "api", ".", "nova", ".", "availability_zone_list", "(", "request", ")", "except", "Exception", ":", "exceptions", ".", "handle", "(", "request", ",", "_", "(", "'Unable to retrieve Nova availability zones.'", ")", ")", "return", "[", "]" ]
Utility method to retrieve a list of availability zones.
[ "Utility", "method", "to", "retrieve", "a", "list", "of", "availability", "zones", "." ]
python
train
39
trailofbits/manticore
manticore/platforms/linux.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/linux.py#L1672-L1674
def sys_mmap_pgoff(self, address, size, prot, flags, fd, offset): """Wrapper for mmap2""" return self.sys_mmap2(address, size, prot, flags, fd, offset)
[ "def", "sys_mmap_pgoff", "(", "self", ",", "address", ",", "size", ",", "prot", ",", "flags", ",", "fd", ",", "offset", ")", ":", "return", "self", ".", "sys_mmap2", "(", "address", ",", "size", ",", "prot", ",", "flags", ",", "fd", ",", "offset", ")" ]
Wrapper for mmap2
[ "Wrapper", "for", "mmap2" ]
python
valid
55
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/zmq/__init__.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/zmq/__init__.py#L16-L46
def patch_pyzmq(): """backport a few patches from newer pyzmq These can be removed as we bump our minimum pyzmq version """ import zmq # ioloop.install, introduced in pyzmq 2.1.7 from zmq.eventloop import ioloop def install(): import tornado.ioloop tornado.ioloop.IOLoop = ioloop.IOLoop if not hasattr(ioloop, 'install'): ioloop.install = install # fix missing DEALER/ROUTER aliases in pyzmq < 2.1.9 if not hasattr(zmq, 'DEALER'): zmq.DEALER = zmq.XREQ if not hasattr(zmq, 'ROUTER'): zmq.ROUTER = zmq.XREP # fallback on stdlib json if jsonlib is selected, because jsonlib breaks things. # jsonlib support is removed from pyzmq >= 2.2.0 from zmq.utils import jsonapi if jsonapi.jsonmod.__name__ == 'jsonlib': import json jsonapi.jsonmod = json
[ "def", "patch_pyzmq", "(", ")", ":", "import", "zmq", "# ioloop.install, introduced in pyzmq 2.1.7", "from", "zmq", ".", "eventloop", "import", "ioloop", "def", "install", "(", ")", ":", "import", "tornado", ".", "ioloop", "tornado", ".", "ioloop", ".", "IOLoop", "=", "ioloop", ".", "IOLoop", "if", "not", "hasattr", "(", "ioloop", ",", "'install'", ")", ":", "ioloop", ".", "install", "=", "install", "# fix missing DEALER/ROUTER aliases in pyzmq < 2.1.9", "if", "not", "hasattr", "(", "zmq", ",", "'DEALER'", ")", ":", "zmq", ".", "DEALER", "=", "zmq", ".", "XREQ", "if", "not", "hasattr", "(", "zmq", ",", "'ROUTER'", ")", ":", "zmq", ".", "ROUTER", "=", "zmq", ".", "XREP", "# fallback on stdlib json if jsonlib is selected, because jsonlib breaks things.", "# jsonlib support is removed from pyzmq >= 2.2.0", "from", "zmq", ".", "utils", "import", "jsonapi", "if", "jsonapi", ".", "jsonmod", ".", "__name__", "==", "'jsonlib'", ":", "import", "json", "jsonapi", ".", "jsonmod", "=", "json" ]
backport a few patches from newer pyzmq These can be removed as we bump our minimum pyzmq version
[ "backport", "a", "few", "patches", "from", "newer", "pyzmq", "These", "can", "be", "removed", "as", "we", "bump", "our", "minimum", "pyzmq", "version" ]
python
test
27.870968
candango/firenado
firenado/config.py
https://github.com/candango/firenado/blob/4b1f628e485b521e161d64169c46a9818f26949f/firenado/config.py#L172-L223
def process_app_config_section(config, app_config): """ Processes the app section from a configuration data dict. :param config: The config reference of the object that will hold the configuration data from the config_data. :param app_config: App section from a config data dict. """ if 'addresses' in app_config: config.app['addresses'] = app_config['addresses'] if 'component' in app_config: config.app['component'] = app_config['component'] if 'data' in app_config: if 'sources' in app_config['data']: config.app['data']['sources'] = app_config['data']['sources'] if 'id' in app_config: config.app['id'] = app_config['id'] if 'login' in app_config: if 'urls' in app_config['login']: for url in app_config['login']['urls']: config.app['login']['urls'][url['name']] = url['value'] if 'pythonpath' in app_config: config.app['pythonpath'] = app_config['pythonpath'] if 'port' in app_config: config.app['port'] = app_config['port'] if 'process' in app_config: if 'num_processes' in app_config['process']: config.app['process']['num_processes'] = app_config[ 'process']['num_processes'] if 'url_root_path' in app_config: root_url = app_config['url_root_path'].strip() if root_url[0] == "/": root_url = root_url[1:] if root_url == "": root_url = None config.app['url_root_path'] = root_url if 'settings' in app_config: config.app['settings'] = app_config['settings'] if 'socket' in app_config: config.app['socket'] = app_config['socket'] if 'static_path' in app_config: config.app['static_path'] = app_config['static_path'] if 'static_url_prefix' in app_config: config.app['static_url_prefix'] = app_config['static_url_prefix'] if 'type' in app_config: config.app['type'] = app_config['type'] if 'types' in app_config: for app_type in app_config['types']: app_type['launcher'] = get_config_from_package( app_type['launcher']) config.app['types'][app_type['name']] = app_type if 'wait_before_shutdown' in app_config: config.app['wait_before_shutdown'] = app_config['wait_before_shutdown']
[ "def", "process_app_config_section", "(", "config", ",", "app_config", ")", ":", "if", "'addresses'", "in", "app_config", ":", "config", ".", "app", "[", "'addresses'", "]", "=", "app_config", "[", "'addresses'", "]", "if", "'component'", "in", "app_config", ":", "config", ".", "app", "[", "'component'", "]", "=", "app_config", "[", "'component'", "]", "if", "'data'", "in", "app_config", ":", "if", "'sources'", "in", "app_config", "[", "'data'", "]", ":", "config", ".", "app", "[", "'data'", "]", "[", "'sources'", "]", "=", "app_config", "[", "'data'", "]", "[", "'sources'", "]", "if", "'id'", "in", "app_config", ":", "config", ".", "app", "[", "'id'", "]", "=", "app_config", "[", "'id'", "]", "if", "'login'", "in", "app_config", ":", "if", "'urls'", "in", "app_config", "[", "'login'", "]", ":", "for", "url", "in", "app_config", "[", "'login'", "]", "[", "'urls'", "]", ":", "config", ".", "app", "[", "'login'", "]", "[", "'urls'", "]", "[", "url", "[", "'name'", "]", "]", "=", "url", "[", "'value'", "]", "if", "'pythonpath'", "in", "app_config", ":", "config", ".", "app", "[", "'pythonpath'", "]", "=", "app_config", "[", "'pythonpath'", "]", "if", "'port'", "in", "app_config", ":", "config", ".", "app", "[", "'port'", "]", "=", "app_config", "[", "'port'", "]", "if", "'process'", "in", "app_config", ":", "if", "'num_processes'", "in", "app_config", "[", "'process'", "]", ":", "config", ".", "app", "[", "'process'", "]", "[", "'num_processes'", "]", "=", "app_config", "[", "'process'", "]", "[", "'num_processes'", "]", "if", "'url_root_path'", "in", "app_config", ":", "root_url", "=", "app_config", "[", "'url_root_path'", "]", ".", "strip", "(", ")", "if", "root_url", "[", "0", "]", "==", "\"/\"", ":", "root_url", "=", "root_url", "[", "1", ":", "]", "if", "root_url", "==", "\"\"", ":", "root_url", "=", "None", "config", ".", "app", "[", "'url_root_path'", "]", "=", "root_url", "if", "'settings'", "in", "app_config", ":", "config", ".", "app", "[", "'settings'", "]", "=", "app_config", "[", "'settings'", "]", "if", "'socket'", "in", "app_config", ":", "config", ".", "app", "[", "'socket'", "]", "=", "app_config", "[", "'socket'", "]", "if", "'static_path'", "in", "app_config", ":", "config", ".", "app", "[", "'static_path'", "]", "=", "app_config", "[", "'static_path'", "]", "if", "'static_url_prefix'", "in", "app_config", ":", "config", ".", "app", "[", "'static_url_prefix'", "]", "=", "app_config", "[", "'static_url_prefix'", "]", "if", "'type'", "in", "app_config", ":", "config", ".", "app", "[", "'type'", "]", "=", "app_config", "[", "'type'", "]", "if", "'types'", "in", "app_config", ":", "for", "app_type", "in", "app_config", "[", "'types'", "]", ":", "app_type", "[", "'launcher'", "]", "=", "get_config_from_package", "(", "app_type", "[", "'launcher'", "]", ")", "config", ".", "app", "[", "'types'", "]", "[", "app_type", "[", "'name'", "]", "]", "=", "app_type", "if", "'wait_before_shutdown'", "in", "app_config", ":", "config", ".", "app", "[", "'wait_before_shutdown'", "]", "=", "app_config", "[", "'wait_before_shutdown'", "]" ]
Processes the app section from a configuration data dict. :param config: The config reference of the object that will hold the configuration data from the config_data. :param app_config: App section from a config data dict.
[ "Processes", "the", "app", "section", "from", "a", "configuration", "data", "dict", "." ]
python
train
44.25
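The `url_root_path` branch is the only non-trivial mapping here: it strips one leading slash and turns an empty result into `None`. Isolated (with an added guard for the empty string, which the original would index into):

```python
def normalize_root_url(value):
    root_url = value.strip()
    if root_url and root_url[0] == "/":
        root_url = root_url[1:]
    return root_url or None

assert normalize_root_url("/api") == "api"
assert normalize_root_url("   ") is None
assert normalize_root_url("v1/") == "v1/"
```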
gem/oq-engine
openquake/hazardlib/site.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/site.py#L345-L370
def filter(self, mask): """ Create a SiteCollection with only a subset of sites. :param mask: Numpy array of boolean values of the same length as the site collection. ``True`` values should indicate that site with that index should be included into the filtered collection. :returns: A new :class:`SiteCollection` instance, unless all the values in ``mask`` are ``True``, in which case this site collection is returned, or if all the values in ``mask`` are ``False``, in which case method returns ``None``. New collection has data of only those sites that were marked for inclusion in the mask. """ assert len(mask) == len(self), (len(mask), len(self)) if mask.all(): # all sites satisfy the filter, return # this collection unchanged return self if not mask.any(): # no sites pass the filter, return None return None # extract indices of Trues from the mask indices, = mask.nonzero() return self.filtered(indices)
[ "def", "filter", "(", "self", ",", "mask", ")", ":", "assert", "len", "(", "mask", ")", "==", "len", "(", "self", ")", ",", "(", "len", "(", "mask", ")", ",", "len", "(", "self", ")", ")", "if", "mask", ".", "all", "(", ")", ":", "# all sites satisfy the filter, return", "# this collection unchanged", "return", "self", "if", "not", "mask", ".", "any", "(", ")", ":", "# no sites pass the filter, return None", "return", "None", "# extract indices of Trues from the mask", "indices", ",", "=", "mask", ".", "nonzero", "(", ")", "return", "self", ".", "filtered", "(", "indices", ")" ]
Create a SiteCollection with only a subset of sites. :param mask: Numpy array of boolean values of the same length as the site collection. ``True`` values should indicate that site with that index should be included into the filtered collection. :returns: A new :class:`SiteCollection` instance, unless all the values in ``mask`` are ``True``, in which case this site collection is returned, or if all the values in ``mask`` are ``False``, in which case method returns ``None``. New collection has data of only those sites that were marked for inclusion in the mask.
[ "Create", "a", "SiteCollection", "with", "only", "a", "subset", "of", "sites", "." ]
python
train
43.615385
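The three-way return (same object, `None`, or a subset) avoids copying in the trivial cases. The same mask logic with plain numpy arrays, as a sketch (`filter_by_mask` is illustrative; the real method defers to `self.filtered`):

```python
import numpy as np

def filter_by_mask(values, mask):
    assert len(mask) == len(values)
    if mask.all():
        return values          # every site passes: no copy
    if not mask.any():
        return None            # nothing passes
    indices, = mask.nonzero()  # positions of the True entries
    return values[indices]

vals = np.array([10, 20, 30])
print(filter_by_mask(vals, np.array([True, False, True])))  # [10 30]
```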
googledatalab/pydatalab
google/datalab/ml/_cloud_models.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/ml/_cloud_models.py#L224-L234
def delete(self, version_name): """Delete a version of a model. Args: version_name: the name of the version in short form, such as "v1". """ name = ('%s/versions/%s' % (self._full_model_name, version_name)) response = self._api.projects().models().versions().delete(name=name).execute() if 'name' not in response: raise Exception('Invalid response from service. "name" is not found.') _util.wait_for_long_running_operation(response['name'])
[ "def", "delete", "(", "self", ",", "version_name", ")", ":", "name", "=", "(", "'%s/versions/%s'", "%", "(", "self", ".", "_full_model_name", ",", "version_name", ")", ")", "response", "=", "self", ".", "_api", ".", "projects", "(", ")", ".", "models", "(", ")", ".", "versions", "(", ")", ".", "delete", "(", "name", "=", "name", ")", ".", "execute", "(", ")", "if", "'name'", "not", "in", "response", ":", "raise", "Exception", "(", "'Invalid response from service. \"name\" is not found.'", ")", "_util", ".", "wait_for_long_running_operation", "(", "response", "[", "'name'", "]", ")" ]
Delete a version of a model. Args: version_name: the name of the version in short form, such as "v1".
[ "Delete", "a", "version", "of", "model", "." ]
python
train
42.636364
OCR-D/core
ocrd_models/ocrd_models/ocrd_page_generateds.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd_models/ocrd_models/ocrd_page_generateds.py#L9470-L9493
def parseString(inString, silence=False): '''Parse a string, create the object tree, and export it. Arguments: - inString -- A string. This XML fragment should not start with an XML declaration containing an encoding. - silence -- A boolean. If False, export the object. Returns -- The root object in the tree. ''' parser = None rootNode= parsexmlstring_(inString, parser) rootTag, rootClass = get_root_tag(rootNode) if rootClass is None: rootTag = 'PcGts' rootClass = PcGts rootObj = rootClass.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. if not silence: sys.stdout.write('<?xml version="1.0" ?>\n') rootObj.export( sys.stdout, 0, name_=rootTag, namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2018-07-15"') return rootObj
[ "def", "parseString", "(", "inString", ",", "silence", "=", "False", ")", ":", "parser", "=", "None", "rootNode", "=", "parsexmlstring_", "(", "inString", ",", "parser", ")", "rootTag", ",", "rootClass", "=", "get_root_tag", "(", "rootNode", ")", "if", "rootClass", "is", "None", ":", "rootTag", "=", "'PcGts'", "rootClass", "=", "PcGts", "rootObj", "=", "rootClass", ".", "factory", "(", ")", "rootObj", ".", "build", "(", "rootNode", ")", "# Enable Python to collect the space used by the DOM.", "if", "not", "silence", ":", "sys", ".", "stdout", ".", "write", "(", "'<?xml version=\"1.0\" ?>\\n'", ")", "rootObj", ".", "export", "(", "sys", ".", "stdout", ",", "0", ",", "name_", "=", "rootTag", ",", "namespacedef_", "=", "'xmlns:pc=\"http://schema.primaresearch.org/PAGE/gts/pagecontent/2018-07-15\"'", ")", "return", "rootObj" ]
Parse a string, create the object tree, and export it. Arguments: - inString -- A string. This XML fragment should not start with an XML declaration containing an encoding. - silence -- A boolean. If False, export the object. Returns -- The root object in the tree.
[ "Parse", "a", "string", "create", "the", "object", "tree", "and", "export", "it", "." ]
python
train
37.416667
saltstack/salt
salt/modules/dig.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dig.py#L171-L223
def SPF(domain, record='SPF', nameserver=None): ''' Return the allowed IPv4 ranges in the SPF record for ``domain``. If record is ``SPF`` and the SPF record is empty, the TXT record will be searched automatically. If you know the domain uses TXT and not SPF, specifying that will save a lookup. CLI Example: .. code-block:: bash salt ns1 dig.SPF google.com ''' spf_re = re.compile(r'(?:\+|~)?(ip[46]|include):(.+)') cmd = ['dig', '+short', six.text_type(domain), record] if nameserver is not None: cmd.append('@{0}'.format(nameserver)) result = __salt__['cmd.run_all'](cmd, python_shell=False) # In this case, 0 is not the same as False if result['retcode'] != 0: log.warning( 'dig returned exit code \'%s\'. Returning empty list as fallback.', result['retcode'] ) return [] if result['stdout'] == '' and record == 'SPF': # empty string is successful query, but nothing to return. So, try TXT # record. return SPF(domain, 'TXT', nameserver) sections = re.sub('"', '', result['stdout']).split() if not sections or sections[0] != 'v=spf1': return [] if sections[1].startswith('redirect='): # Run a lookup on the part after 'redirect=' (9 chars) return SPF(sections[1][9:], 'SPF', nameserver) ret = [] for section in sections[1:]: try: mechanism, address = spf_re.match(section).groups() except AttributeError: # Regex was not matched continue if mechanism == 'include': ret.extend(SPF(address, 'SPF', nameserver)) elif mechanism in ('ip4', 'ip6') and check_ip(address): ret.append(address) return ret
[ "def", "SPF", "(", "domain", ",", "record", "=", "'SPF'", ",", "nameserver", "=", "None", ")", ":", "spf_re", "=", "re", ".", "compile", "(", "r'(?:\\+|~)?(ip[46]|include):(.+)'", ")", "cmd", "=", "[", "'dig'", ",", "'+short'", ",", "six", ".", "text_type", "(", "domain", ")", ",", "record", "]", "if", "nameserver", "is", "not", "None", ":", "cmd", ".", "append", "(", "'@{0}'", ".", "format", "(", "nameserver", ")", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "# In this case, 0 is not the same as False", "if", "result", "[", "'retcode'", "]", "!=", "0", ":", "log", ".", "warning", "(", "'dig returned exit code \\'%s\\'. Returning empty list as fallback.'", ",", "result", "[", "'retcode'", "]", ")", "return", "[", "]", "if", "result", "[", "'stdout'", "]", "==", "''", "and", "record", "==", "'SPF'", ":", "# empty string is successful query, but nothing to return. So, try TXT", "# record.", "return", "SPF", "(", "domain", ",", "'TXT'", ",", "nameserver", ")", "sections", "=", "re", ".", "sub", "(", "'\"'", ",", "''", ",", "result", "[", "'stdout'", "]", ")", ".", "split", "(", ")", "if", "not", "sections", "or", "sections", "[", "0", "]", "!=", "'v=spf1'", ":", "return", "[", "]", "if", "sections", "[", "1", "]", ".", "startswith", "(", "'redirect='", ")", ":", "# Run a lookup on the part after 'redirect=' (9 chars)", "return", "SPF", "(", "sections", "[", "1", "]", "[", "9", ":", "]", ",", "'SPF'", ",", "nameserver", ")", "ret", "=", "[", "]", "for", "section", "in", "sections", "[", "1", ":", "]", ":", "try", ":", "mechanism", ",", "address", "=", "spf_re", ".", "match", "(", "section", ")", ".", "groups", "(", ")", "except", "AttributeError", ":", "# Regex was not matched", "continue", "if", "mechanism", "==", "'include'", ":", "ret", ".", "extend", "(", "SPF", "(", "address", ",", "'SPF'", ",", "nameserver", ")", ")", "elif", "mechanism", "in", "(", "'ip4'", ",", "'ip6'", ")", "and", "check_ip", "(", "address", ")", ":", "ret", ".", "append", "(", "address", ")", "return", "ret" ]
Return the allowed IPv4 ranges in the SPF record for ``domain``. If record is ``SPF`` and the SPF record is empty, the TXT record will be searched automatically. If you know the domain uses TXT and not SPF, specifying that will save a lookup. CLI Example: .. code-block:: bash salt ns1 dig.SPF google.com
[ "Return", "the", "allowed", "IPv4", "ranges", "in", "the", "SPF", "record", "for", "domain", "." ]
python
train
32.886792
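The heart of the parser is `spf_re`, which splits an SPF term into a mechanism and an address while tolerating `+`/`~` qualifiers. Checking it against a few sample terms (addresses below are documentation values):

```python
import re

spf_re = re.compile(r'(?:\+|~)?(ip[46]|include):(.+)')

for term in ['ip4:203.0.113.0/24', '~ip6:2001:db8::/32',
             'include:_spf.example.com', '-all']:
    m = spf_re.match(term)
    print(term, '->', m.groups() if m else 'no match')
# ip4:203.0.113.0/24 -> ('ip4', '203.0.113.0/24')
# ~ip6:2001:db8::/32 -> ('ip6', '2001:db8::/32')
# include:_spf.example.com -> ('include', '_spf.example.com')
# -all -> no match
```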
openstax/cnx-epub
cnxepub/models.py
https://github.com/openstax/cnx-epub/blob/f648a309eff551b0a68a115a98ddf7858149a2ea/cnxepub/models.py#L108-L127
def model_to_tree(model, title=None, lucent_id=TRANSLUCENT_BINDER_ID): """Given a model, build the tree:: {'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]} """ id = model.ident_hash if id is None and isinstance(model, TranslucentBinder): id = lucent_id md = model.metadata shortid = md.get('shortId', md.get('cnx-archive-shortid')) title = title is not None and title or md.get('title') tree = {'id': id, 'title': title, 'shortId': shortid} if hasattr(model, '__iter__'): contents = tree['contents'] = [] for node in model: item = model_to_tree(node, model.get_title_for_node(node), lucent_id=lucent_id) contents.append(item) return tree
[ "def", "model_to_tree", "(", "model", ",", "title", "=", "None", ",", "lucent_id", "=", "TRANSLUCENT_BINDER_ID", ")", ":", "id", "=", "model", ".", "ident_hash", "if", "id", "is", "None", "and", "isinstance", "(", "model", ",", "TranslucentBinder", ")", ":", "id", "=", "lucent_id", "md", "=", "model", ".", "metadata", "shortid", "=", "md", ".", "get", "(", "'shortId'", ",", "md", ".", "get", "(", "'cnx-archive-shortid'", ")", ")", "title", "=", "title", "is", "not", "None", "and", "title", "or", "md", ".", "get", "(", "'title'", ")", "tree", "=", "{", "'id'", ":", "id", ",", "'title'", ":", "title", ",", "'shortId'", ":", "shortid", "}", "if", "hasattr", "(", "model", ",", "'__iter__'", ")", ":", "contents", "=", "tree", "[", "'contents'", "]", "=", "[", "]", "for", "node", "in", "model", ":", "item", "=", "model_to_tree", "(", "node", ",", "model", ".", "get_title_for_node", "(", "node", ")", ",", "lucent_id", "=", "lucent_id", ")", "contents", ".", "append", "(", "item", ")", "return", "tree" ]
Given a model, build the tree:: {'id': <id>|'subcol', 'title': <title>, 'contents': [<tree>, ...]}
[ "Given", "an", "model", "build", "the", "tree", "::" ]
python
train
38.35
python-diamond/Diamond
src/collectors/jcollectd/collectd_network.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/jcollectd/collectd_network.py#L83-L111
def decode_network_values(ptype, plen, buf): """Decodes a list of DS values in collectd network format """ nvalues = short.unpack_from(buf, header.size)[0] off = header.size + short.size + nvalues valskip = double.size # Check whether our expected packet size is the reported one assert ((valskip + 1) * nvalues + short.size + header.size) == plen assert double.size == number.size result = [] for dstype in [ord(x) for x in buf[header.size + short.size:off]]: if dstype == DS_TYPE_COUNTER: result.append((dstype, number.unpack_from(buf, off)[0])) off += valskip elif dstype == DS_TYPE_GAUGE: result.append((dstype, double.unpack_from(buf, off)[0])) off += valskip elif dstype == DS_TYPE_DERIVE: result.append((dstype, number.unpack_from(buf, off)[0])) off += valskip elif dstype == DS_TYPE_ABSOLUTE: result.append((dstype, number.unpack_from(buf, off)[0])) off += valskip else: raise ValueError("DS type %i unsupported" % dstype) return result
[ "def", "decode_network_values", "(", "ptype", ",", "plen", ",", "buf", ")", ":", "nvalues", "=", "short", ".", "unpack_from", "(", "buf", ",", "header", ".", "size", ")", "[", "0", "]", "off", "=", "header", ".", "size", "+", "short", ".", "size", "+", "nvalues", "valskip", "=", "double", ".", "size", "# Check whether our expected packet size is the reported one", "assert", "(", "(", "valskip", "+", "1", ")", "*", "nvalues", "+", "short", ".", "size", "+", "header", ".", "size", ")", "==", "plen", "assert", "double", ".", "size", "==", "number", ".", "size", "result", "=", "[", "]", "for", "dstype", "in", "[", "ord", "(", "x", ")", "for", "x", "in", "buf", "[", "header", ".", "size", "+", "short", ".", "size", ":", "off", "]", "]", ":", "if", "dstype", "==", "DS_TYPE_COUNTER", ":", "result", ".", "append", "(", "(", "dstype", ",", "number", ".", "unpack_from", "(", "buf", ",", "off", ")", "[", "0", "]", ")", ")", "off", "+=", "valskip", "elif", "dstype", "==", "DS_TYPE_GAUGE", ":", "result", ".", "append", "(", "(", "dstype", ",", "double", ".", "unpack_from", "(", "buf", ",", "off", ")", "[", "0", "]", ")", ")", "off", "+=", "valskip", "elif", "dstype", "==", "DS_TYPE_DERIVE", ":", "result", ".", "append", "(", "(", "dstype", ",", "number", ".", "unpack_from", "(", "buf", ",", "off", ")", "[", "0", "]", ")", ")", "off", "+=", "valskip", "elif", "dstype", "==", "DS_TYPE_ABSOLUTE", ":", "result", ".", "append", "(", "(", "dstype", ",", "number", ".", "unpack_from", "(", "buf", ",", "off", ")", "[", "0", "]", ")", ")", "off", "+=", "valskip", "else", ":", "raise", "ValueError", "(", "\"DS type %i unsupported\"", "%", "dstype", ")", "return", "result" ]
Decodes a list of DS values in collectd network format
[ "Decodes", "a", "list", "of", "DS", "values", "in", "collectd", "network", "format" ]
python
train
38.310345
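The size assertion encodes the layout of a collectd VALUES part: a part header, a 2-byte count, all the one-byte DS type codes, then one 8-byte value per DS. Assuming `header`, `short`, and `double` are `struct.Struct` instances with the sizes used below (an assumption; their definitions live elsewhere in the module), the expected part length works out as:

```python
import struct

header = struct.Struct('!HH')  # part type + part length (assumed)
short = struct.Struct('!H')    # value count (assumed)
double = struct.Struct('<d')   # one 8-byte DS value (assumed)

def expected_part_length(nvalues):
    # one type byte plus one 8-byte value per DS, after header and count
    return (double.size + 1) * nvalues + short.size + header.size

print(expected_part_length(3))  # 33
```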
tensorflow/probability
tensorflow_probability/python/layers/distribution_layer.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L563-L583
def new(params, event_size, num_components, dtype=None, validate_args=False, name=None): """Create the distribution instance from a `params` vector.""" with tf.compat.v1.name_scope(name, 'CategoricalMixtureOfOneHotCategorical', [params, event_size, num_components]): dist = MixtureSameFamily.new( params, num_components, OneHotCategorical( event_size, validate_args=False, # So we can eval on simplex interior. name=name), validate_args=validate_args, name=name) # pylint: disable=protected-access dist._mean = functools.partial( _eval_all_one_hot, tfd.Distribution.prob, dist) dist.log_mean = functools.partial( _eval_all_one_hot, tfd.Distribution.log_prob, dist) # pylint: enable=protected-access return dist
[ "def", "new", "(", "params", ",", "event_size", ",", "num_components", ",", "dtype", "=", "None", ",", "validate_args", "=", "False", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "'CategoricalMixtureOfOneHotCategorical'", ",", "[", "params", ",", "event_size", ",", "num_components", "]", ")", ":", "dist", "=", "MixtureSameFamily", ".", "new", "(", "params", ",", "num_components", ",", "OneHotCategorical", "(", "event_size", ",", "validate_args", "=", "False", ",", "# So we can eval on simplex interior.", "name", "=", "name", ")", ",", "validate_args", "=", "validate_args", ",", "name", "=", "name", ")", "# pylint: disable=protected-access", "dist", ".", "_mean", "=", "functools", ".", "partial", "(", "_eval_all_one_hot", ",", "tfd", ".", "Distribution", ".", "prob", ",", "dist", ")", "dist", ".", "log_mean", "=", "functools", ".", "partial", "(", "_eval_all_one_hot", ",", "tfd", ".", "Distribution", ".", "log_prob", ",", "dist", ")", "# pylint: enable=protected-access", "return", "dist" ]
Create the distribution instance from a `params` vector.
[ "Create", "the", "distribution", "instance", "from", "a", "params", "vector", "." ]
python
test
42.380952
abarmat/python-oembed
oembed/__init__.py
https://github.com/abarmat/python-oembed/blob/bb3d14213e0ac91aa998af67182826b6f1529fe6/oembed/__init__.py#L339-L369
def fetch(self, url): ''' Fetch url and create a response object according to the mime-type. Args: url: The url to fetch data from Returns: OEmbedResponse object according to data fetched ''' opener = self._urllib.build_opener() opener.addheaders = self._requestHeaders.items() response = opener.open(url) headers = response.info() raw = response.read() raw = raw.decode('utf8') if not 'Content-Type' in headers: raise OEmbedError('Missing mime-type in response') if headers['Content-Type'].find('application/xml') != -1 or \ headers['Content-Type'].find('text/xml') != -1: response = OEmbedResponse.newFromXML(raw) elif headers['Content-Type'].find('application/json') != -1 or \ headers['Content-Type'].find('text/javascript') != -1 or \ headers['Content-Type'].find('text/json') != -1: response = OEmbedResponse.newFromJSON(raw) else: raise OEmbedError('Invalid mime-type in response - %s' % headers['Content-Type']) return response
[ "def", "fetch", "(", "self", ",", "url", ")", ":", "opener", "=", "self", ".", "_urllib", ".", "build_opener", "(", ")", "opener", ".", "addheaders", "=", "self", ".", "_requestHeaders", ".", "items", "(", ")", "response", "=", "opener", ".", "open", "(", "url", ")", "headers", "=", "response", ".", "info", "(", ")", "raw", "=", "response", ".", "read", "(", ")", "raw", "=", "raw", ".", "decode", "(", "'utf8'", ")", "if", "not", "'Content-Type'", "in", "headers", ":", "raise", "OEmbedError", "(", "'Missing mime-type in response'", ")", "if", "headers", "[", "'Content-Type'", "]", ".", "find", "(", "'application/xml'", ")", "!=", "-", "1", "or", "headers", "[", "'Content-Type'", "]", ".", "find", "(", "'text/xml'", ")", "!=", "-", "1", ":", "response", "=", "OEmbedResponse", ".", "newFromXML", "(", "raw", ")", "elif", "headers", "[", "'Content-Type'", "]", ".", "find", "(", "'application/json'", ")", "!=", "-", "1", "or", "headers", "[", "'Content-Type'", "]", ".", "find", "(", "'text/javascript'", ")", "!=", "-", "1", "or", "headers", "[", "'Content-Type'", "]", ".", "find", "(", "'text/json'", ")", "!=", "-", "1", ":", "response", "=", "OEmbedResponse", ".", "newFromJSON", "(", "raw", ")", "else", ":", "raise", "OEmbedError", "(", "'Invalid mime-type in response - %s'", "%", "headers", "[", "'Content-Type'", "]", ")", "return", "response" ]
Fetch url and create a response object according to the mime-type. Args: url: The url to fetch data from Returns: OEmbedResponse object according to data fetched
[ "Fetch", "url", "and", "create", "a", "response", "object", "according", "to", "the", "mime", "-", "type", "." ]
python
train
37
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L746-L779
def _bindDomain(self, domain_name, create=False, block=True): """ Return the Boto Domain object representing the SDB domain of the given name. If the domain does not exist and `create` is True, it will be created. :param str domain_name: the name of the domain to bind to :param bool create: True if domain should be created if it doesn't exist :param bool block: If False, return None if the domain doesn't exist. If True, wait until domain appears. This parameter is ignored if create is True. :rtype: Domain|None :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the retry timeout expires. """ log.debug("Binding to job store domain '%s'.", domain_name) retryargs = dict(predicate=lambda e: no_such_sdb_domain(e) or sdb_unavailable(e)) if not block: retryargs['timeout'] = 15 for attempt in retry_sdb(**retryargs): with attempt: try: return self.db.get_domain(domain_name) except SDBResponseError as e: if no_such_sdb_domain(e): if create: return self.db.create_domain(domain_name) elif block: raise else: return None else: raise
[ "def", "_bindDomain", "(", "self", ",", "domain_name", ",", "create", "=", "False", ",", "block", "=", "True", ")", ":", "log", ".", "debug", "(", "\"Binding to job store domain '%s'.\"", ",", "domain_name", ")", "retryargs", "=", "dict", "(", "predicate", "=", "lambda", "e", ":", "no_such_sdb_domain", "(", "e", ")", "or", "sdb_unavailable", "(", "e", ")", ")", "if", "not", "block", ":", "retryargs", "[", "'timeout'", "]", "=", "15", "for", "attempt", "in", "retry_sdb", "(", "*", "*", "retryargs", ")", ":", "with", "attempt", ":", "try", ":", "return", "self", ".", "db", ".", "get_domain", "(", "domain_name", ")", "except", "SDBResponseError", "as", "e", ":", "if", "no_such_sdb_domain", "(", "e", ")", ":", "if", "create", ":", "return", "self", ".", "db", ".", "create_domain", "(", "domain_name", ")", "elif", "block", ":", "raise", "else", ":", "return", "None", "else", ":", "raise" ]
Return the Boto Domain object representing the SDB domain of the given name. If the domain does not exist and `create` is True, it will be created. :param str domain_name: the name of the domain to bind to :param bool create: True if domain should be created if it doesn't exist :param bool block: If False, return None if the domain doesn't exist. If True, wait until domain appears. This parameter is ignored if create is True. :rtype: Domain|None :raises SDBResponseError: If `block` is True and the domain still doesn't exist after the retry timeout expires.
[ "Return", "the", "Boto", "Domain", "object", "representing", "the", "SDB", "domain", "of", "the", "given", "name", ".", "If", "the", "domain", "does", "not", "exist", "and", "create", "is", "True", "it", "will", "be", "created", "." ]
python
train
43.235294
meyersj/geotweet
geotweet/twitter/stream_steps.py
https://github.com/meyersj/geotweet/blob/1a6b55f98adf34d1b91f172d9187d599616412d9/geotweet/twitter/stream_steps.py#L53-L58
def validate_geotweet(self, record): """ check that stream record is actual tweet with coordinates """ if record and self._validate('user', record) \ and self._validate('coordinates', record): return True return False
[ "def", "validate_geotweet", "(", "self", ",", "record", ")", ":", "if", "record", "and", "self", ".", "_validate", "(", "'user'", ",", "record", ")", "and", "self", ".", "_validate", "(", "'coordinates'", ",", "record", ")", ":", "return", "True", "return", "False" ]
check that stream record is actual tweet with coordinates
[ "check", "that", "stream", "record", "is", "actual", "tweet", "with", "coordinates" ]
python
train
44.166667
3DLIRIOUS/MeshLabXML
meshlabxml/transform.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transform.py#L363-L413
def function(script, x_func='x', y_func='y', z_func='z'): """Geometric function using muparser lib to generate new Coordinates You can change x, y, z for every vertex according to the function specified. See help(mlx.muparser_ref) for muparser reference documentation. It's possible to use the following per-vertex variables in the expression: Variables (per vertex): x, y, z (coordinates) nx, ny, nz (normal) r, g, b, a (color) q (quality) rad (radius) vi (vertex index) vtu, vtv (texture coordinates) ti (texture index) vsel (is the vertex selected? 1 yes, 0 no) and all custom vertex attributes already defined by user. Args: x_func (str): function to generate new coordinates for x y_func (str): function to generate new coordinates for y z_func (str): function to generate new coordinates for z Layer stack: No impacts MeshLab versions: 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Geometric Function">\n', ' <Param name="x" ', 'value="{}" '.format(str(x_func).replace('&', '&amp;').replace('<', '&lt;')), 'description="func x = " ', 'type="RichString" ', '/>\n', ' <Param name="y" ', 'value="{}" '.format(str(y_func).replace('&', '&amp;').replace('<', '&lt;')), 'description="func y = " ', 'type="RichString" ', '/>\n', ' <Param name="z" ', 'value="{}" '.format(str(z_func).replace('&', '&amp;').replace('<', '&lt;')), 'description="func z = " ', 'type="RichString" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
[ "def", "function", "(", "script", ",", "x_func", "=", "'x'", ",", "y_func", "=", "'y'", ",", "z_func", "=", "'z'", ")", ":", "filter_xml", "=", "''", ".", "join", "(", "[", "' <filter name=\"Geometric Function\">\\n'", ",", "' <Param name=\"x\" '", ",", "'value=\"{}\" '", ".", "format", "(", "str", "(", "x_func", ")", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ")", ",", "'description=\"func x = \" '", ",", "'type=\"RichString\" '", ",", "'/>\\n'", ",", "' <Param name=\"y\" '", ",", "'value=\"{}\" '", ".", "format", "(", "str", "(", "y_func", ")", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ")", ",", "'description=\"func y = \" '", ",", "'type=\"RichString\" '", ",", "'/>\\n'", ",", "' <Param name=\"z\" '", ",", "'value=\"{}\" '", ".", "format", "(", "str", "(", "z_func", ")", ".", "replace", "(", "'&'", ",", "'&amp;'", ")", ".", "replace", "(", "'<'", ",", "'&lt;'", ")", ")", ",", "'description=\"func z = \" '", ",", "'type=\"RichString\" '", ",", "'/>\\n'", ",", "' </filter>\\n'", "]", ")", "util", ".", "write_filter", "(", "script", ",", "filter_xml", ")", "return", "None" ]
Geometric function using muparser lib to generate new Coordinates You can change x, y, z for every vertex according to the function specified. See help(mlx.muparser_ref) for muparser reference documentation. It's possible to use the following per-vertex variables in the expression: Variables (per vertex): x, y, z (coordinates) nx, ny, nz (normal) r, g, b, a (color) q (quality) rad (radius) vi (vertex index) vtu, vtv (texture coordinates) ti (texture index) vsel (is the vertex selected? 1 yes, 0 no) and all custom vertex attributes already defined by user. Args: x_func (str): function to generate new coordinates for x y_func (str): function to generate new coordinates for y z_func (str): function to generate new coordinates for z Layer stack: No impacts MeshLab versions: 1.3.4BETA
[ "Geometric", "function", "using", "muparser", "lib", "to", "generate", "new", "Coordinates" ]
python
test
33.882353
hannes-brt/hebel
hebel/pycuda_ops/cublas.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cublas.py#L2387-L2398
def cublasDger(handle, m, n, alpha, x, incx, y, incy, A, lda): """ Rank-1 operation on real general matrix. """ status = _libcublas.cublasDger_v2(handle, m, n, ctypes.byref(ctypes.c_double(alpha)), int(x), incx, int(y), incy, int(A), lda) cublasCheckStatus(status)
[ "def", "cublasDger", "(", "handle", ",", "m", ",", "n", ",", "alpha", ",", "x", ",", "incx", ",", "y", ",", "incy", ",", "A", ",", "lda", ")", ":", "status", "=", "_libcublas", ".", "cublasDger_v2", "(", "handle", ",", "m", ",", "n", ",", "ctypes", ".", "byref", "(", "ctypes", ".", "c_double", "(", "alpha", ")", ")", ",", "int", "(", "x", ")", ",", "incx", ",", "int", "(", "y", ")", ",", "incy", ",", "int", "(", "A", ")", ",", "lda", ")", "cublasCheckStatus", "(", "status", ")" ]
Rank-1 operation on real general matrix.
[ "Rank", "-", "1", "operation", "on", "real", "general", "matrix", "." ]
python
train
35.916667
JohnVinyard/zounds
zounds/timeseries/audiosamples.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/timeseries/audiosamples.py#L149-L158
def mono(self): """ Return this instance summed to mono. If the instance is already mono, this is a no-op. """ if self.channels == 1: return self x = self.sum(axis=1) * 0.5 y = x * 0.5 return AudioSamples(y, self.samplerate)
[ "def", "mono", "(", "self", ")", ":", "if", "self", ".", "channels", "==", "1", ":", "return", "self", "x", "=", "self", ".", "sum", "(", "axis", "=", "1", ")", "*", "0.5", "y", "=", "x", "*", "0.5", "return", "AudioSamples", "(", "y", ",", "self", ".", "samplerate", ")" ]
Return this instance summed to mono. If the instance is already mono, this is a no-op.
[ "Return", "this", "instance", "summed", "to", "mono", ".", "If", "the", "instance", "is", "already", "mono", "this", "is", "a", "no", "-", "op", "." ]
python
train
29.2
pydot/pydot-ng
pydot_ng/__init__.py
https://github.com/pydot/pydot-ng/blob/16f39800b6f5dc28d291a4d7763bbac04b9efe72/pydot_ng/__init__.py#L402-L454
def __find_executables(path): """Used by find_graphviz path - single directory as a string If any of the executables are found, it will return a dictionary containing the program names as keys and their paths as values. Otherwise returns None """ success = False progs = { "dot": "", "twopi": "", "neato": "", "circo": "", "fdp": "", "sfdp": "", } was_quoted = False path = path.strip() if path.startswith('"') and path.endswith('"'): path = path[1:-1] was_quoted = True if not os.path.isdir(path): return None for prg in progs: if progs[prg]: continue prg_path = os.path.join(path, prg) prg_exe_path = prg_path + ".exe" if os.path.exists(prg_path): if was_quoted: prg_path = "\"{}\"".format(prg_path) progs[prg] = prg_path success = True elif os.path.exists(prg_exe_path): if was_quoted: prg_exe_path = "\"{}\"".format(prg_exe_path) progs[prg] = prg_exe_path success = True if success: return progs return None
[ "def", "__find_executables", "(", "path", ")", ":", "success", "=", "False", "progs", "=", "{", "\"dot\"", ":", "\"\"", ",", "\"twopi\"", ":", "\"\"", ",", "\"neato\"", ":", "\"\"", ",", "\"circo\"", ":", "\"\"", ",", "\"fdp\"", ":", "\"\"", ",", "\"sfdp\"", ":", "\"\"", ",", "}", "was_quoted", "=", "False", "path", "=", "path", ".", "strip", "(", ")", "if", "path", ".", "startswith", "(", "'\"'", ")", "and", "path", ".", "endswith", "(", "'\"'", ")", ":", "path", "=", "path", "[", "1", ":", "-", "1", "]", "was_quoted", "=", "True", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "return", "None", "for", "prg", "in", "progs", ":", "if", "progs", "[", "prg", "]", ":", "continue", "prg_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "prg", ")", "prg_exe_path", "=", "prg_path", "+", "\".exe\"", "if", "os", ".", "path", ".", "exists", "(", "prg_path", ")", ":", "if", "was_quoted", ":", "prg_path", "=", "\"\\\"{}\\\"\"", ".", "format", "(", "prg_path", ")", "progs", "[", "prg", "]", "=", "prg_path", "success", "=", "True", "elif", "os", ".", "path", ".", "exists", "(", "prg_exe_path", ")", ":", "if", "was_quoted", ":", "prg_exe_path", "=", "\"\\\"{}\\\"\"", ".", "format", "(", "prg_exe_path", ")", "progs", "[", "prg", "]", "=", "prg_exe_path", "success", "=", "True", "if", "success", ":", "return", "progs", "return", "None" ]
Used by find_graphviz path - single directory as a string If any of the executables are found, it will return a dictionary containing the program names as keys and their paths as values. Otherwise returns None
[ "Used", "by", "find_graphviz" ]
python
train
22.09434
funilrys/PyFunceble
PyFunceble/config.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/config.py#L195-L225
def _set_path_to_configs(cls, path_to_config): """ Set the paths to the configuration files. :param path_to_config: The possible path to the config to load. :type path_to_config: str :return: The path to the config to read (0), the path to the default configuration to read as fallback.(1) :rtype: tuple """ if not path_to_config.endswith(PyFunceble.directory_separator): # The path to the config does not ends with the directory separator. # We initiate the default and the parsed variable with the directory separator. default = parsed = path_to_config + PyFunceble.directory_separator else: # The path to the config does ends with the directory separator. # We initiate the default and the parsed variable. default = parsed = path_to_config # We append the `CONFIGURATION_FILENAME` to the parsed variable. parsed += PyFunceble.CONFIGURATION_FILENAME # And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable. default += PyFunceble.DEFAULT_CONFIGURATION_FILENAME # We finaly return a tuple which contain both informations. return (parsed, default)
[ "def", "_set_path_to_configs", "(", "cls", ",", "path_to_config", ")", ":", "if", "not", "path_to_config", ".", "endswith", "(", "PyFunceble", ".", "directory_separator", ")", ":", "# The path to the config does not ends with the directory separator.", "# We initiate the default and the parsed variable with the directory separator.", "default", "=", "parsed", "=", "path_to_config", "+", "PyFunceble", ".", "directory_separator", "else", ":", "# The path to the config does ends with the directory separator.", "# We initiate the default and the parsed variable.", "default", "=", "parsed", "=", "path_to_config", "# We append the `CONFIGURATION_FILENAME` to the parsed variable.", "parsed", "+=", "PyFunceble", ".", "CONFIGURATION_FILENAME", "# And we append the `DEFAULT_CONFIGURATION_FILENAME` to the default variable.", "default", "+=", "PyFunceble", ".", "DEFAULT_CONFIGURATION_FILENAME", "# We finaly return a tuple which contain both informations.", "return", "(", "parsed", ",", "default", ")" ]
Set the paths to the configuration files. :param path_to_config: The possible path to the config to load. :type path_to_config: str :return: The path to the config to read (0), the path to the default configuration to read as fallback.(1) :rtype: tuple
[ "Set", "the", "paths", "to", "the", "configuration", "files", "." ]
python
test
40.677419
paramiko/paramiko
paramiko/transport.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/transport.py#L1509-L1560
def auth_interactive(self, username, handler, submethods=""): """ Authenticate to the server interactively. A handler is used to answer arbitrary questions from the server. On many servers, this is just a dumb wrapper around PAM. This method will block until the authentication succeeds or fails, peroidically calling the handler asynchronously to get answers to authentication questions. The handler may be called more than once if the server continues to ask questions. The handler is expected to be a callable that will handle calls of the form: ``handler(title, instructions, prompt_list)``. The ``title`` is meant to be a dialog-window title, and the ``instructions`` are user instructions (both are strings). ``prompt_list`` will be a list of prompts, each prompt being a tuple of ``(str, bool)``. The string is the prompt and the boolean indicates whether the user text should be echoed. A sample call would thus be: ``handler('title', 'instructions', [('Password:', False)])``. The handler should return a list or tuple of answers to the server's questions. If the server requires multi-step authentication (which is very rare), this method will return a list of auth types permissible for the next step. Otherwise, in the normal case, an empty list is returned. :param str username: the username to authenticate as :param callable handler: a handler for responding to server questions :param str submethods: a string list of desired submethods (optional) :return: list of auth types permissible for the next stage of authentication (normally empty). :raises: `.BadAuthenticationType` -- if public-key authentication isn't allowed by the server for this user :raises: `.AuthenticationException` -- if the authentication failed :raises: `.SSHException` -- if there was a network error .. versionadded:: 1.5 """ if (not self.active) or (not self.initial_kex_done): # we should never try to authenticate unless we're on a secure link raise SSHException("No existing session") my_event = threading.Event() self.auth_handler = AuthHandler(self) self.auth_handler.auth_interactive( username, handler, my_event, submethods ) return self.auth_handler.wait_for_response(my_event)
[ "def", "auth_interactive", "(", "self", ",", "username", ",", "handler", ",", "submethods", "=", "\"\"", ")", ":", "if", "(", "not", "self", ".", "active", ")", "or", "(", "not", "self", ".", "initial_kex_done", ")", ":", "# we should never try to authenticate unless we're on a secure link", "raise", "SSHException", "(", "\"No existing session\"", ")", "my_event", "=", "threading", ".", "Event", "(", ")", "self", ".", "auth_handler", "=", "AuthHandler", "(", "self", ")", "self", ".", "auth_handler", ".", "auth_interactive", "(", "username", ",", "handler", ",", "my_event", ",", "submethods", ")", "return", "self", ".", "auth_handler", ".", "wait_for_response", "(", "my_event", ")" ]
Authenticate to the server interactively. A handler is used to answer arbitrary questions from the server. On many servers, this is just a dumb wrapper around PAM. This method will block until the authentication succeeds or fails, peroidically calling the handler asynchronously to get answers to authentication questions. The handler may be called more than once if the server continues to ask questions. The handler is expected to be a callable that will handle calls of the form: ``handler(title, instructions, prompt_list)``. The ``title`` is meant to be a dialog-window title, and the ``instructions`` are user instructions (both are strings). ``prompt_list`` will be a list of prompts, each prompt being a tuple of ``(str, bool)``. The string is the prompt and the boolean indicates whether the user text should be echoed. A sample call would thus be: ``handler('title', 'instructions', [('Password:', False)])``. The handler should return a list or tuple of answers to the server's questions. If the server requires multi-step authentication (which is very rare), this method will return a list of auth types permissible for the next step. Otherwise, in the normal case, an empty list is returned. :param str username: the username to authenticate as :param callable handler: a handler for responding to server questions :param str submethods: a string list of desired submethods (optional) :return: list of auth types permissible for the next stage of authentication (normally empty). :raises: `.BadAuthenticationType` -- if public-key authentication isn't allowed by the server for this user :raises: `.AuthenticationException` -- if the authentication failed :raises: `.SSHException` -- if there was a network error .. versionadded:: 1.5
[ "Authenticate", "to", "the", "server", "interactively", ".", "A", "handler", "is", "used", "to", "answer", "arbitrary", "questions", "from", "the", "server", ".", "On", "many", "servers", "this", "is", "just", "a", "dumb", "wrapper", "around", "PAM", "." ]
python
train
48.269231
theiviaxx/python-perforce
perforce/models.py
https://github.com/theiviaxx/python-perforce/blob/01a3b01fe5949126fa0097d9a8ad386887823b5a/perforce/models.py#L585-L601
def remove(self, rev, permanent=False): """Removes a revision from this changelist :param rev: Revision to remove :type rev: :class:`.Revision` :param permanent: Whether or not we need to set the changelist to default :type permanent: bool """ if not isinstance(rev, Revision): raise TypeError('argument needs to be an instance of Revision') if rev not in self: raise ValueError('{} not in changelist'.format(rev)) self._files.remove(rev) if not permanent: rev.changelist = self._connection.default
[ "def", "remove", "(", "self", ",", "rev", ",", "permanent", "=", "False", ")", ":", "if", "not", "isinstance", "(", "rev", ",", "Revision", ")", ":", "raise", "TypeError", "(", "'argument needs to be an instance of Revision'", ")", "if", "rev", "not", "in", "self", ":", "raise", "ValueError", "(", "'{} not in changelist'", ".", "format", "(", "rev", ")", ")", "self", ".", "_files", ".", "remove", "(", "rev", ")", "if", "not", "permanent", ":", "rev", ".", "changelist", "=", "self", ".", "_connection", ".", "default" ]
Removes a revision from this changelist :param rev: Revision to remove :type rev: :class:`.Revision` :param permanent: Whether or not we need to set the changelist to default :type permanent: bool
[ "Removes", "a", "revision", "from", "this", "changelist" ]
python
train
35.352941
consbio/parserutils
parserutils/urls.py
https://github.com/consbio/parserutils/blob/f13f80db99ed43479336b116e38512e3566e4623/parserutils/urls.py#L59-L77
def url_to_parts(url): """ Split url urlsplit style, but return path as a list and query as a dict """ if not url: return None scheme, netloc, path, query, fragment = _urlsplit(url) if not path or path == '/': path = [] else: path = path.strip('/').split('/') if not query: query = {} else: query = _parse_qs(query) return _urllib_parse.SplitResult(scheme, netloc, path, query, fragment)
[ "def", "url_to_parts", "(", "url", ")", ":", "if", "not", "url", ":", "return", "None", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", "=", "_urlsplit", "(", "url", ")", "if", "not", "path", "or", "path", "==", "'/'", ":", "path", "=", "[", "]", "else", ":", "path", "=", "path", ".", "strip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "if", "not", "query", ":", "query", "=", "{", "}", "else", ":", "query", "=", "_parse_qs", "(", "query", ")", "return", "_urllib_parse", ".", "SplitResult", "(", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", ")" ]
Split url urlsplit style, but return path as a list and query as a dict
[ "Split", "url", "urlsplit", "style", "but", "return", "path", "as", "a", "list", "and", "query", "as", "a", "dict" ]
python
train
23.473684
saltstack/salt
salt/modules/localemod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/localemod.py#L231-L251
def avail(locale): ''' Check if a locale is available. .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' locale.avail 'en_US.UTF-8' ''' try: normalized_locale = salt.utils.locales.normalize_locale(locale) except IndexError: log.error('Unable to validate locale "%s"', locale) return False avail_locales = __salt__['locale.list_avail']() locale_exists = next((True for x in avail_locales if salt.utils.locales.normalize_locale(x.strip()) == normalized_locale), False) return locale_exists
[ "def", "avail", "(", "locale", ")", ":", "try", ":", "normalized_locale", "=", "salt", ".", "utils", ".", "locales", ".", "normalize_locale", "(", "locale", ")", "except", "IndexError", ":", "log", ".", "error", "(", "'Unable to validate locale \"%s\"'", ",", "locale", ")", "return", "False", "avail_locales", "=", "__salt__", "[", "'locale.list_avail'", "]", "(", ")", "locale_exists", "=", "next", "(", "(", "True", "for", "x", "in", "avail_locales", "if", "salt", ".", "utils", ".", "locales", ".", "normalize_locale", "(", "x", ".", "strip", "(", ")", ")", "==", "normalized_locale", ")", ",", "False", ")", "return", "locale_exists" ]
Check if a locale is available. .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' locale.avail 'en_US.UTF-8'
[ "Check", "if", "a", "locale", "is", "available", "." ]
python
train
28.238095
spacetelescope/drizzlepac
drizzlepac/util.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/util.py#L920-L935
def update_input(filelist, ivmlist=None, removed_files=None): """ Removes files flagged to be removed from the input filelist. Removes the corresponding ivm files if present. """ newfilelist = [] if removed_files == []: return filelist, ivmlist else: sci_ivm = list(zip(filelist, ivmlist)) for f in removed_files: result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ] ivmlist = [el[1] for el in sci_ivm] newfilelist = [el[0] for el in sci_ivm] return newfilelist, ivmlist
[ "def", "update_input", "(", "filelist", ",", "ivmlist", "=", "None", ",", "removed_files", "=", "None", ")", ":", "newfilelist", "=", "[", "]", "if", "removed_files", "==", "[", "]", ":", "return", "filelist", ",", "ivmlist", "else", ":", "sci_ivm", "=", "list", "(", "zip", "(", "filelist", ",", "ivmlist", ")", ")", "for", "f", "in", "removed_files", ":", "result", "=", "[", "sci_ivm", ".", "remove", "(", "t", ")", "for", "t", "in", "sci_ivm", "if", "t", "[", "0", "]", "==", "f", "]", "ivmlist", "=", "[", "el", "[", "1", "]", "for", "el", "in", "sci_ivm", "]", "newfilelist", "=", "[", "el", "[", "0", "]", "for", "el", "in", "sci_ivm", "]", "return", "newfilelist", ",", "ivmlist" ]
Removes files flagged to be removed from the input filelist. Removes the corresponding ivm files if present.
[ "Removes", "files", "flagged", "to", "be", "removed", "from", "the", "input", "filelist", ".", "Removes", "the", "corresponding", "ivm", "files", "if", "present", "." ]
python
train
34.3125
briancappello/flask-unchained
flask_unchained/bundles/security/services/security_service.py
https://github.com/briancappello/flask-unchained/blob/4d536cb90e2cc4829c1c05f2c74d3e22901a1399/flask_unchained/bundles/security/services/security_service.py#L209-L227
def send_email_confirmation_instructions(self, user): """ Sends the confirmation instructions email for the specified user. Sends signal `confirm_instructions_sent`. :param user: The user to send the instructions to. """ token = self.security_utils_service.generate_confirmation_token(user) confirmation_link = url_for('security_controller.confirm_email', token=token, _external=True) self.send_mail( _('flask_unchained.bundles.security:email_subject.email_confirmation_instructions'), to=user.email, template='security/email/email_confirmation_instructions.html', user=user, confirmation_link=confirmation_link) confirm_instructions_sent.send(app._get_current_object(), user=user, token=token)
[ "def", "send_email_confirmation_instructions", "(", "self", ",", "user", ")", ":", "token", "=", "self", ".", "security_utils_service", ".", "generate_confirmation_token", "(", "user", ")", "confirmation_link", "=", "url_for", "(", "'security_controller.confirm_email'", ",", "token", "=", "token", ",", "_external", "=", "True", ")", "self", ".", "send_mail", "(", "_", "(", "'flask_unchained.bundles.security:email_subject.email_confirmation_instructions'", ")", ",", "to", "=", "user", ".", "email", ",", "template", "=", "'security/email/email_confirmation_instructions.html'", ",", "user", "=", "user", ",", "confirmation_link", "=", "confirmation_link", ")", "confirm_instructions_sent", ".", "send", "(", "app", ".", "_get_current_object", "(", ")", ",", "user", "=", "user", ",", "token", "=", "token", ")" ]
Sends the confirmation instructions email for the specified user. Sends signal `confirm_instructions_sent`. :param user: The user to send the instructions to.
[ "Sends", "the", "confirmation", "instructions", "email", "for", "the", "specified", "user", "." ]
python
train
46.578947
rix0rrr/gcl
gcl/util.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/util.py#L99-L149
def walk(value, walker, path=None, seen=None): """Walks the _evaluated_ tree of the given GCL tuple. The appropriate methods of walker will be invoked for every element in the tree. """ seen = seen or set() path = path or [] # Recursion if id(value) in seen: walker.visitRecursion(path) return # Error if isinstance(value, Exception): walker.visitError(path, value) return # List if isinstance(value, list): # Not actually a tuple, but okay recurse = walker.enterList(value, path) if not recurse: return next_walker = walker if recurse is True else recurse with TempSetAdd(seen, id(value)): for i, x in enumerate(value): walk(x, next_walker, path=path + ['[%d]' % i], seen=seen) walker.leaveList(value, path) return # Scalar if not isinstance(value, framework.TupleLike): walker.visitScalar(path, value) return # Tuple recurse = walker.enterTuple(value, path) if not recurse: return next_walker = walker if recurse is True else recurse with TempSetAdd(seen, id(value)): keys = sorted(value.keys()) for key in keys: key_path = path + [key] elm = get_or_error(value, key) walk(elm, next_walker, path=key_path, seen=seen) walker.leaveTuple(value, path)
[ "def", "walk", "(", "value", ",", "walker", ",", "path", "=", "None", ",", "seen", "=", "None", ")", ":", "seen", "=", "seen", "or", "set", "(", ")", "path", "=", "path", "or", "[", "]", "# Recursion", "if", "id", "(", "value", ")", "in", "seen", ":", "walker", ".", "visitRecursion", "(", "path", ")", "return", "# Error", "if", "isinstance", "(", "value", ",", "Exception", ")", ":", "walker", ".", "visitError", "(", "path", ",", "value", ")", "return", "# List", "if", "isinstance", "(", "value", ",", "list", ")", ":", "# Not actually a tuple, but okay", "recurse", "=", "walker", ".", "enterList", "(", "value", ",", "path", ")", "if", "not", "recurse", ":", "return", "next_walker", "=", "walker", "if", "recurse", "is", "True", "else", "recurse", "with", "TempSetAdd", "(", "seen", ",", "id", "(", "value", ")", ")", ":", "for", "i", ",", "x", "in", "enumerate", "(", "value", ")", ":", "walk", "(", "x", ",", "next_walker", ",", "path", "=", "path", "+", "[", "'[%d]'", "%", "i", "]", ",", "seen", "=", "seen", ")", "walker", ".", "leaveList", "(", "value", ",", "path", ")", "return", "# Scalar", "if", "not", "isinstance", "(", "value", ",", "framework", ".", "TupleLike", ")", ":", "walker", ".", "visitScalar", "(", "path", ",", "value", ")", "return", "# Tuple", "recurse", "=", "walker", ".", "enterTuple", "(", "value", ",", "path", ")", "if", "not", "recurse", ":", "return", "next_walker", "=", "walker", "if", "recurse", "is", "True", "else", "recurse", "with", "TempSetAdd", "(", "seen", ",", "id", "(", "value", ")", ")", ":", "keys", "=", "sorted", "(", "value", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "key_path", "=", "path", "+", "[", "key", "]", "elm", "=", "get_or_error", "(", "value", ",", "key", ")", "walk", "(", "elm", ",", "next_walker", ",", "path", "=", "key_path", ",", "seen", "=", "seen", ")", "walker", ".", "leaveTuple", "(", "value", ",", "path", ")" ]
Walks the _evaluated_ tree of the given GCL tuple. The appropriate methods of walker will be invoked for every element in the tree.
[ "Walks", "the", "_evaluated_", "tree", "of", "the", "given", "GCL", "tuple", "." ]
python
train
24.411765
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L1009-L1041
def shift_multi( x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1 ): """Shift images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). others : args See ``tl.prepro.shift``. Returns ------- numpy.array A list of processed images. """ h, w = x[0].shape[row_index], x[0].shape[col_index] if is_random: tx = np.random.uniform(-hrg, hrg) * h ty = np.random.uniform(-wrg, wrg) * w else: tx, ty = hrg * h, wrg * w translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) transform_matrix = translation_matrix # no need to do offset results = [] for data in x: results.append(affine_transform(data, transform_matrix, channel_index, fill_mode, cval, order)) return np.asarray(results)
[ "def", "shift_multi", "(", "x", ",", "wrg", "=", "0.1", ",", "hrg", "=", "0.1", ",", "is_random", "=", "False", ",", "row_index", "=", "0", ",", "col_index", "=", "1", ",", "channel_index", "=", "2", ",", "fill_mode", "=", "'nearest'", ",", "cval", "=", "0.", ",", "order", "=", "1", ")", ":", "h", ",", "w", "=", "x", "[", "0", "]", ".", "shape", "[", "row_index", "]", ",", "x", "[", "0", "]", ".", "shape", "[", "col_index", "]", "if", "is_random", ":", "tx", "=", "np", ".", "random", ".", "uniform", "(", "-", "hrg", ",", "hrg", ")", "*", "h", "ty", "=", "np", ".", "random", ".", "uniform", "(", "-", "wrg", ",", "wrg", ")", "*", "w", "else", ":", "tx", ",", "ty", "=", "hrg", "*", "h", ",", "wrg", "*", "w", "translation_matrix", "=", "np", ".", "array", "(", "[", "[", "1", ",", "0", ",", "tx", "]", ",", "[", "0", ",", "1", ",", "ty", "]", ",", "[", "0", ",", "0", ",", "1", "]", "]", ")", "transform_matrix", "=", "translation_matrix", "# no need to do offset", "results", "=", "[", "]", "for", "data", "in", "x", ":", "results", ".", "append", "(", "affine_transform", "(", "data", ",", "transform_matrix", ",", "channel_index", ",", "fill_mode", ",", "cval", ",", "order", ")", ")", "return", "np", ".", "asarray", "(", "results", ")" ]
Shift images with the same arguments, randomly or non-randomly. Usually be used for image segmentation which x=[X, Y], X and Y should be matched. Parameters ----------- x : list of numpy.array List of images with dimension of [n_images, row, col, channel] (default). others : args See ``tl.prepro.shift``. Returns ------- numpy.array A list of processed images.
[ "Shift", "images", "with", "the", "same", "arguments", "randomly", "or", "non", "-", "randomly", ".", "Usually", "be", "used", "for", "image", "segmentation", "which", "x", "=", "[", "X", "Y", "]", "X", "and", "Y", "should", "be", "matched", "." ]
python
valid
32.636364
Parsl/parsl
parsl/app/app.py
https://github.com/Parsl/parsl/blob/d7afb3bc37f50dcf224ae78637944172edb35dac/parsl/app/app.py#L154-L186
def bash_app(function=None, data_flow_kernel=None, walltime=60, cache=False, executors='all'): """Decorator function for making bash apps. Parameters ---------- function : function Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis, for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the decorator is used alone, function will be the actual function being decorated, whereas if it is called with arguments, function will be None. Default is None. data_flow_kernel : DataFlowKernel The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None. walltime : int Walltime for app in seconds. Default is 60. executors : string or list Labels of the executors that this app can execute over. Default is 'all'. cache : bool Enable caching of the app call. Default is False. """ from parsl.app.bash import BashApp def decorator(func): def wrapper(f): return BashApp(f, data_flow_kernel=data_flow_kernel, walltime=walltime, cache=cache, executors=executors) return wrapper(func) if function is not None: return decorator(function) return decorator
[ "def", "bash_app", "(", "function", "=", "None", ",", "data_flow_kernel", "=", "None", ",", "walltime", "=", "60", ",", "cache", "=", "False", ",", "executors", "=", "'all'", ")", ":", "from", "parsl", ".", "app", ".", "bash", "import", "BashApp", "def", "decorator", "(", "func", ")", ":", "def", "wrapper", "(", "f", ")", ":", "return", "BashApp", "(", "f", ",", "data_flow_kernel", "=", "data_flow_kernel", ",", "walltime", "=", "walltime", ",", "cache", "=", "cache", ",", "executors", "=", "executors", ")", "return", "wrapper", "(", "func", ")", "if", "function", "is", "not", "None", ":", "return", "decorator", "(", "function", ")", "return", "decorator" ]
Decorator function for making bash apps. Parameters ---------- function : function Do not pass this keyword argument directly. This is needed in order to allow for omitted parenthesis, for example, `@bash_app` if using all defaults or `@bash_app(walltime=120)`. If the decorator is used alone, function will be the actual function being decorated, whereas if it is called with arguments, function will be None. Default is None. data_flow_kernel : DataFlowKernel The :class:`~parsl.dataflow.dflow.DataFlowKernel` responsible for managing this app. This can be omitted only after calling :meth:`parsl.dataflow.dflow.DataFlowKernelLoader.load`. Default is None. walltime : int Walltime for app in seconds. Default is 60. executors : string or list Labels of the executors that this app can execute over. Default is 'all'. cache : bool Enable caching of the app call. Default is False.
[ "Decorator", "function", "for", "making", "bash", "apps", "." ]
python
valid
45.121212
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L1866-L1902
def write_tables(fname, table_names=None, prefix=None, compress=False, local=False): """ Writes tables to a pandas.HDFStore file. Parameters ---------- fname : str File name for HDFStore. Will be opened in append mode and closed at the end of this function. table_names: list of str, optional, default None List of tables to write. If None, all registered tables will be written. prefix: str If not None, used to prefix the output table names so that multiple iterations can go in the same file. compress: boolean Whether to compress output file using standard HDF5-readable zlib compression, default False. """ if table_names is None: table_names = list_tables() tables = (get_table(t) for t in table_names) key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}' # set compression options to zlib level-1 if compress arg is True complib = compress and 'zlib' or None complevel = compress and 1 or 0 with pd.HDFStore(fname, mode='a', complib=complib, complevel=complevel) as store: for t in tables: # if local arg is True, store only local columns columns = None if local is True: columns = t.local_columns store[key_template.format(t.name)] = t.to_frame(columns=columns)
[ "def", "write_tables", "(", "fname", ",", "table_names", "=", "None", ",", "prefix", "=", "None", ",", "compress", "=", "False", ",", "local", "=", "False", ")", ":", "if", "table_names", "is", "None", ":", "table_names", "=", "list_tables", "(", ")", "tables", "=", "(", "get_table", "(", "t", ")", "for", "t", "in", "table_names", ")", "key_template", "=", "'{}/{{}}'", ".", "format", "(", "prefix", ")", "if", "prefix", "is", "not", "None", "else", "'{}'", "# set compression options to zlib level-1 if compress arg is True", "complib", "=", "compress", "and", "'zlib'", "or", "None", "complevel", "=", "compress", "and", "1", "or", "0", "with", "pd", ".", "HDFStore", "(", "fname", ",", "mode", "=", "'a'", ",", "complib", "=", "complib", ",", "complevel", "=", "complevel", ")", "as", "store", ":", "for", "t", "in", "tables", ":", "# if local arg is True, store only local columns", "columns", "=", "None", "if", "local", "is", "True", ":", "columns", "=", "t", ".", "local_columns", "store", "[", "key_template", ".", "format", "(", "t", ".", "name", ")", "]", "=", "t", ".", "to_frame", "(", "columns", "=", "columns", ")" ]
Writes tables to a pandas.HDFStore file. Parameters ---------- fname : str File name for HDFStore. Will be opened in append mode and closed at the end of this function. table_names: list of str, optional, default None List of tables to write. If None, all registered tables will be written. prefix: str If not None, used to prefix the output table names so that multiple iterations can go in the same file. compress: boolean Whether to compress output file using standard HDF5-readable zlib compression, default False.
[ "Writes", "tables", "to", "a", "pandas", ".", "HDFStore", "file", "." ]
python
train
36.891892
bachya/pyopenuv
example.py
https://github.com/bachya/pyopenuv/blob/f7c2f9dd99dd4e3b8b1f9e501ea17ce62a7ace46/example.py#L16-L41
async def run(websession: ClientSession): """Run.""" try: # Create a client: client = Client( '<API_KEY>', 39.7974509, -104.8887227, websession, altitude=1609.3) # Get current UV info: print('CURRENT UV DATA:') print(await client.uv_index()) # Get forecasted UV info: print() print('FORECASTED UV DATA:') print(await client.uv_forecast()) # Get UV protection window: print() print('UV PROTECTION WINDOW:') print(await client.uv_protection_window()) except OpenUvError as err: print(err)
[ "async", "def", "run", "(", "websession", ":", "ClientSession", ")", ":", "try", ":", "# Create a client:", "client", "=", "Client", "(", "'<API_KEY>'", ",", "39.7974509", ",", "-", "104.8887227", ",", "websession", ",", "altitude", "=", "1609.3", ")", "# Get current UV info:", "print", "(", "'CURRENT UV DATA:'", ")", "print", "(", "await", "client", ".", "uv_index", "(", ")", ")", "# Get forecasted UV info:", "print", "(", ")", "print", "(", "'FORECASTED UV DATA:'", ")", "print", "(", "await", "client", ".", "uv_forecast", "(", ")", ")", "# Get UV protection window:", "print", "(", ")", "print", "(", "'UV PROTECTION WINDOW:'", ")", "print", "(", "await", "client", ".", "uv_protection_window", "(", ")", ")", "except", "OpenUvError", "as", "err", ":", "print", "(", "err", ")" ]
Run.
[ "Run", "." ]
python
train
24.923077
troeger/opensubmit
executor/opensubmitexec/running.py
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/executor/opensubmitexec/running.py#L67-L75
def get_exitstatus(self): """Get the exit status of the program execution. Returns: int: Exit status as reported by the operating system, or None if it is not available. """ logger.debug("Exit status is {0}".format(self._spawn.exitstatus)) return self._spawn.exitstatus
[ "def", "get_exitstatus", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Exit status is {0}\"", ".", "format", "(", "self", ".", "_spawn", ".", "exitstatus", ")", ")", "return", "self", ".", "_spawn", ".", "exitstatus" ]
Get the exit status of the program execution. Returns: int: Exit status as reported by the operating system, or None if it is not available.
[ "Get", "the", "exit", "status", "of", "the", "program", "execution", "." ]
python
train
36.777778
bcbio/bcbio-nextgen
bcbio/structural/gatkcnv.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/structural/gatkcnv.py#L34-L54
def _run_paired(paired): """Run somatic variant calling pipeline. """ from bcbio.structural import titancna work_dir = _sv_workdir(paired.tumor_data) seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data), work_dir, paired) call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data) out = [] if paired.normal_data: out.append(paired.normal_data) if "sv" not in paired.tumor_data: paired.tumor_data["sv"] = [] paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv", "call_file": call_file, "vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header, _seg_to_vcf, paired.tumor_data), "seg": seg_files["seg"], "plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)}) out.append(paired.tumor_data) return out
[ "def", "_run_paired", "(", "paired", ")", ":", "from", "bcbio", ".", "structural", "import", "titancna", "work_dir", "=", "_sv_workdir", "(", "paired", ".", "tumor_data", ")", "seg_files", "=", "model_segments", "(", "tz", ".", "get_in", "(", "[", "\"depth\"", ",", "\"bins\"", ",", "\"normalized\"", "]", ",", "paired", ".", "tumor_data", ")", ",", "work_dir", ",", "paired", ")", "call_file", "=", "call_copy_numbers", "(", "seg_files", "[", "\"seg\"", "]", ",", "work_dir", ",", "paired", ".", "tumor_data", ")", "out", "=", "[", "]", "if", "paired", ".", "normal_data", ":", "out", ".", "append", "(", "paired", ".", "normal_data", ")", "if", "\"sv\"", "not", "in", "paired", ".", "tumor_data", ":", "paired", ".", "tumor_data", "[", "\"sv\"", "]", "=", "[", "]", "paired", ".", "tumor_data", "[", "\"sv\"", "]", ".", "append", "(", "{", "\"variantcaller\"", ":", "\"gatk-cnv\"", ",", "\"call_file\"", ":", "call_file", ",", "\"vrn_file\"", ":", "titancna", ".", "to_vcf", "(", "call_file", ",", "\"GATK4-CNV\"", ",", "_get_seg_header", ",", "_seg_to_vcf", ",", "paired", ".", "tumor_data", ")", ",", "\"seg\"", ":", "seg_files", "[", "\"seg\"", "]", ",", "\"plot\"", ":", "plot_model_segments", "(", "seg_files", ",", "work_dir", ",", "paired", ".", "tumor_data", ")", "}", ")", "out", ".", "append", "(", "paired", ".", "tumor_data", ")", "return", "out" ]
Run somatic variant calling pipeline.
[ "Run", "somatic", "variant", "calling", "pipeline", "." ]
python
train
50.761905
aheadley/python-crunchyroll
crunchyroll/apis/meta.py
https://github.com/aheadley/python-crunchyroll/blob/9bf2eb644f0d0f3e9dc21b95b8e355c6e2050178/crunchyroll/apis/meta.py#L251-L256
def list_manga_series(self, filter=None, content_type='jp_manga'): """Get a list of manga series """ result = self._manga_api.list_series(filter, content_type) return result
[ "def", "list_manga_series", "(", "self", ",", "filter", "=", "None", ",", "content_type", "=", "'jp_manga'", ")", ":", "result", "=", "self", ".", "_manga_api", ".", "list_series", "(", "filter", ",", "content_type", ")", "return", "result" ]
Get a list of manga series
[ "Get", "a", "list", "of", "manga", "series" ]
python
train
33.5
ewels/MultiQC
multiqc/plots/table.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/plots/table.py#L52-L345
def make_table (dt): """ Build the HTML needed for a MultiQC table. :param data: MultiQC datatable object """ table_id = dt.pconfig.get('id', 'table_{}'.format(''.join(random.sample(letters, 4))) ) table_id = report.save_htmlid(table_id) t_headers = OrderedDict() t_modal_headers = OrderedDict() t_rows = OrderedDict() dt.raw_vals = defaultdict(lambda: dict()) empty_cells = dict() hidden_cols = 1 table_title = dt.pconfig.get('table_title') if table_title is None: table_title = table_id.replace("_", " ").title() for idx, k, header in dt.get_headers_in_order(): rid = header['rid'] # Build the table header cell shared_key = '' if header.get('shared_key', None) is not None: shared_key = ' data-shared-key={}'.format(header['shared_key']) hide = '' muted = '' checked = ' checked="checked"' if header.get('hidden', False) is True: hide = 'hidden' muted = ' text-muted' checked = '' hidden_cols += 1 data_attr = 'data-dmax="{}" data-dmin="{}" data-namespace="{}" {}' \ .format(header['dmax'], header['dmin'], header['namespace'], shared_key) cell_contents = '<span class="mqc_table_tooltip" title="{}: {}">{}</span>' \ .format(header['namespace'], header['description'], header['title']) t_headers[rid] = '<th id="header_{rid}" class="{rid} {h}" {da}>{c}</th>' \ .format(rid=rid, h=hide, da=data_attr, c=cell_contents) empty_cells[rid] = '<td class="data-coloured {rid} {h}"></td>'.format(rid=rid, h=hide) # Build the modal table row t_modal_headers[rid] = """ <tr class="{rid}{muted}" style="background-color: rgba({col}, 0.15);"> <td class="sorthandle ui-sortable-handle">||</span></td> <td style="text-align:center;"> <input class="mqc_table_col_visible" type="checkbox" {checked} value="{rid}" data-target="#{tid}"> </td> <td>{name}</td> <td>{title}</td> <td>{desc}</td> <td>{col_id}</td> <td>{sk}</td> </tr>""".format( rid = rid, muted = muted, checked = checked, tid = table_id, col = header['colour'], name = header['namespace'], title = header['title'], desc = header['description'], col_id = '<code>{}</code>'.format(k), sk = header.get('shared_key', '') ) # Make a colour scale if header['scale'] == False: c_scale = None else: c_scale = mqc_colour.mqc_colour_scale(header['scale'], header['dmin'], header['dmax']) # Add the data table cells for (s_name, samp) in dt.data[idx].items(): if k in samp: val = samp[k] kname = '{}_{}'.format(header['namespace'], rid) dt.raw_vals[s_name][kname] = val if 'modify' in header and callable(header['modify']): val = header['modify'](val) try: dmin = header['dmin'] dmax = header['dmax'] percentage = ((float(val) - dmin) / (dmax - dmin)) * 100 percentage = min(percentage, 100) percentage = max(percentage, 0) except (ZeroDivisionError,ValueError): percentage = 0 try: valstring = str(header['format'].format(val)) except ValueError: try: valstring = str(header['format'].format(float(val))) except ValueError: valstring = str(val) except: valstring = str(val) # This is horrible, but Python locale settings are worse if config.thousandsSep_format is None: config.thousandsSep_format = '<span class="mqc_thousandSep"></span>' if config.decimalPoint_format is None: config.decimalPoint_format = '.' 
valstring = valstring.replace('.', 'DECIMAL').replace(',', 'THOUSAND') valstring = valstring.replace('DECIMAL', config.decimalPoint_format).replace('THOUSAND', config.thousandsSep_format) # Percentage suffixes etc valstring += header.get('suffix', '') # Conditional formatting cmatches = { cfck: False for cfc in config.table_cond_formatting_colours for cfck in cfc } # Find general rules followed by column-specific rules for cfk in ['all_columns', rid]: if cfk in config.table_cond_formatting_rules: # Loop through match types for ftype in cmatches.keys(): # Loop through array of comparison types for cmp in config.table_cond_formatting_rules[cfk].get(ftype, []): try: # Each comparison should be a dict with single key: val if 's_eq' in cmp and str(cmp['s_eq']).lower() == str(val).lower(): cmatches[ftype] = True if 's_contains' in cmp and str(cmp['s_contains']).lower() in str(val).lower(): cmatches[ftype] = True if 's_ne' in cmp and str(cmp['s_ne']).lower() != str(val).lower(): cmatches[ftype] = True if 'eq' in cmp and float(cmp['eq']) == float(val): cmatches[ftype] = True if 'ne' in cmp and float(cmp['ne']) != float(val): cmatches[ftype] = True if 'gt' in cmp and float(cmp['gt']) < float(val): cmatches[ftype] = True if 'lt' in cmp and float(cmp['lt']) > float(val): cmatches[ftype] = True except: logger.warn("Not able to apply table conditional formatting to '{}' ({})".format(val, cmp)) # Apply HTML in order of config keys bgcol = None for cfc in config.table_cond_formatting_colours: for cfck in cfc: # should always be one, but you never know if cmatches[cfck]: bgcol = cfc[cfck] if bgcol is not None: valstring = '<span class="badge" style="background-color:{}">{}</span>'.format(bgcol, valstring) # Build HTML if not header['scale']: if s_name not in t_rows: t_rows[s_name] = dict() t_rows[s_name][rid] = '<td class="{rid} {h}">{v}</td>'.format(rid=rid, h=hide, v=valstring) else: if c_scale is not None: col = ' background-color:{};'.format(c_scale.get_colour(val)) else: col = '' bar_html = '<span class="bar" style="width:{}%;{}"></span>'.format(percentage, col) val_html = '<span class="val">{}</span>'.format(valstring) wrapper_html = '<div class="wrapper">{}{}</div>'.format(bar_html, val_html) if s_name not in t_rows: t_rows[s_name] = dict() t_rows[s_name][rid] = '<td class="data-coloured {rid} {h}">{c}</td>'.format(rid=rid, h=hide, c=wrapper_html) # Remove header if we don't have any filled cells for it if sum([len(rows) for rows in t_rows.values()]) == 0: t_headers.pop(rid, None) t_modal_headers.pop(rid, None) logger.debug('Removing header {} from general stats table, as no data'.format(k)) # # Put everything together # # Buttons above the table html = '' if not config.simple_output: # Copy Table Button html += """ <button type="button" class="mqc_table_copy_btn btn btn-default btn-sm" data-clipboard-target="#{tid}"> <span class="glyphicon glyphicon-copy"></span> Copy table </button> """.format(tid=table_id) # Configure Columns Button if len(t_headers) > 1: html += """ <button type="button" class="mqc_table_configModal_btn btn btn-default btn-sm" data-toggle="modal" data-target="#{tid}_configModal"> <span class="glyphicon glyphicon-th"></span> Configure Columns </button> """.format(tid=table_id) # Sort By Highlight button html += """ <button type="button" class="mqc_table_sortHighlight btn btn-default btn-sm" data-target="#{tid}" data-direction="desc" style="display:none;"> <span class="glyphicon glyphicon-sort-by-attributes-alt"></span> Sort by highlight </button> """.format(tid=table_id) # 
Scatter Plot Button if len(t_headers) > 1: html += """ <button type="button" class="mqc_table_makeScatter btn btn-default btn-sm" data-toggle="modal" data-target="#tableScatterModal" data-table="#{tid}"> <span class="glyphicon glyphicon glyphicon-stats"></span> Plot </button> """.format(tid=table_id) # "Showing x of y columns" text html += """ <small id="{tid}_numrows_text" class="mqc_table_numrows_text">Showing <sup id="{tid}_numrows" class="mqc_table_numrows">{nrows}</sup>/<sub>{nrows}</sub> rows and <sup id="{tid}_numcols" class="mqc_table_numcols">{ncols_vis}</sup>/<sub>{ncols}</sub> columns.</small> """.format(tid=table_id, nrows=len(t_rows), ncols_vis = (len(t_headers)+1)-hidden_cols, ncols=len(t_headers)) # Build the table itself collapse_class = 'mqc-table-collapse' if len(t_rows) > 10 and config.collapse_tables else '' html += """ <div id="{tid}_container" class="mqc_table_container"> <div class="table-responsive mqc-table-responsive {cc}"> <table id="{tid}" class="table table-condensed mqc_table" data-title="{title}"> """.format( tid=table_id, title=table_title, cc=collapse_class) # Build the header row col1_header = dt.pconfig.get('col1_header', 'Sample Name') html += '<thead><tr><th class="rowheader">{}</th>{}</tr></thead>'.format(col1_header, ''.join(t_headers.values())) # Build the table body html += '<tbody>' t_row_keys = t_rows.keys() if dt.pconfig.get('sortRows') is not False: t_row_keys = sorted(t_row_keys) for s_name in t_row_keys: html += '<tr>' # Sample name row header html += '<th class="rowheader" data-original-sn="{sn}">{sn}</th>'.format(sn=s_name) for k in t_headers: html += t_rows[s_name].get(k, empty_cells[k]) html += '</tr>' html += '</tbody></table></div>' if len(t_rows) > 10 and config.collapse_tables: html += '<div class="mqc-table-expand"><span class="glyphicon glyphicon-chevron-down" aria-hidden="true"></span></div>' html += '</div>' # Build the bootstrap modal to customise columns and order if not config.simple_output: html += """ <!-- MultiQC Table Columns Modal --> <div class="modal fade" id="{tid}_configModal" tabindex="-1"> <div class="modal-dialog modal-lg"> <div class="modal-content"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button> <h4 class="modal-title">{title}: Columns</h4> </div> <div class="modal-body"> <p>Uncheck the tick box to hide columns. 
Click and drag the handle on the left to change order.</p> <p> <button class="btn btn-default btn-sm mqc_configModal_bulkVisible" data-target="#{tid}" data-action="showAll">Show All</button> <button class="btn btn-default btn-sm mqc_configModal_bulkVisible" data-target="#{tid}" data-action="showNone">Show None</button> </p> <table class="table mqc_table mqc_sortable mqc_configModal_table" id="{tid}_configModal_table" data-title="{title}"> <thead> <tr> <th class="sorthandle" style="text-align:center;">Sort</th> <th style="text-align:center;">Visible</th> <th>Group</th> <th>Column</th> <th>Description</th> <th>ID</th> <th>Scale</th> </tr> </thead> <tbody> {trows} </tbody> </table> </div> <div class="modal-footer"> <button type="button" class="btn btn-default" data-dismiss="modal">Close</button> </div> </div> </div> </div>""".format( tid=table_id, title=table_title, trows=''.join(t_modal_headers.values()) ) # Save the raw values to a file if requested if dt.pconfig.get('save_file') is True: fn = dt.pconfig.get('raw_data_fn', 'multiqc_{}'.format(table_id) ) util_functions.write_data_file(dt.raw_vals, fn ) report.saved_raw_data[fn] = dt.raw_vals return html
[ "def", "make_table", "(", "dt", ")", ":", "table_id", "=", "dt", ".", "pconfig", ".", "get", "(", "'id'", ",", "'table_{}'", ".", "format", "(", "''", ".", "join", "(", "random", ".", "sample", "(", "letters", ",", "4", ")", ")", ")", ")", "table_id", "=", "report", ".", "save_htmlid", "(", "table_id", ")", "t_headers", "=", "OrderedDict", "(", ")", "t_modal_headers", "=", "OrderedDict", "(", ")", "t_rows", "=", "OrderedDict", "(", ")", "dt", ".", "raw_vals", "=", "defaultdict", "(", "lambda", ":", "dict", "(", ")", ")", "empty_cells", "=", "dict", "(", ")", "hidden_cols", "=", "1", "table_title", "=", "dt", ".", "pconfig", ".", "get", "(", "'table_title'", ")", "if", "table_title", "is", "None", ":", "table_title", "=", "table_id", ".", "replace", "(", "\"_\"", ",", "\" \"", ")", ".", "title", "(", ")", "for", "idx", ",", "k", ",", "header", "in", "dt", ".", "get_headers_in_order", "(", ")", ":", "rid", "=", "header", "[", "'rid'", "]", "# Build the table header cell", "shared_key", "=", "''", "if", "header", ".", "get", "(", "'shared_key'", ",", "None", ")", "is", "not", "None", ":", "shared_key", "=", "' data-shared-key={}'", ".", "format", "(", "header", "[", "'shared_key'", "]", ")", "hide", "=", "''", "muted", "=", "''", "checked", "=", "' checked=\"checked\"'", "if", "header", ".", "get", "(", "'hidden'", ",", "False", ")", "is", "True", ":", "hide", "=", "'hidden'", "muted", "=", "' text-muted'", "checked", "=", "''", "hidden_cols", "+=", "1", "data_attr", "=", "'data-dmax=\"{}\" data-dmin=\"{}\" data-namespace=\"{}\" {}'", ".", "format", "(", "header", "[", "'dmax'", "]", ",", "header", "[", "'dmin'", "]", ",", "header", "[", "'namespace'", "]", ",", "shared_key", ")", "cell_contents", "=", "'<span class=\"mqc_table_tooltip\" title=\"{}: {}\">{}</span>'", ".", "format", "(", "header", "[", "'namespace'", "]", ",", "header", "[", "'description'", "]", ",", "header", "[", "'title'", "]", ")", "t_headers", "[", "rid", "]", "=", "'<th id=\"header_{rid}\" class=\"{rid} {h}\" {da}>{c}</th>'", ".", "format", "(", "rid", "=", "rid", ",", "h", "=", "hide", ",", "da", "=", "data_attr", ",", "c", "=", "cell_contents", ")", "empty_cells", "[", "rid", "]", "=", "'<td class=\"data-coloured {rid} {h}\"></td>'", ".", "format", "(", "rid", "=", "rid", ",", "h", "=", "hide", ")", "# Build the modal table row", "t_modal_headers", "[", "rid", "]", "=", "\"\"\"\n <tr class=\"{rid}{muted}\" style=\"background-color: rgba({col}, 0.15);\">\n <td class=\"sorthandle ui-sortable-handle\">||</span></td>\n <td style=\"text-align:center;\">\n <input class=\"mqc_table_col_visible\" type=\"checkbox\" {checked} value=\"{rid}\" data-target=\"#{tid}\">\n </td>\n <td>{name}</td>\n <td>{title}</td>\n <td>{desc}</td>\n <td>{col_id}</td>\n <td>{sk}</td>\n </tr>\"\"\"", ".", "format", "(", "rid", "=", "rid", ",", "muted", "=", "muted", ",", "checked", "=", "checked", ",", "tid", "=", "table_id", ",", "col", "=", "header", "[", "'colour'", "]", ",", "name", "=", "header", "[", "'namespace'", "]", ",", "title", "=", "header", "[", "'title'", "]", ",", "desc", "=", "header", "[", "'description'", "]", ",", "col_id", "=", "'<code>{}</code>'", ".", "format", "(", "k", ")", ",", "sk", "=", "header", ".", "get", "(", "'shared_key'", ",", "''", ")", ")", "# Make a colour scale", "if", "header", "[", "'scale'", "]", "==", "False", ":", "c_scale", "=", "None", "else", ":", "c_scale", "=", "mqc_colour", ".", "mqc_colour_scale", "(", "header", "[", "'scale'", "]", ",", "header", "[", "'dmin'", "]", ",", "header", "[", "'dmax'", 
"]", ")", "# Add the data table cells", "for", "(", "s_name", ",", "samp", ")", "in", "dt", ".", "data", "[", "idx", "]", ".", "items", "(", ")", ":", "if", "k", "in", "samp", ":", "val", "=", "samp", "[", "k", "]", "kname", "=", "'{}_{}'", ".", "format", "(", "header", "[", "'namespace'", "]", ",", "rid", ")", "dt", ".", "raw_vals", "[", "s_name", "]", "[", "kname", "]", "=", "val", "if", "'modify'", "in", "header", "and", "callable", "(", "header", "[", "'modify'", "]", ")", ":", "val", "=", "header", "[", "'modify'", "]", "(", "val", ")", "try", ":", "dmin", "=", "header", "[", "'dmin'", "]", "dmax", "=", "header", "[", "'dmax'", "]", "percentage", "=", "(", "(", "float", "(", "val", ")", "-", "dmin", ")", "/", "(", "dmax", "-", "dmin", ")", ")", "*", "100", "percentage", "=", "min", "(", "percentage", ",", "100", ")", "percentage", "=", "max", "(", "percentage", ",", "0", ")", "except", "(", "ZeroDivisionError", ",", "ValueError", ")", ":", "percentage", "=", "0", "try", ":", "valstring", "=", "str", "(", "header", "[", "'format'", "]", ".", "format", "(", "val", ")", ")", "except", "ValueError", ":", "try", ":", "valstring", "=", "str", "(", "header", "[", "'format'", "]", ".", "format", "(", "float", "(", "val", ")", ")", ")", "except", "ValueError", ":", "valstring", "=", "str", "(", "val", ")", "except", ":", "valstring", "=", "str", "(", "val", ")", "# This is horrible, but Python locale settings are worse", "if", "config", ".", "thousandsSep_format", "is", "None", ":", "config", ".", "thousandsSep_format", "=", "'<span class=\"mqc_thousandSep\"></span>'", "if", "config", ".", "decimalPoint_format", "is", "None", ":", "config", ".", "decimalPoint_format", "=", "'.'", "valstring", "=", "valstring", ".", "replace", "(", "'.'", ",", "'DECIMAL'", ")", ".", "replace", "(", "','", ",", "'THOUSAND'", ")", "valstring", "=", "valstring", ".", "replace", "(", "'DECIMAL'", ",", "config", ".", "decimalPoint_format", ")", ".", "replace", "(", "'THOUSAND'", ",", "config", ".", "thousandsSep_format", ")", "# Percentage suffixes etc", "valstring", "+=", "header", ".", "get", "(", "'suffix'", ",", "''", ")", "# Conditional formatting", "cmatches", "=", "{", "cfck", ":", "False", "for", "cfc", "in", "config", ".", "table_cond_formatting_colours", "for", "cfck", "in", "cfc", "}", "# Find general rules followed by column-specific rules", "for", "cfk", "in", "[", "'all_columns'", ",", "rid", "]", ":", "if", "cfk", "in", "config", ".", "table_cond_formatting_rules", ":", "# Loop through match types", "for", "ftype", "in", "cmatches", ".", "keys", "(", ")", ":", "# Loop through array of comparison types", "for", "cmp", "in", "config", ".", "table_cond_formatting_rules", "[", "cfk", "]", ".", "get", "(", "ftype", ",", "[", "]", ")", ":", "try", ":", "# Each comparison should be a dict with single key: val", "if", "'s_eq'", "in", "cmp", "and", "str", "(", "cmp", "[", "'s_eq'", "]", ")", ".", "lower", "(", ")", "==", "str", "(", "val", ")", ".", "lower", "(", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "if", "'s_contains'", "in", "cmp", "and", "str", "(", "cmp", "[", "'s_contains'", "]", ")", ".", "lower", "(", ")", "in", "str", "(", "val", ")", ".", "lower", "(", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "if", "'s_ne'", "in", "cmp", "and", "str", "(", "cmp", "[", "'s_ne'", "]", ")", ".", "lower", "(", ")", "!=", "str", "(", "val", ")", ".", "lower", "(", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "if", "'eq'", "in", "cmp", "and", "float", "(", "cmp", "[", "'eq'", "]", ")", 
"==", "float", "(", "val", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "if", "'ne'", "in", "cmp", "and", "float", "(", "cmp", "[", "'ne'", "]", ")", "!=", "float", "(", "val", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "if", "'gt'", "in", "cmp", "and", "float", "(", "cmp", "[", "'gt'", "]", ")", "<", "float", "(", "val", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "if", "'lt'", "in", "cmp", "and", "float", "(", "cmp", "[", "'lt'", "]", ")", ">", "float", "(", "val", ")", ":", "cmatches", "[", "ftype", "]", "=", "True", "except", ":", "logger", ".", "warn", "(", "\"Not able to apply table conditional formatting to '{}' ({})\"", ".", "format", "(", "val", ",", "cmp", ")", ")", "# Apply HTML in order of config keys", "bgcol", "=", "None", "for", "cfc", "in", "config", ".", "table_cond_formatting_colours", ":", "for", "cfck", "in", "cfc", ":", "# should always be one, but you never know", "if", "cmatches", "[", "cfck", "]", ":", "bgcol", "=", "cfc", "[", "cfck", "]", "if", "bgcol", "is", "not", "None", ":", "valstring", "=", "'<span class=\"badge\" style=\"background-color:{}\">{}</span>'", ".", "format", "(", "bgcol", ",", "valstring", ")", "# Build HTML", "if", "not", "header", "[", "'scale'", "]", ":", "if", "s_name", "not", "in", "t_rows", ":", "t_rows", "[", "s_name", "]", "=", "dict", "(", ")", "t_rows", "[", "s_name", "]", "[", "rid", "]", "=", "'<td class=\"{rid} {h}\">{v}</td>'", ".", "format", "(", "rid", "=", "rid", ",", "h", "=", "hide", ",", "v", "=", "valstring", ")", "else", ":", "if", "c_scale", "is", "not", "None", ":", "col", "=", "' background-color:{};'", ".", "format", "(", "c_scale", ".", "get_colour", "(", "val", ")", ")", "else", ":", "col", "=", "''", "bar_html", "=", "'<span class=\"bar\" style=\"width:{}%;{}\"></span>'", ".", "format", "(", "percentage", ",", "col", ")", "val_html", "=", "'<span class=\"val\">{}</span>'", ".", "format", "(", "valstring", ")", "wrapper_html", "=", "'<div class=\"wrapper\">{}{}</div>'", ".", "format", "(", "bar_html", ",", "val_html", ")", "if", "s_name", "not", "in", "t_rows", ":", "t_rows", "[", "s_name", "]", "=", "dict", "(", ")", "t_rows", "[", "s_name", "]", "[", "rid", "]", "=", "'<td class=\"data-coloured {rid} {h}\">{c}</td>'", ".", "format", "(", "rid", "=", "rid", ",", "h", "=", "hide", ",", "c", "=", "wrapper_html", ")", "# Remove header if we don't have any filled cells for it", "if", "sum", "(", "[", "len", "(", "rows", ")", "for", "rows", "in", "t_rows", ".", "values", "(", ")", "]", ")", "==", "0", ":", "t_headers", ".", "pop", "(", "rid", ",", "None", ")", "t_modal_headers", ".", "pop", "(", "rid", ",", "None", ")", "logger", ".", "debug", "(", "'Removing header {} from general stats table, as no data'", ".", "format", "(", "k", ")", ")", "#", "# Put everything together", "#", "# Buttons above the table", "html", "=", "''", "if", "not", "config", ".", "simple_output", ":", "# Copy Table Button", "html", "+=", "\"\"\"\n <button type=\"button\" class=\"mqc_table_copy_btn btn btn-default btn-sm\" data-clipboard-target=\"#{tid}\">\n <span class=\"glyphicon glyphicon-copy\"></span> Copy table\n </button>\n \"\"\"", ".", "format", "(", "tid", "=", "table_id", ")", "# Configure Columns Button", "if", "len", "(", "t_headers", ")", ">", "1", ":", "html", "+=", "\"\"\"\n <button type=\"button\" class=\"mqc_table_configModal_btn btn btn-default btn-sm\" data-toggle=\"modal\" data-target=\"#{tid}_configModal\">\n <span class=\"glyphicon glyphicon-th\"></span> Configure Columns\n </button>\n \"\"\"", 
".", "format", "(", "tid", "=", "table_id", ")", "# Sort By Highlight button", "html", "+=", "\"\"\"\n <button type=\"button\" class=\"mqc_table_sortHighlight btn btn-default btn-sm\" data-target=\"#{tid}\" data-direction=\"desc\" style=\"display:none;\">\n <span class=\"glyphicon glyphicon-sort-by-attributes-alt\"></span> Sort by highlight\n </button>\n \"\"\"", ".", "format", "(", "tid", "=", "table_id", ")", "# Scatter Plot Button", "if", "len", "(", "t_headers", ")", ">", "1", ":", "html", "+=", "\"\"\"\n <button type=\"button\" class=\"mqc_table_makeScatter btn btn-default btn-sm\" data-toggle=\"modal\" data-target=\"#tableScatterModal\" data-table=\"#{tid}\">\n <span class=\"glyphicon glyphicon glyphicon-stats\"></span> Plot\n </button>\n \"\"\"", ".", "format", "(", "tid", "=", "table_id", ")", "# \"Showing x of y columns\" text", "html", "+=", "\"\"\"\n <small id=\"{tid}_numrows_text\" class=\"mqc_table_numrows_text\">Showing <sup id=\"{tid}_numrows\" class=\"mqc_table_numrows\">{nrows}</sup>/<sub>{nrows}</sub> rows and <sup id=\"{tid}_numcols\" class=\"mqc_table_numcols\">{ncols_vis}</sup>/<sub>{ncols}</sub> columns.</small>\n \"\"\"", ".", "format", "(", "tid", "=", "table_id", ",", "nrows", "=", "len", "(", "t_rows", ")", ",", "ncols_vis", "=", "(", "len", "(", "t_headers", ")", "+", "1", ")", "-", "hidden_cols", ",", "ncols", "=", "len", "(", "t_headers", ")", ")", "# Build the table itself", "collapse_class", "=", "'mqc-table-collapse'", "if", "len", "(", "t_rows", ")", ">", "10", "and", "config", ".", "collapse_tables", "else", "''", "html", "+=", "\"\"\"\n <div id=\"{tid}_container\" class=\"mqc_table_container\">\n <div class=\"table-responsive mqc-table-responsive {cc}\">\n <table id=\"{tid}\" class=\"table table-condensed mqc_table\" data-title=\"{title}\">\n \"\"\"", ".", "format", "(", "tid", "=", "table_id", ",", "title", "=", "table_title", ",", "cc", "=", "collapse_class", ")", "# Build the header row", "col1_header", "=", "dt", ".", "pconfig", ".", "get", "(", "'col1_header'", ",", "'Sample Name'", ")", "html", "+=", "'<thead><tr><th class=\"rowheader\">{}</th>{}</tr></thead>'", ".", "format", "(", "col1_header", ",", "''", ".", "join", "(", "t_headers", ".", "values", "(", ")", ")", ")", "# Build the table body", "html", "+=", "'<tbody>'", "t_row_keys", "=", "t_rows", ".", "keys", "(", ")", "if", "dt", ".", "pconfig", ".", "get", "(", "'sortRows'", ")", "is", "not", "False", ":", "t_row_keys", "=", "sorted", "(", "t_row_keys", ")", "for", "s_name", "in", "t_row_keys", ":", "html", "+=", "'<tr>'", "# Sample name row header", "html", "+=", "'<th class=\"rowheader\" data-original-sn=\"{sn}\">{sn}</th>'", ".", "format", "(", "sn", "=", "s_name", ")", "for", "k", "in", "t_headers", ":", "html", "+=", "t_rows", "[", "s_name", "]", ".", "get", "(", "k", ",", "empty_cells", "[", "k", "]", ")", "html", "+=", "'</tr>'", "html", "+=", "'</tbody></table></div>'", "if", "len", "(", "t_rows", ")", ">", "10", "and", "config", ".", "collapse_tables", ":", "html", "+=", "'<div class=\"mqc-table-expand\"><span class=\"glyphicon glyphicon-chevron-down\" aria-hidden=\"true\"></span></div>'", "html", "+=", "'</div>'", "# Build the bootstrap modal to customise columns and order", "if", "not", "config", ".", "simple_output", ":", "html", "+=", "\"\"\"\n <!-- MultiQC Table Columns Modal -->\n <div class=\"modal fade\" id=\"{tid}_configModal\" tabindex=\"-1\">\n <div class=\"modal-dialog modal-lg\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" 
class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">&times;</span></button>\n <h4 class=\"modal-title\">{title}: Columns</h4>\n </div>\n <div class=\"modal-body\">\n <p>Uncheck the tick box to hide columns. Click and drag the handle on the left to change order.</p>\n <p>\n <button class=\"btn btn-default btn-sm mqc_configModal_bulkVisible\" data-target=\"#{tid}\" data-action=\"showAll\">Show All</button>\n <button class=\"btn btn-default btn-sm mqc_configModal_bulkVisible\" data-target=\"#{tid}\" data-action=\"showNone\">Show None</button>\n </p>\n <table class=\"table mqc_table mqc_sortable mqc_configModal_table\" id=\"{tid}_configModal_table\" data-title=\"{title}\">\n <thead>\n <tr>\n <th class=\"sorthandle\" style=\"text-align:center;\">Sort</th>\n <th style=\"text-align:center;\">Visible</th>\n <th>Group</th>\n <th>Column</th>\n <th>Description</th>\n <th>ID</th>\n <th>Scale</th>\n </tr>\n </thead>\n <tbody>\n {trows}\n </tbody>\n </table>\n </div>\n <div class=\"modal-footer\"> <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Close</button> </div>\n </div> </div> </div>\"\"\"", ".", "format", "(", "tid", "=", "table_id", ",", "title", "=", "table_title", ",", "trows", "=", "''", ".", "join", "(", "t_modal_headers", ".", "values", "(", ")", ")", ")", "# Save the raw values to a file if requested", "if", "dt", ".", "pconfig", ".", "get", "(", "'save_file'", ")", "is", "True", ":", "fn", "=", "dt", ".", "pconfig", ".", "get", "(", "'raw_data_fn'", ",", "'multiqc_{}'", ".", "format", "(", "table_id", ")", ")", "util_functions", ".", "write_data_file", "(", "dt", ".", "raw_vals", ",", "fn", ")", "report", ".", "saved_raw_data", "[", "fn", "]", "=", "dt", ".", "raw_vals", "return", "html" ]
Build the HTML needed for a MultiQC table.
:param dt: MultiQC datatable object
[ "Build", "the", "HTML", "needed", "for", "a", "MultiQC", "table", ".", ":", "param", "dt", ":", "MultiQC", "datatable", "object" ]
python
train
46.204082
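A hedged usage sketch for the make_table record above. In MultiQC, modules normally reach make_table() through the module-level plot() wrapper in multiqc.plots.table, which builds the datatable object from raw data, headers, and a pconfig dict; the sample data and header options below are illustrative, not taken from any real module.

from multiqc.plots import table

data = {
    'sample_1': {'aligned': 19786, 'not_aligned': 214},
    'sample_2': {'aligned': 18932, 'not_aligned': 754},
}
headers = {
    'aligned': {'title': 'Aligned', 'scale': 'PuBu', 'format': '{:,.0f}'},
    'not_aligned': {'title': 'Not aligned', 'scale': 'Reds', 'hidden': True},
}
pconfig = {'id': 'my_table', 'table_title': 'My Table', 'save_file': True}

# plot() assembles the datatable object and delegates to make_table(dt);
# the returned string is the HTML fragment built by the function above.
html = table.plot(data, headers, pconfig)

The conditional formatting branch in the tokens reads config.table_cond_formatting_rules; a schematic rule set follows (normally supplied via the user's YAML config -- direct attribute assignment is shown only for illustration):

from multiqc.utils import config  # import path assumed from MultiQC's layout

config.table_cond_formatting_rules['aligned'] = {
    'pass': [{'gt': 19000}],                  # badge when value > 19000
    'fail': [{'lt': 1000}, {'s_eq': 'n/a'}],  # badge on low or 'n/a' values
}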
itamarst/crochet
crochet/_eventloop.py
https://github.com/itamarst/crochet/blob/ecfc22cefa90f3dfbafa71883c1470e7294f2b6d/crochet/_eventloop.py#L365-L375
def _common_setup(self): """ The minimal amount of setup done by both setup() and no_setup(). """ self._started = True self._reactor = self._reactorFactory() self._registry = ResultRegistry() # We want to unblock EventualResult regardless of how the reactor is # run, so we always register this: self._reactor.addSystemEventTrigger( "before", "shutdown", self._registry.stop)
[ "def", "_common_setup", "(", "self", ")", ":", "self", ".", "_started", "=", "True", "self", ".", "_reactor", "=", "self", ".", "_reactorFactory", "(", ")", "self", ".", "_registry", "=", "ResultRegistry", "(", ")", "# We want to unblock EventualResult regardless of how the reactor is", "# run, so we always register this:", "self", ".", "_reactor", ".", "addSystemEventTrigger", "(", "\"before\"", ",", "\"shutdown\"", ",", "self", ".", "_registry", ".", "stop", ")" ]
The minimal amount of setup done by both setup() and no_setup().
[ "The", "minimal", "amount", "of", "setup", "done", "by", "both", "setup", "()", "and", "no_setup", "()", "." ]
python
train
40.818182
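_common_setup() is private; a minimal sketch of the public API it backs, assuming only crochet's documented entry points (setup(), run_in_reactor) and Twisted itself:

from crochet import setup, run_in_reactor

setup()  # runs _common_setup() and then starts the reactor in a thread

@run_in_reactor
def schedule_noop():
    # Runs in the reactor thread; the decorator hands back an EventualResult.
    from twisted.internet import reactor
    reactor.callLater(0.1, lambda: None)

result = schedule_noop()
# The "before shutdown" trigger registered in _common_setup() is what lets a
# blocked result.wait(timeout=1.0) unblock when the reactor shuts down.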
geophysics-ubonn/crtomo_tools
src/cr_trig_create.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/src/cr_trig_create.py#L218-L287
def read_char_lengths(self, filename, electrode_filename):
    """Read characteristic lengths from the given file.

    The file is expected to have either 1 or 4 entries/lines with
    characteristic lengths > 0 (floats). If only one value is encountered,
    it is used for all four entities. If four values are encountered, they
    are assigned, in order, to:

    1) electrode nodes
    2) boundary nodes
    3) nodes from extra lines
    4) nodes from extra nodes

    Note that in case one node belongs to multiple entities, the smallest
    characteristic length will be used.

    If four values are used and the electrode length is negative, then the
    electrode positions will be read in (todo: we open the electrode.dat
    file two times here...) and the minimal distance between all electrodes
    will be multiplied by the absolute value of the imported value, and
    used as the characteristic length:

    .. math::

        l_{electrodes} = min(pdist(electrodes)) * |l_{electrodes}^{from file}|

    The function scipy.spatial.distance.pdist is used to compute the
    global minimal distance between any two electrodes.

    It is advisable to only use values in the range [-1, 0) for the
    automatic char length option.
    """
    if os.path.isfile(filename):
        data = np.atleast_1d(np.loadtxt(filename))
        if data.size == 4:
            characteristic_length = data
            # check sign of first (electrode) length value
            if characteristic_length[0] < 0:
                try:
                    elec_positions = np.loadtxt(electrode_filename)
                except:
                    raise IOError(
                        'There was an error opening the electrode file')
                import scipy.spatial.distance
                distances = scipy.spatial.distance.pdist(elec_positions)
                characteristic_length[0] = min(distances) * np.abs(
                    characteristic_length[0])
                if characteristic_length[0] == 0:
                    raise Exception(
                        'Error computing electrode ' +
                        'distances (got a minimal distance of zero)')
        else:
            characteristic_length = np.ones(4) * data[0]
    else:
        characteristic_length = np.ones(4)

    if np.any(characteristic_length <= 0):
        raise Exception('No negative characteristic lengths allowed ' +
                        '(except for electrode length)')

    self.char_lengths = {}
    for key, item in zip(('electrode',
                          'boundary',
                          'extra_line',
                          'extra_node'),
                         characteristic_length):
        self.char_lengths[key] = item
[ "def", "read_char_lengths", "(", "self", ",", "filename", ",", "electrode_filename", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "data", "=", "np", ".", "atleast_1d", "(", "np", ".", "loadtxt", "(", "filename", ")", ")", "if", "data", ".", "size", "==", "4", ":", "characteristic_length", "=", "data", "# check sign of first (electrode) length value", "if", "characteristic_length", "[", "0", "]", "<", "0", ":", "try", ":", "elec_positions", "=", "np", ".", "loadtxt", "(", "electrode_filename", ")", "except", ":", "raise", "IOError", "(", "'There was an error opening the electrode file'", ")", "import", "scipy", ".", "spatial", ".", "distance", "distances", "=", "scipy", ".", "spatial", ".", "distance", ".", "pdist", "(", "elec_positions", ")", "characteristic_length", "[", "0", "]", "=", "min", "(", "distances", ")", "*", "np", ".", "abs", "(", "characteristic_length", "[", "0", "]", ")", "if", "characteristic_length", "[", "0", "]", "==", "0", ":", "raise", "Exception", "(", "'Error computing electrode '", "+", "'distances (got a minimal distance of zero)'", ")", "else", ":", "characteristic_length", "=", "np", ".", "ones", "(", "4", ")", "*", "data", "[", "0", "]", "else", ":", "characteristic_length", "=", "np", ".", "ones", "(", "4", ")", "if", "np", ".", "any", "(", "characteristic_length", "<=", "0", ")", ":", "raise", "Exception", "(", "'No negative characteristic lengths allowed '", "+", "'(except for electrode length)'", ")", "self", ".", "char_lengths", "=", "{", "}", "for", "key", ",", "item", "in", "zip", "(", "(", "'electrode'", ",", "'boundary'", ",", "'extra_line'", ",", "'extra_node'", ")", ",", "characteristic_length", ")", ":", "self", ".", "char_lengths", "[", "key", "]", "=", "item" ]
Read characteristic lengths from the given file.

The file is expected to have either 1 or 4 entries/lines with
characteristic lengths > 0 (floats). If only one value is encountered, it
is used for all four entities. If four values are encountered, they are
assigned, in order, to:

1) electrode nodes
2) boundary nodes
3) nodes from extra lines
4) nodes from extra nodes

Note that in case one node belongs to multiple entities, the smallest
characteristic length will be used.

If four values are used and the electrode length is negative, then the
electrode positions will be read in (todo: we open the electrode.dat file
two times here...) and the minimal distance between all electrodes will be
multiplied by the absolute value of the imported value, and used as the
characteristic length:

.. math::

    l_{electrodes} = min(pdist(electrodes)) * |l_{electrodes}^{from file}|

The function scipy.spatial.distance.pdist is used to compute the global
minimal distance between any two electrodes.

It is advisable to only use values in the range [-1, 0) for the
automatic char length option.
[ "Read", "characteristic", "lengths", "from", "the", "given", "file", "." ]
python
train
41.8
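The automatic electrode length rule in this docstring can be checked standalone; a sketch of just the formula, with made-up coordinates and independent of the (unshown) class that owns read_char_lengths():

import numpy as np
from scipy.spatial.distance import pdist

elec_positions = np.array([[0.0, 0.0], [1.0, 0.0], [2.5, 0.0]])
file_value = -0.5  # negative first entry selects the automatic mode

# l_electrodes = min(pdist(electrodes)) * |value from file|
char_len = min(pdist(elec_positions)) * np.abs(file_value)
print(char_len)  # 0.5 -- half of the closest electrode spacing (1.0)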
campbellr/smashrun-client
smashrun/client.py
https://github.com/campbellr/smashrun-client/blob/2522cb4d0545cf482a49a9533f12aac94c5aecdc/smashrun/client.py#L76-L102
def get_activities(self, count=10, since=None, style='summary', limit=None): """Iterate over all activities, from newest to oldest. :param count: The number of results to retrieve per page. If set to ``None``, pagination is disabled. :param since: Return only activities since this date. Can be either a timestamp or a datetime object. :param style: The type of records to return. May be one of 'summary', 'briefs', 'ids', or 'extended'. :param limit: The maximum number of activities to return for the given query. """ params = {} if since: params.update(fromDate=to_timestamp(since)) parts = ['my', 'activities', 'search'] if style != 'summary': parts.append(style) url = self._build_url(*parts) # TODO: return an Activity (or ActivitySummary?) class that can do # things like convert date and time fields to proper datetime objects return islice(self._iter(url, count, **params), limit)
[ "def", "get_activities", "(", "self", ",", "count", "=", "10", ",", "since", "=", "None", ",", "style", "=", "'summary'", ",", "limit", "=", "None", ")", ":", "params", "=", "{", "}", "if", "since", ":", "params", ".", "update", "(", "fromDate", "=", "to_timestamp", "(", "since", ")", ")", "parts", "=", "[", "'my'", ",", "'activities'", ",", "'search'", "]", "if", "style", "!=", "'summary'", ":", "parts", ".", "append", "(", "style", ")", "url", "=", "self", ".", "_build_url", "(", "*", "parts", ")", "# TODO: return an Activity (or ActivitySummary?) class that can do", "# things like convert date and time fields to proper datetime objects", "return", "islice", "(", "self", ".", "_iter", "(", "url", ",", "count", ",", "*", "*", "params", ")", ",", "limit", ")" ]
Iterate over all activities, from newest to oldest. :param count: The number of results to retrieve per page. If set to ``None``, pagination is disabled. :param since: Return only activities since this date. Can be either a timestamp or a datetime object. :param style: The type of records to return. May be one of 'summary', 'briefs', 'ids', or 'extended'. :param limit: The maximum number of activities to return for the given query.
[ "Iterate", "over", "all", "activities", "from", "newest", "to", "oldest", "." ]
python
train
41.296296
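A hedged usage sketch; the Smashrun client class name and construction are assumptions in line with the repository layout, and only the get_activities() call reflects this record:

from datetime import datetime
from smashrun import Smashrun

# Construction is schematic -- the real OAuth2 flow is in the project README.
client = Smashrun(client_id='...', token={'access_token': '...'})

# Newest-first, 10 per page (the default), capped at 5 results overall.
for brief in client.get_activities(since=datetime(2019, 1, 1),
                                   style='briefs', limit=5):
    print(brief)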
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidgetdelegate.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidgetdelegate.py#L217-L251
def drawDisplay(self, painter, option, rect, text):
    """
    Overloads the drawDisplay method to render HTML if the rich text \
    information is set to true.

    :param      painter | <QtGui.QPainter>
                option  | <QtGui.QStyleOptionViewItem>
                rect    | <QtCore.QRect>
                text    | <str>
    """
    if self.showRichText():
        # create the document
        doc = QtGui.QTextDocument()
        doc.setTextWidth(float(rect.width()))
        doc.setHtml(text)

        # draw the contents
        painter.translate(rect.x(), rect.y())
        doc.drawContents(painter, QtCore.QRectF(0, 0,
                                                float(rect.width()),
                                                float(rect.height())))
        painter.translate(-rect.x(), -rect.y())
    else:
        if type(text).__name__ not in ('str', 'unicode', 'QString'):
            text = nativestring(text)

        metrics = QtGui.QFontMetrics(option.font)
        text = metrics.elidedText(text,
                                  QtCore.Qt.TextElideMode(option.textElideMode),
                                  rect.width())
        painter.setFont(option.font)
        painter.drawText(rect, int(option.displayAlignment), text)
[ "def", "drawDisplay", "(", "self", ",", "painter", ",", "option", ",", "rect", ",", "text", ")", ":", "if", "self", ".", "showRichText", "(", ")", ":", "# create the document\r", "doc", "=", "QtGui", ".", "QTextDocument", "(", ")", "doc", ".", "setTextWidth", "(", "float", "(", "rect", ".", "width", "(", ")", ")", ")", "doc", ".", "setHtml", "(", "text", ")", "# draw the contents\r", "painter", ".", "translate", "(", "rect", ".", "x", "(", ")", ",", "rect", ".", "y", "(", ")", ")", "doc", ".", "drawContents", "(", "painter", ",", "QtCore", ".", "QRectF", "(", "0", ",", "0", ",", "float", "(", "rect", ".", "width", "(", ")", ")", ",", "float", "(", "rect", ".", "height", "(", ")", ")", ")", ")", "painter", ".", "translate", "(", "-", "rect", ".", "x", "(", ")", ",", "-", "rect", ".", "y", "(", ")", ")", "else", ":", "if", "type", "(", "text", ")", ".", "__name__", "not", "in", "(", "'str'", ",", "'unicode'", ",", "'QString'", ")", ":", "text", "=", "nativestring", "(", "text", ")", "metrics", "=", "QtGui", ".", "QFontMetrics", "(", "option", ".", "font", ")", "text", "=", "metrics", ".", "elidedText", "(", "text", ",", "QtCore", ".", "Qt", ".", "TextElideMode", "(", "option", ".", "textElideMode", ")", ",", "rect", ".", "width", "(", ")", ")", "painter", ".", "setFont", "(", "option", ".", "font", ")", "painter", ".", "drawText", "(", "rect", ",", "int", "(", "option", ".", "displayAlignment", ")", ",", "text", ")" ]
Overloads the drawDisplay method to render HTML if the rich text \
information is set to true.

:param      painter | <QtGui.QPainter>
            option  | <QtGui.QStyleOptionViewItem>
            rect    | <QtCore.QRect>
            text    | <str>
[ "Overloads", "the", "drawDisplay", "method", "to", "render", "HTML", "if", "the", "rich", "text", "\\", "information", "is", "set", "to", "true", ".", ":", "param", "painter", "|", "<QtGui", ".", "QPainter", ">", "option", "|", "<QtGui", ".", "QStyleOptionViewItem", ">", "rect", "|", "<QtCore", ".", "QRect", ">", "text", "|", "<str", ">" ]
python
train
43.428571
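A schematic sketch of driving the two branches above. The setShowRichText() setter is assumed to pair with the showRichText() getter used in the code, and the Qt import path through projexui is likewise an assumption:

from projexui.qt import QtGui
from projexui.widgets.xtreewidget import XTreeWidget

app = QtGui.QApplication([])
tree = XTreeWidget()
delegate = tree.itemDelegate()      # the XTreeWidgetDelegate from this record
delegate.setShowRichText(True)      # hypothetical setter: HTML branch is taken
item = QtGui.QTreeWidgetItem(['<b>bold</b> and <i>italic</i>'])
tree.addTopLevelItem(item)
tree.show()
app.exec_()

With rich text off (the default), the else branch elides long strings with QFontMetrics.elidedText() instead of laying them out as HTML.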
gamechanger/confluent_schema_registry_client
confluent_schema_registry_client/__init__.py
https://github.com/gamechanger/confluent_schema_registry_client/blob/ac9196e366724eeb2f19f1a169fd2f9a0c8d68ae/confluent_schema_registry_client/__init__.py#L68-L75
def get_subject_version_ids(self, subject): """ Return the list of schema version ids which have been registered under the given subject. """ res = requests.get(self._url('/subjects/{}/versions', subject)) raise_if_failed(res) return res.json()
[ "def", "get_subject_version_ids", "(", "self", ",", "subject", ")", ":", "res", "=", "requests", ".", "get", "(", "self", ".", "_url", "(", "'/subjects/{}/versions'", ",", "subject", ")", ")", "raise_if_failed", "(", "res", ")", "return", "res", ".", "json", "(", ")" ]
Return the list of schema version ids which have been registered under the given subject.
[ "Return", "the", "list", "of", "schema", "version", "ids", "which", "have", "been", "registered", "under", "the", "given", "subject", "." ]
python
train
36.625
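A hedged sketch of the call; the client class name and constructor are assumptions consistent with the package name, while the endpoint (GET /subjects/{subject}/versions) comes straight from the record:

from confluent_schema_registry_client import SchemaRegistryClient

client = SchemaRegistryClient('localhost')  # constructor args are schematic
versions = client.get_subject_version_ids('my-topic-value')
print(versions)  # e.g. [1, 2, 3]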