Dataset columns (min and max shown for string lengths and floats):

repo: stringlengths (7 to 55)
path: stringlengths (4 to 223)
url: stringlengths (87 to 315)
code: stringlengths (75 to 104k)
code_tokens: list
docstring: stringlengths (1 to 46.9k)
docstring_tokens: list
language: stringclasses (1 value)
partition: stringclasses (3 values)
avg_line_len: float64 (7.91 to 980)
mayfield/shellish
shellish/command/command.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/command.py#L262-L268
def depth(self, value):
    """ Update ourself and any of our subcommands. """
    for command in self.subcommands.values():
        command.depth = value + 1
        del command.argparser._defaults[self.arg_label_fmt % self._depth]
        command.argparser._defaults[self.arg_label_fmt % value] = command
    self._depth = value
[ "def", "depth", "(", "self", ",", "value", ")", ":", "for", "command", "in", "self", ".", "subcommands", ".", "values", "(", ")", ":", "command", ".", "depth", "=", "value", "+", "1", "del", "command", ".", "argparser", ".", "_defaults", "[", "self", ".", "arg_label_fmt", "%", "self", ".", "_depth", "]", "command", ".", "argparser", ".", "_defaults", "[", "self", ".", "arg_label_fmt", "%", "value", "]", "=", "command", "self", ".", "_depth", "=", "value" ]
Update ourself and any of our subcommands.
[ "Update", "ourself", "and", "any", "of", "our", "subcommands", "." ]
python
train
49.714286
Azure/msrest-for-python
msrest/universal_http/__init__.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/universal_http/__init__.py#L161-L192
def load(self, filepath):
    # type: (str) -> None
    """Load configuration from existing file.

    :param str filepath: Path to existing config file.
    :raises: ValueError if supplied config file is invalid.
    """
    try:
        self._config.read(filepath)
        import ast
        self.connection.timeout = \
            self._config.getint("Connection", "timeout")
        self.connection.verify = \
            self._config.getboolean("Connection", "verify")
        self.connection.cert = \
            self._config.get("Connection", "cert")
        self.proxies.proxies = \
            ast.literal_eval(self._config.get("Proxies", "proxies"))
        self.proxies.use_env_settings = \
            self._config.getboolean("Proxies", "env_settings")
        self.redirect_policy.allow = \
            self._config.getboolean("RedirectPolicy", "allow")
        self.redirect_policy.max_redirects = \
            self._config.getint("RedirectPolicy", "max_redirects")
    except (ValueError, EnvironmentError, NoOptionError):
        error = "Supplied config file incompatible."
        raise_with_traceback(ValueError, error)
    finally:
        self._clear_config()
[ "def", "load", "(", "self", ",", "filepath", ")", ":", "# type: (str) -> None", "try", ":", "self", ".", "_config", ".", "read", "(", "filepath", ")", "import", "ast", "self", ".", "connection", ".", "timeout", "=", "self", ".", "_config", ".", "getint", "(", "\"Connection\"", ",", "\"timeout\"", ")", "self", ".", "connection", ".", "verify", "=", "self", ".", "_config", ".", "getboolean", "(", "\"Connection\"", ",", "\"verify\"", ")", "self", ".", "connection", ".", "cert", "=", "self", ".", "_config", ".", "get", "(", "\"Connection\"", ",", "\"cert\"", ")", "self", ".", "proxies", ".", "proxies", "=", "ast", ".", "literal_eval", "(", "self", ".", "_config", ".", "get", "(", "\"Proxies\"", ",", "\"proxies\"", ")", ")", "self", ".", "proxies", ".", "use_env_settings", "=", "self", ".", "_config", ".", "getboolean", "(", "\"Proxies\"", ",", "\"env_settings\"", ")", "self", ".", "redirect_policy", ".", "allow", "=", "self", ".", "_config", ".", "getboolean", "(", "\"RedirectPolicy\"", ",", "\"allow\"", ")", "self", ".", "redirect_policy", ".", "max_redirects", "=", "self", ".", "_config", ".", "getint", "(", "\"RedirectPolicy\"", ",", "\"max_redirects\"", ")", "except", "(", "ValueError", ",", "EnvironmentError", ",", "NoOptionError", ")", ":", "error", "=", "\"Supplied config file incompatible.\"", "raise_with_traceback", "(", "ValueError", ",", "error", ")", "finally", ":", "self", ".", "_clear_config", "(", ")" ]
Load configuration from existing file. :param str filepath: Path to existing config file. :raises: ValueError if supplied config file is invalid.
[ "Load", "configuration", "from", "existing", "file", "." ]
python
train
39.40625
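A minimal companion sketch (assumed, not from the source): an INI file covering every section and option this load() reads, written and then loaded; `config` stands in for an instance of the surrounding configuration class, which this record does not show. Note that the except clause catches NoOptionError but not NoSectionError, so all three sections must be present.

# Hypothetical config file exercising every option load() reads.
ini_text = """
[Connection]
timeout = 100
verify = True
cert = client.pem

[Proxies]
proxies = {'http': 'http://proxy.example:8080'}
env_settings = True

[RedirectPolicy]
allow = True
max_redirects = 30
"""

with open("client.cfg", "w") as f:
    f.write(ini_text)

config.load("client.cfg")  # `config` is an assumed instance, not shown in this record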
numenta/nupic
src/nupic/swarming/permutations_runner.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/permutations_runner.py#L1743-L1757
def queryModelIDs(self):
    """Queries DB for model IDs of all currently instantiated models
    associated with this HyperSearch job.

    See also: _iterModels()

    Parameters:
    ----------------------------------------------------------------------
    retval:         A sequence of Nupic modelIDs
    """
    jobID = self.getJobID()
    modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
    modelIDs = tuple(x[0] for x in modelCounterPairs)
    return modelIDs
[ "def", "queryModelIDs", "(", "self", ")", ":", "jobID", "=", "self", ".", "getJobID", "(", ")", "modelCounterPairs", "=", "_clientJobsDB", "(", ")", ".", "modelsGetUpdateCounters", "(", "jobID", ")", "modelIDs", "=", "tuple", "(", "x", "[", "0", "]", "for", "x", "in", "modelCounterPairs", ")", "return", "modelIDs" ]
Queuries DB for model IDs of all currently instantiated models associated with this HyperSearch job. See also: _iterModels() Parameters: ---------------------------------------------------------------------- retval: A sequence of Nupic modelIDs
[ "Queuries", "DB", "for", "model", "IDs", "of", "all", "currently", "instantiated", "models", "associated", "with", "this", "HyperSearch", "job", "." ]
python
valid
31.6
kakwa/ldapcherry
ldapcherry/__init__.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/__init__.py#L992-L1006
def checkppolicy(self, **params):
    """ search user page """
    self._check_auth(must_admin=False, redir_login=False)
    keys = list(params.keys())
    if len(keys) != 1:
        cherrypy.response.status = 400
        return "bad argument"
    password = params[keys[0]]
    is_admin = self._check_admin()
    ret = self._checkppolicy(password)
    if ret['match']:
        cherrypy.response.status = 200
    else:
        cherrypy.response.status = 200
    return json.dumps(ret, separators=(',', ':'))
[ "def", "checkppolicy", "(", "self", ",", "*", "*", "params", ")", ":", "self", ".", "_check_auth", "(", "must_admin", "=", "False", ",", "redir_login", "=", "False", ")", "keys", "=", "list", "(", "params", ".", "keys", "(", ")", ")", "if", "len", "(", "keys", ")", "!=", "1", ":", "cherrypy", ".", "response", ".", "status", "=", "400", "return", "\"bad argument\"", "password", "=", "params", "[", "keys", "[", "0", "]", "]", "is_admin", "=", "self", ".", "_check_admin", "(", ")", "ret", "=", "self", ".", "_checkppolicy", "(", "password", ")", "if", "ret", "[", "'match'", "]", ":", "cherrypy", ".", "response", ".", "status", "=", "200", "else", ":", "cherrypy", ".", "response", ".", "status", "=", "200", "return", "json", ".", "dumps", "(", "ret", ",", "separators", "=", "(", "','", ",", "':'", ")", ")" ]
search user page
[ "search", "user", "page" ]
python
train
36.6
juiceinc/recipe
recipe/core.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/core.py#L233-L248
def metrics(self, *metrics):
    """ Add a list of Metric ingredients to the query. These can
    either be Metric objects or strings representing metrics on
    the shelf.

    The Metric expression will be added to the query's select
    statement. The metric value is a property of each row of the
    result.

    :param metrics: Metrics to add to the recipe. Metrics can
                    either be keys on the ``shelf`` or
                    Metric objects
    :type metrics: list
    """
    for m in metrics:
        self._cauldron.use(self._shelf.find(m, Metric))
    self.dirty = True
    return self
[ "def", "metrics", "(", "self", ",", "*", "metrics", ")", ":", "for", "m", "in", "metrics", ":", "self", ".", "_cauldron", ".", "use", "(", "self", ".", "_shelf", ".", "find", "(", "m", ",", "Metric", ")", ")", "self", ".", "dirty", "=", "True", "return", "self" ]
Add a list of Metric ingredients to the query. These can either be Metric objects or strings representing metrics on the shelf. The Metric expression will be added to the query's select statement. The metric value is a property of each row of the result. :param metrics: Metrics to add to the recipe. Metrics can either be keys on the ``shelf`` or Metric objects :type metrics: list
[ "Add", "a", "list", "of", "Metric", "ingredients", "to", "the", "query", ".", "These", "can", "either", "be", "Metric", "objects", "or", "strings", "representing", "metrics", "on", "the", "shelf", "." ]
python
train
40.25
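Because metrics() returns self, it composes fluently; a hedged usage sketch with hypothetical shelf keys:

# "revenue", "visits" and "conversions" are assumed shelf keys;
# metrics() mutates the recipe and returns it, so calls chain.
recipe = recipe.metrics("revenue").metrics("visits", "conversions")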
learningequality/ricecooker
ricecooker/managers/progress.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/managers/progress.py#L75-L82
def get_restore_path(self, status=None):
    """ get_restore_path: get path to restoration file
        Args: status (str): step to get restore file (optional)
        Returns: string path to restoration file
    """
    status = self.get_status() if status is None else status
    return config.get_restore_path(status.name.lower())
[ "def", "get_restore_path", "(", "self", ",", "status", "=", "None", ")", ":", "status", "=", "self", ".", "get_status", "(", ")", "if", "status", "is", "None", "else", "status", "return", "config", ".", "get_restore_path", "(", "status", ".", "name", ".", "lower", "(", ")", ")" ]
get_restore_path: get path to restoration file Args: status (str): step to get restore file (optional) Returns: string path to restoration file
[ "get_restore_path", ":", "get", "path", "to", "restoration", "file", "Args", ":", "status", "(", "str", ")", ":", "step", "to", "get", "restore", "file", "(", "optional", ")", "Returns", ":", "string", "path", "to", "restoration", "file" ]
python
train
45.75
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/custom_objects_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/custom_objects_api.py#L1859-L1883
def patch_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs):  # noqa: E501
    """patch_cluster_custom_object_status  # noqa: E501

    partially update status of the specified cluster scoped custom object  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str name: the custom object's name (required)
    :param UNKNOWN_BASE_TYPE body: (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs)  # noqa: E501
    else:
        (data) = self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs)  # noqa: E501
        return data
[ "def", "patch_cluster_custom_object_status", "(", "self", ",", "group", ",", "version", ",", "plural", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "patch_cluster_custom_object_status_with_http_info", "(", "group", ",", "version", ",", "plural", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "patch_cluster_custom_object_status_with_http_info", "(", "group", ",", "version", ",", "plural", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
patch_cluster_custom_object_status # noqa: E501 partially update status of the specified cluster scoped custom object # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param UNKNOWN_BASE_TYPE body: (required) :return: object If the method is called asynchronously, returns the request thread.
[ "patch_cluster_custom_object_status", "#", "noqa", ":", "E501" ]
python
train
57.72
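Usage implied by the docstring, with hypothetical CRD coordinates (the group/version/plural/name values and the patch body are illustrative):

body = {"status": {"availableReplicas": 2}}  # hypothetical status patch

# Synchronous (default) call:
obj = api.patch_cluster_custom_object_status(
    "stable.example.com", "v1", "crontabs", "my-crontab", body)

# Asynchronous call, as shown in the docstring:
thread = api.patch_cluster_custom_object_status(
    "stable.example.com", "v1", "crontabs", "my-crontab", body,
    async_req=True)
obj = thread.get()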
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-core/ask_sdk_core/attributes_manager.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-core/ask_sdk_core/attributes_manager.py#L185-L202
def persistent_attributes(self, persistent_attributes):
    # type: (Dict[str, object]) -> None
    """Overwrites and caches the persistent attributes value.

    Note that the persistent attributes will not be saved to persistence
    layer until the save_persistent_attributes method is called.

    :param persistent_attributes: attributes in persistence layer
    :type persistent_attributes: Dict[str, object]
    :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
        if trying to set persistent attributes without persistence adapter
    """
    if not self._persistence_adapter:
        raise AttributesManagerException(
            "Cannot set PersistentAttributes without persistence adapter!")
    self._persistence_attributes = persistent_attributes
    self._persistent_attributes_set = True
[ "def", "persistent_attributes", "(", "self", ",", "persistent_attributes", ")", ":", "# type: (Dict[str, object]) -> None", "if", "not", "self", ".", "_persistence_adapter", ":", "raise", "AttributesManagerException", "(", "\"Cannot set PersistentAttributes without persistence adapter!\"", ")", "self", ".", "_persistence_attributes", "=", "persistent_attributes", "self", ".", "_persistent_attributes_set", "=", "True" ]
Overwrites and caches the persistent attributes value. Note that the persistent attributes will not be saved to persistence layer until the save_persistent_attributes method is called. :param persistent_attributes: attributes in persistence layer :type persistent_attributes: Dict[str, object] :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException` if trying to set persistent attributes without persistence adapter
[ "Overwrites", "and", "caches", "the", "persistent", "attributes", "value", "." ]
python
train
48.666667
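A hedged usage sketch; attr_manager stands in for an AttributesManager built with a persistence adapter (without one, this setter raises AttributesManagerException):

# attr_manager is an assumed AttributesManager instance with a
# persistence adapter configured.
attr_manager.persistent_attributes = {"visits": 3, "last_intent": "HelloIntent"}
attr_manager.save_persistent_attributes()  # persists the cached value, per the docstring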
domainaware/parsedmarc
parsedmarc/__init__.py
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/__init__.py#L409-L516
def parsed_aggregate_reports_to_csv(reports):
    """
    Converts one or more parsed aggregate reports to flat CSV format,
    including headers

    Args:
        reports: A parsed aggregate report or list of parsed aggregate reports

    Returns:
        str: Parsed aggregate report data in flat CSV format, including headers
    """

    def to_str(obj):
        return str(obj).lower()

    fields = ["xml_schema", "org_name", "org_email",
              "org_extra_contact_info", "report_id", "begin_date",
              "end_date", "errors", "domain", "adkim", "aspf", "p", "sp",
              "pct", "fo", "source_ip_address", "source_country",
              "source_reverse_dns", "source_base_domain", "count",
              "disposition", "dkim_alignment", "spf_alignment",
              "policy_override_reasons", "policy_override_comments",
              "envelope_from", "header_from", "envelope_to", "dkim_domains",
              "dkim_selectors", "dkim_results", "spf_domains", "spf_scopes",
              "spf_results"]

    csv_file_object = StringIO(newline="\n")
    writer = DictWriter(csv_file_object, fields)
    writer.writeheader()

    if type(reports) == OrderedDict:
        reports = [reports]

    for report in reports:
        xml_schema = report["xml_schema"]
        org_name = report["report_metadata"]["org_name"]
        org_email = report["report_metadata"]["org_email"]
        org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
        report_id = report["report_metadata"]["report_id"]
        begin_date = report["report_metadata"]["begin_date"]
        end_date = report["report_metadata"]["end_date"]
        errors = "|".join(report["report_metadata"]["errors"])
        domain = report["policy_published"]["domain"]
        adkim = report["policy_published"]["adkim"]
        aspf = report["policy_published"]["aspf"]
        p = report["policy_published"]["p"]
        sp = report["policy_published"]["sp"]
        pct = report["policy_published"]["pct"]
        fo = report["policy_published"]["fo"]

        report_dict = dict(xml_schema=xml_schema, org_name=org_name,
                           org_email=org_email,
                           org_extra_contact_info=org_extra_contact,
                           report_id=report_id, begin_date=begin_date,
                           end_date=end_date, errors=errors, domain=domain,
                           adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct,
                           fo=fo)

        for record in report["records"]:
            row = report_dict
            row["source_ip_address"] = record["source"]["ip_address"]
            row["source_country"] = record["source"]["country"]
            row["source_reverse_dns"] = record["source"]["reverse_dns"]
            row["source_base_domain"] = record["source"]["base_domain"]
            row["count"] = record["count"]
            row["disposition"] = record["policy_evaluated"]["disposition"]
            row["spf_alignment"] = record["policy_evaluated"]["spf"]
            row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
            policy_override_reasons = list(map(
                lambda r: r["type"],
                record["policy_evaluated"]["policy_override_reasons"]))
            policy_override_comments = list(map(
                lambda r: r["comment"] or "none",
                record["policy_evaluated"]["policy_override_reasons"]))
            row["policy_override_reasons"] = ",".join(
                policy_override_reasons)
            row["policy_override_comments"] = "|".join(
                policy_override_comments)
            row["envelope_from"] = record["identifiers"]["envelope_from"]
            row["header_from"] = record["identifiers"]["header_from"]
            envelope_to = record["identifiers"]["envelope_to"]
            row["envelope_to"] = envelope_to
            dkim_domains = []
            dkim_selectors = []
            dkim_results = []
            for dkim_result in record["auth_results"]["dkim"]:
                dkim_domains.append(dkim_result["domain"])
                if "selector" in dkim_result:
                    dkim_selectors.append(dkim_result["selector"])
                dkim_results.append(dkim_result["result"])
            row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
            row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
            row["dkim_results"] = ",".join(map(to_str, dkim_results))
            spf_domains = []
            spf_scopes = []
            spf_results = []
            for spf_result in record["auth_results"]["spf"]:
                spf_domains.append(spf_result["domain"])
                spf_scopes.append(spf_result["scope"])
                spf_results.append(spf_result["result"])
            row["spf_domains"] = ",".join(map(to_str, spf_domains))
            row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
            row["spf_results"] = ",".join(map(to_str, spf_results))

            writer.writerow(row)
            csv_file_object.flush()

    return csv_file_object.getvalue()
[ "def", "parsed_aggregate_reports_to_csv", "(", "reports", ")", ":", "def", "to_str", "(", "obj", ")", ":", "return", "str", "(", "obj", ")", ".", "lower", "(", ")", "fields", "=", "[", "\"xml_schema\"", ",", "\"org_name\"", ",", "\"org_email\"", ",", "\"org_extra_contact_info\"", ",", "\"report_id\"", ",", "\"begin_date\"", ",", "\"end_date\"", ",", "\"errors\"", ",", "\"domain\"", ",", "\"adkim\"", ",", "\"aspf\"", ",", "\"p\"", ",", "\"sp\"", ",", "\"pct\"", ",", "\"fo\"", ",", "\"source_ip_address\"", ",", "\"source_country\"", ",", "\"source_reverse_dns\"", ",", "\"source_base_domain\"", ",", "\"count\"", ",", "\"disposition\"", ",", "\"dkim_alignment\"", ",", "\"spf_alignment\"", ",", "\"policy_override_reasons\"", ",", "\"policy_override_comments\"", ",", "\"envelope_from\"", ",", "\"header_from\"", ",", "\"envelope_to\"", ",", "\"dkim_domains\"", ",", "\"dkim_selectors\"", ",", "\"dkim_results\"", ",", "\"spf_domains\"", ",", "\"spf_scopes\"", ",", "\"spf_results\"", "]", "csv_file_object", "=", "StringIO", "(", "newline", "=", "\"\\n\"", ")", "writer", "=", "DictWriter", "(", "csv_file_object", ",", "fields", ")", "writer", ".", "writeheader", "(", ")", "if", "type", "(", "reports", ")", "==", "OrderedDict", ":", "reports", "=", "[", "reports", "]", "for", "report", "in", "reports", ":", "xml_schema", "=", "report", "[", "\"xml_schema\"", "]", "org_name", "=", "report", "[", "\"report_metadata\"", "]", "[", "\"org_name\"", "]", "org_email", "=", "report", "[", "\"report_metadata\"", "]", "[", "\"org_email\"", "]", "org_extra_contact", "=", "report", "[", "\"report_metadata\"", "]", "[", "\"org_extra_contact_info\"", "]", "report_id", "=", "report", "[", "\"report_metadata\"", "]", "[", "\"report_id\"", "]", "begin_date", "=", "report", "[", "\"report_metadata\"", "]", "[", "\"begin_date\"", "]", "end_date", "=", "report", "[", "\"report_metadata\"", "]", "[", "\"end_date\"", "]", "errors", "=", "\"|\"", ".", "join", "(", "report", "[", "\"report_metadata\"", "]", "[", "\"errors\"", "]", ")", "domain", "=", "report", "[", "\"policy_published\"", "]", "[", "\"domain\"", "]", "adkim", "=", "report", "[", "\"policy_published\"", "]", "[", "\"adkim\"", "]", "aspf", "=", "report", "[", "\"policy_published\"", "]", "[", "\"aspf\"", "]", "p", "=", "report", "[", "\"policy_published\"", "]", "[", "\"p\"", "]", "sp", "=", "report", "[", "\"policy_published\"", "]", "[", "\"sp\"", "]", "pct", "=", "report", "[", "\"policy_published\"", "]", "[", "\"pct\"", "]", "fo", "=", "report", "[", "\"policy_published\"", "]", "[", "\"fo\"", "]", "report_dict", "=", "dict", "(", "xml_schema", "=", "xml_schema", ",", "org_name", "=", "org_name", ",", "org_email", "=", "org_email", ",", "org_extra_contact_info", "=", "org_extra_contact", ",", "report_id", "=", "report_id", ",", "begin_date", "=", "begin_date", ",", "end_date", "=", "end_date", ",", "errors", "=", "errors", ",", "domain", "=", "domain", ",", "adkim", "=", "adkim", ",", "aspf", "=", "aspf", ",", "p", "=", "p", ",", "sp", "=", "sp", ",", "pct", "=", "pct", ",", "fo", "=", "fo", ")", "for", "record", "in", "report", "[", "\"records\"", "]", ":", "row", "=", "report_dict", "row", "[", "\"source_ip_address\"", "]", "=", "record", "[", "\"source\"", "]", "[", "\"ip_address\"", "]", "row", "[", "\"source_country\"", "]", "=", "record", "[", "\"source\"", "]", "[", "\"country\"", "]", "row", "[", "\"source_reverse_dns\"", "]", "=", "record", "[", "\"source\"", "]", "[", "\"reverse_dns\"", "]", "row", "[", "\"source_base_domain\"", "]", "=", 
"record", "[", "\"source\"", "]", "[", "\"base_domain\"", "]", "row", "[", "\"count\"", "]", "=", "record", "[", "\"count\"", "]", "row", "[", "\"disposition\"", "]", "=", "record", "[", "\"policy_evaluated\"", "]", "[", "\"disposition\"", "]", "row", "[", "\"spf_alignment\"", "]", "=", "record", "[", "\"policy_evaluated\"", "]", "[", "\"spf\"", "]", "row", "[", "\"dkim_alignment\"", "]", "=", "record", "[", "\"policy_evaluated\"", "]", "[", "\"dkim\"", "]", "policy_override_reasons", "=", "list", "(", "map", "(", "lambda", "r", ":", "r", "[", "\"type\"", "]", ",", "record", "[", "\"policy_evaluated\"", "]", "[", "\"policy_override_reasons\"", "]", ")", ")", "policy_override_comments", "=", "list", "(", "map", "(", "lambda", "r", ":", "r", "[", "\"comment\"", "]", "or", "\"none\"", ",", "record", "[", "\"policy_evaluated\"", "]", "[", "\"policy_override_reasons\"", "]", ")", ")", "row", "[", "\"policy_override_reasons\"", "]", "=", "\",\"", ".", "join", "(", "policy_override_reasons", ")", "row", "[", "\"policy_override_comments\"", "]", "=", "\"|\"", ".", "join", "(", "policy_override_comments", ")", "row", "[", "\"envelope_from\"", "]", "=", "record", "[", "\"identifiers\"", "]", "[", "\"envelope_from\"", "]", "row", "[", "\"header_from\"", "]", "=", "record", "[", "\"identifiers\"", "]", "[", "\"header_from\"", "]", "envelope_to", "=", "record", "[", "\"identifiers\"", "]", "[", "\"envelope_to\"", "]", "row", "[", "\"envelope_to\"", "]", "=", "envelope_to", "dkim_domains", "=", "[", "]", "dkim_selectors", "=", "[", "]", "dkim_results", "=", "[", "]", "for", "dkim_result", "in", "record", "[", "\"auth_results\"", "]", "[", "\"dkim\"", "]", ":", "dkim_domains", ".", "append", "(", "dkim_result", "[", "\"domain\"", "]", ")", "if", "\"selector\"", "in", "dkim_result", ":", "dkim_selectors", ".", "append", "(", "dkim_result", "[", "\"selector\"", "]", ")", "dkim_results", ".", "append", "(", "dkim_result", "[", "\"result\"", "]", ")", "row", "[", "\"dkim_domains\"", "]", "=", "\",\"", ".", "join", "(", "map", "(", "to_str", ",", "dkim_domains", ")", ")", "row", "[", "\"dkim_selectors\"", "]", "=", "\",\"", ".", "join", "(", "map", "(", "to_str", ",", "dkim_selectors", ")", ")", "row", "[", "\"dkim_results\"", "]", "=", "\",\"", ".", "join", "(", "map", "(", "to_str", ",", "dkim_results", ")", ")", "spf_domains", "=", "[", "]", "spf_scopes", "=", "[", "]", "spf_results", "=", "[", "]", "for", "spf_result", "in", "record", "[", "\"auth_results\"", "]", "[", "\"spf\"", "]", ":", "spf_domains", ".", "append", "(", "spf_result", "[", "\"domain\"", "]", ")", "spf_scopes", ".", "append", "(", "spf_result", "[", "\"scope\"", "]", ")", "spf_results", ".", "append", "(", "spf_result", "[", "\"result\"", "]", ")", "row", "[", "\"spf_domains\"", "]", "=", "\",\"", ".", "join", "(", "map", "(", "to_str", ",", "spf_domains", ")", ")", "row", "[", "\"spf_scopes\"", "]", "=", "\",\"", ".", "join", "(", "map", "(", "to_str", ",", "spf_scopes", ")", ")", "row", "[", "\"spf_results\"", "]", "=", "\",\"", ".", "join", "(", "map", "(", "to_str", ",", "dkim_results", ")", ")", "writer", ".", "writerow", "(", "row", ")", "csv_file_object", ".", "flush", "(", ")", "return", "csv_file_object", ".", "getvalue", "(", ")" ]
Converts one or more parsed aggregate reports to flat CSV format, including headers Args: reports: A parsed aggregate report or list of parsed aggregate reports Returns: str: Parsed aggregate report data in flat CSV format, including headers
[ "Converts", "one", "or", "more", "parsed", "aggregate", "reports", "to", "flat", "CSV", "format", "including", "headers" ]
python
test
46.324074
dlintott/gns3-converter
gns3converter/topology.py
https://github.com/dlintott/gns3-converter/blob/acbc55da51de86388dc5b5f6da55809b3c86b7ca/gns3converter/topology.py#L500-L515
def get_vboxes(self):
    """
    Get the maximum ID of the VBoxes

    :return: Maximum VBox ID
    :rtype: int
    """
    vbox_list = []
    vbox_max = None
    for node in self.nodes:
        if node['type'] == 'VirtualBoxVM':
            vbox_list.append(node['vbox_id'])
    if len(vbox_list) > 0:
        vbox_max = max(vbox_list)
    return vbox_max
[ "def", "get_vboxes", "(", "self", ")", ":", "vbox_list", "=", "[", "]", "vbox_max", "=", "None", "for", "node", "in", "self", ".", "nodes", ":", "if", "node", "[", "'type'", "]", "==", "'VirtualBoxVM'", ":", "vbox_list", ".", "append", "(", "node", "[", "'vbox_id'", "]", ")", "if", "len", "(", "vbox_list", ")", ">", "0", ":", "vbox_max", "=", "max", "(", "vbox_list", ")", "return", "vbox_max" ]
Get the maximum ID of the VBoxes :return: Maximum VBox ID :rtype: int
[ "Get", "the", "maximum", "ID", "of", "the", "VBoxes" ]
python
train
24.6875
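Illustrative data only; the node shape is inferred from the loop above:

# Only 'VirtualBoxVM' entries contribute, and the maximum vbox_id wins.
nodes = [{'type': 'VirtualBoxVM', 'vbox_id': 1},
         {'type': 'Router'},
         {'type': 'VirtualBoxVM', 'vbox_id': 3}]
# Over these nodes get_vboxes() returns 3; with no VirtualBoxVM
# nodes it returns None.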
briney/abutils
abutils/core/lineage.py
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/core/lineage.py#L291-L308
def verify_light_chains(self, threshold=0.9):
    '''
    Clusters the light chains to identify potentially spurious
    (non-lineage) pairings.

    Following clustering, all pairs in the largest light chain cluster are
    assumed to be correctly paired. For each of those pairs, the <verified>
    attribute is set to True. For pairs not in the largest light chain
    cluster, the <verified> attribute is set to False.

    Inputs (optional)
    -----------------
    threshold: CD-HIT clustering threshold. Default is 0.9.
    '''
    lseqs = [l.light for l in self.lights]
    clusters = cluster(lseqs, threshold=threshold)
    clusters.sort(key=lambda x: x.size, reverse=True)
    verified_ids = clusters[0].ids
    for p in self.lights:
        p.verified = True if p.name in verified_ids else False
[ "def", "verify_light_chains", "(", "self", ",", "threshold", "=", "0.9", ")", ":", "lseqs", "=", "[", "l", ".", "light", "for", "l", "in", "self", ".", "lights", "]", "clusters", "=", "cluster", "(", "lseqs", ",", "threshold", "=", "threshold", ")", "clusters", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "size", ",", "reverse", "=", "True", ")", "verified_ids", "=", "clusters", "[", "0", "]", ".", "ids", "for", "p", "in", "self", ".", "lights", ":", "p", ".", "verified", "=", "True", "if", "p", ".", "name", "in", "verified_ids", "else", "False" ]
Clusters the light chains to identify potentially spurious (non-lineage) pairings. Following clustering, all pairs in the largest light chain cluster are assumed to be correctly paired. For each of those pairs, the <verified> attribute is set to True. For pairs not in the largest light chain cluster, the <verified> attribute is set to False. Inputs (optional) ----------------- threshold: CD-HIT clustering threshold. Default is 0.9.
[ "Clusters", "the", "light", "chains", "to", "identify", "potentially", "spurious", "(", "non", "-", "lineage", ")", "pairings", ".", "Following", "clustering", "all", "pairs", "in", "the", "largest", "light", "chain", "cluster", "are", "assumed", "to", "be", "correctly", "paired", ".", "For", "each", "of", "those", "pairs", "the", "<verified", ">", "attribute", "is", "set", "to", "True", ".", "For", "pairs", "not", "in", "the", "largest", "light", "chain", "cluster", "the", "<verified", ">", "attribute", "is", "set", "to", "False", "." ]
python
train
47.166667
martinpitt/python-dbusmock
dbusmock/templates/upower.py
https://github.com/martinpitt/python-dbusmock/blob/26f65f78bc0ed347233f699a8d6ee0e6880e7eb0/dbusmock/templates/upower.py#L127-L157
def AddDischargingBattery(self, device_name, model_name, percentage, seconds_to_empty):
    '''Convenience method to add a discharging battery object

    You have to specify a device name which must be a valid part of an
    object path, e. g. "mock_ac", an arbitrary model name, the charge
    percentage, and the seconds until the battery is empty.

    Please note that this does not set any global properties such as
    "on-battery".

    Returns the new object path.
    '''
    path = '/org/freedesktop/UPower/devices/' + device_name
    self.AddObject(path,
                   DEVICE_IFACE,
                   {
                       'PowerSupply': dbus.Boolean(True, variant_level=1),
                       'IsPresent': dbus.Boolean(True, variant_level=1),
                       'Model': dbus.String(model_name, variant_level=1),
                       'Percentage': dbus.Double(percentage, variant_level=1),
                       'TimeToEmpty': dbus.Int64(seconds_to_empty, variant_level=1),
                       'EnergyFull': dbus.Double(100.0, variant_level=1),
                       'Energy': dbus.Double(percentage, variant_level=1),
                       # UP_DEVICE_STATE_DISCHARGING
                       'State': dbus.UInt32(2, variant_level=1),
                       # UP_DEVICE_KIND_BATTERY
                       'Type': dbus.UInt32(2, variant_level=1),
                   }, [])
    self.EmitSignal(MAIN_IFACE, 'DeviceAdded', self.device_sig_type, [path])
    return path
[ "def", "AddDischargingBattery", "(", "self", ",", "device_name", ",", "model_name", ",", "percentage", ",", "seconds_to_empty", ")", ":", "path", "=", "'/org/freedesktop/UPower/devices/'", "+", "device_name", "self", ".", "AddObject", "(", "path", ",", "DEVICE_IFACE", ",", "{", "'PowerSupply'", ":", "dbus", ".", "Boolean", "(", "True", ",", "variant_level", "=", "1", ")", ",", "'IsPresent'", ":", "dbus", ".", "Boolean", "(", "True", ",", "variant_level", "=", "1", ")", ",", "'Model'", ":", "dbus", ".", "String", "(", "model_name", ",", "variant_level", "=", "1", ")", ",", "'Percentage'", ":", "dbus", ".", "Double", "(", "percentage", ",", "variant_level", "=", "1", ")", ",", "'TimeToEmpty'", ":", "dbus", ".", "Int64", "(", "seconds_to_empty", ",", "variant_level", "=", "1", ")", ",", "'EnergyFull'", ":", "dbus", ".", "Double", "(", "100.0", ",", "variant_level", "=", "1", ")", ",", "'Energy'", ":", "dbus", ".", "Double", "(", "percentage", ",", "variant_level", "=", "1", ")", ",", "# UP_DEVICE_STATE_DISCHARGING", "'State'", ":", "dbus", ".", "UInt32", "(", "2", ",", "variant_level", "=", "1", ")", ",", "# UP_DEVICE_KIND_BATTERY", "'Type'", ":", "dbus", ".", "UInt32", "(", "2", ",", "variant_level", "=", "1", ")", ",", "}", ",", "[", "]", ")", "self", ".", "EmitSignal", "(", "MAIN_IFACE", ",", "'DeviceAdded'", ",", "self", ".", "device_sig_type", ",", "[", "path", "]", ")", "return", "path" ]
Convenience method to add a discharging battery object You have to specify a device name which must be a valid part of an object path, e. g. "mock_ac", an arbitrary model name, the charge percentage, and the seconds until the battery is empty. Please note that this does not set any global properties such as "on-battery". Returns the new object path.
[ "Convenience", "method", "to", "add", "a", "discharging", "battery", "object" ]
python
train
48.16129
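A hedged usage sketch; upower_mock stands in for the mocked UPower object obtained through dbusmock's test harness, which this record does not show:

# upower_mock is an assumed handle to the mocked UPower template.
path = upower_mock.AddDischargingBattery('mock_bat0', 'Mock Battery', 42.0, 1200)
assert path == '/org/freedesktop/UPower/devices/mock_bat0'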
robotools/fontParts
Lib/fontParts/base/contour.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/contour.py#L697-L718
def appendBPoint(self, type=None, anchor=None, bcpIn=None, bcpOut=None, bPoint=None):
    """
    Append a bPoint to the contour.
    """
    if bPoint is not None:
        if type is None:
            type = bPoint.type
        if anchor is None:
            anchor = bPoint.anchor
        if bcpIn is None:
            bcpIn = bPoint.bcpIn
        if bcpOut is None:
            bcpOut = bPoint.bcpOut
    type = normalizers.normalizeBPointType(type)
    anchor = normalizers.normalizeCoordinateTuple(anchor)
    if bcpIn is None:
        bcpIn = (0, 0)
    bcpIn = normalizers.normalizeCoordinateTuple(bcpIn)
    if bcpOut is None:
        bcpOut = (0, 0)
    bcpOut = normalizers.normalizeCoordinateTuple(bcpOut)
    self._appendBPoint(type, anchor, bcpIn=bcpIn, bcpOut=bcpOut)
[ "def", "appendBPoint", "(", "self", ",", "type", "=", "None", ",", "anchor", "=", "None", ",", "bcpIn", "=", "None", ",", "bcpOut", "=", "None", ",", "bPoint", "=", "None", ")", ":", "if", "bPoint", "is", "not", "None", ":", "if", "type", "is", "None", ":", "type", "=", "bPoint", ".", "type", "if", "anchor", "is", "None", ":", "anchor", "=", "bPoint", ".", "anchor", "if", "bcpIn", "is", "None", ":", "bcpIn", "=", "bPoint", ".", "bcpIn", "if", "bcpOut", "is", "None", ":", "bcpOut", "=", "bPoint", ".", "bcpOut", "type", "=", "normalizers", ".", "normalizeBPointType", "(", "type", ")", "anchor", "=", "normalizers", ".", "normalizeCoordinateTuple", "(", "anchor", ")", "if", "bcpIn", "is", "None", ":", "bcpIn", "=", "(", "0", ",", "0", ")", "bcpIn", "=", "normalizers", ".", "normalizeCoordinateTuple", "(", "bcpIn", ")", "if", "bcpOut", "is", "None", ":", "bcpOut", "=", "(", "0", ",", "0", ")", "bcpOut", "=", "normalizers", ".", "normalizeCoordinateTuple", "(", "bcpOut", ")", "self", ".", "_appendBPoint", "(", "type", ",", "anchor", ",", "bcpIn", "=", "bcpIn", ",", "bcpOut", "=", "bcpOut", ")" ]
Append a bPoint to the contour.
[ "Append", "a", "bPoint", "to", "the", "contour", "." ]
python
train
38.363636
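A hedged usage sketch; contour stands in for a fontParts contour object, and "corner" and "curve" are typical bPoint types:

# contour is an assumed fontParts contour instance.
contour.appendBPoint("corner", (10, 20))
contour.appendBPoint("curve", (50, 60), bcpIn=(-15, 0), bcpOut=(15, 0))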
marcinmiklitz/pywindow
pywindow/io_tools.py
https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/io_tools.py#L193-L231
def dump2json(self, obj, filepath, override=False, **kwargs):
    """
    Dump a dictionary into a JSON dictionary.

    Uses the json.dump() function.

    Parameters
    ----------
    obj : :class:`dict`
        A dictionary to be dumped as JSON file.

    filepath : :class:`str`
        The filepath for the dumped file.

    override : :class:`bool`
        If True, any file in the filepath will be overridden. (default=False)
    """
    # We make sure that the object passed by the user is a dictionary.
    if isinstance(obj, dict):
        pass
    else:
        raise _NotADictionary(
            "This function only accepts dictionaries as input")
    # We check if the filepath has a json extension, if not we add it.
    if str(filepath[-4:]) == 'json':
        pass
    else:
        filepath = ".".join((str(filepath), "json"))
    # First we check if the file already exists. If yes and the override
    # keyword is False (default), we will raise an exception. Otherwise
    # the file will be overwritten.
    if override is False:
        if os.path.isfile(filepath):
            raise _FileAlreadyExists(
                "The file {0} already exists. Use a different filepath, "
                "or set the 'override' kwarg to True.".format(filepath))
    # We dump the object to the json file. Additional kwargs can be passed.
    with open(filepath, 'w+') as json_file:
        json.dump(obj, json_file, **kwargs)
[ "def", "dump2json", "(", "self", ",", "obj", ",", "filepath", ",", "override", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# We make sure that the object passed by the user is a dictionary.", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "pass", "else", ":", "raise", "_NotADictionary", "(", "\"This function only accepts dictionaries as input\"", ")", "# We check if the filepath has a json extenstion, if not we add it.", "if", "str", "(", "filepath", "[", "-", "4", ":", "]", ")", "==", "'json'", ":", "pass", "else", ":", "filepath", "=", "\".\"", ".", "join", "(", "(", "str", "(", "filepath", ")", ",", "\"json\"", ")", ")", "# First we check if the file already exists. If yes and the override", "# keyword is False (default), we will raise an exception. Otherwise", "# the file will be overwritten.", "if", "override", "is", "False", ":", "if", "os", ".", "path", ".", "isfile", "(", "filepath", ")", ":", "raise", "_FileAlreadyExists", "(", "\"The file {0} already exists. Use a different filepath, \"", "\"or set the 'override' kwarg to True.\"", ".", "format", "(", "filepath", ")", ")", "# We dump the object to the json file. Additional kwargs can be passed.", "with", "open", "(", "filepath", ",", "'w+'", ")", "as", "json_file", ":", "json", ".", "dump", "(", "obj", ",", "json_file", ",", "*", "*", "kwargs", ")" ]
Dump a dictionary into a JSON dictionary. Uses the json.dump() function. Parameters ---------- obj : :class:`dict` A dictionary to be dumpped as JSON file. filepath : :class:`str` The filepath for the dumped file. override : :class:`bool` If True, any file in the filepath will be override. (default=False)
[ "Dump", "a", "dictionary", "into", "a", "JSON", "dictionary", "." ]
python
train
39.307692
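A hedged usage sketch; io stands in for an instance of the surrounding class, which this record does not show:

# "output" lacks the json suffix, so the method writes output.json;
# extra kwargs such as indent are forwarded to json.dump().
io.dump2json({"molecule": "COF-1", "windows": 4}, "output",
             override=True, indent=4)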
openstax/cnx-publishing
cnxpublishing/events.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/events.py#L67-L79
def create_pg_notify_event(notif):
    """A factory for creating a Postgres Notification Event
    (an object inheriting from `cnxpublishing.events.PGNotifyEvent`)
    given `notif`, a `psycopg2.extensions.Notify` object.
    """
    # TODO Lookup registered events via getAllUtilitiesRegisteredFor
    #      for class mapping.
    if notif.channel not in _CHANNEL_MAPPER:
        cls = _CHANNEL_MAPPER[None]
    else:
        cls = _CHANNEL_MAPPER[notif.channel]
    return cls(notif)
[ "def", "create_pg_notify_event", "(", "notif", ")", ":", "# TODO Lookup registered events via getAllUtilitiesRegisteredFor", "# for class mapping.", "if", "notif", ".", "channel", "not", "in", "_CHANNEL_MAPPER", ":", "cls", "=", "_CHANNEL_MAPPER", "[", "None", "]", "else", ":", "cls", "=", "_CHANNEL_MAPPER", "[", "notif", ".", "channel", "]", "return", "cls", "(", "notif", ")" ]
A factory for creating a Postgres Notification Event (an object inheriting from `cnxpublishing.events.PGNotifyEvent`) given `notif`, a `psycopg2.extensions.Notify` object.
[ "A", "factory", "for", "creating", "a", "Postgres", "Notification", "Event", "(", "an", "object", "inheriting", "from", "cnxpublishing", ".", "events", ".", "PGNotifyEvent", ")", "given", "notif", "a", "psycopg2", ".", "extensions", ".", "Notify", "object", "." ]
python
valid
36.538462
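A hedged sketch of a caller, using psycopg2's real Notify class with a hypothetical channel name and payload:

from psycopg2.extensions import Notify

# Channel and payload are illustrative; channels missing from
# _CHANNEL_MAPPER fall back to the _CHANNEL_MAPPER[None] event class.
notif = Notify(pid=4242, channel='post_publication',
               payload='{"module_ident": 1}')
event = create_pg_notify_event(notif)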
FujiMakoto/AgentML
agentml/common.py
https://github.com/FujiMakoto/AgentML/blob/c8cb64b460d876666bf29ea2c682189874c7c403/agentml/common.py#L102-L124
def int_attribute(element, attribute, default=0):
    """
    Returns the int value of an attribute, or a default if it's not defined
    :param element: The XML Element object
    :type  element: etree._Element

    :param attribute: The name of the attribute to evaluate
    :type  attribute: basestring

    :param default: The default value to return if the attribute is not defined
    :type  default: int

    :rtype: int
    """
    attribute_value = element.get(attribute)
    if attribute_value:
        try:
            return int(attribute_value)
        except (TypeError, ValueError):
            return default
    return default
[ "def", "int_attribute", "(", "element", ",", "attribute", ",", "default", "=", "0", ")", ":", "attribute_value", "=", "element", ".", "get", "(", "attribute", ")", "if", "attribute_value", ":", "try", ":", "return", "int", "(", "attribute_value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "default", "return", "default" ]
Returns the int value of an attribute, or a default if it's not defined :param element: The XML Element object :type element: etree._Element :param attribute: The name of the attribute to evaluate :type attribute: basestring :param default: The default value to return if the attribute is not defined :type default: int :rtype: int
[ "Returns", "the", "int", "value", "of", "an", "attribute", "or", "a", "default", "if", "it", "s", "not", "defined", ":", "param", "element", ":", "The", "XML", "Element", "object", ":", "type", "element", ":", "etree", ".", "_Element" ]
python
train
27.086957
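A runnable illustration; the docstring names lxml's etree._Element, but any element exposing .get() behaves the same, so the stdlib parser is used here:

import xml.etree.ElementTree as etree  # stdlib stand-in for lxml.etree

elem = etree.fromstring('<rule priority="3" weight="high"/>')
int_attribute(elem, 'priority')    # -> 3
int_attribute(elem, 'weight', 10)  # -> 10 (value not parseable as int)
int_attribute(elem, 'missing', 1)  # -> 1 (attribute not defined)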
projecthamster/hamster
src/hamster/lib/__init__.py
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/__init__.py#L40-L44
def extract_time(match):
    """extract time from a time_re match."""
    hour = int(match.group('hour'))
    minute = int(match.group('minute'))
    return dt.time(hour, minute)
[ "def", "extract_time", "(", "match", ")", ":", "hour", "=", "int", "(", "match", ".", "group", "(", "'hour'", ")", ")", "minute", "=", "int", "(", "match", ".", "group", "(", "'minute'", ")", ")", "return", "dt", ".", "time", "(", "hour", ",", "minute", ")" ]
extract time from a time_re match.
[ "extract", "time", "from", "a", "time_re", "match", "." ]
python
train
34.8
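A runnable illustration; the real time_re is defined elsewhere in hamster, so a stand-in pattern exposing the named groups the function reads is assumed:

import re
import datetime as dt  # the function body references dt.time

# Any pattern with 'hour' and 'minute' named groups satisfies extract_time.
time_re = re.compile(r'(?P<hour>\d{1,2}):(?P<minute>\d{2})')
match = time_re.search('meeting at 14:30')
extract_time(match)  # -> datetime.time(14, 30)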
google/grr
grr/core/grr_response_core/stats/default_stats_collector.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/stats/default_stats_collector.py#L190-L192
def RecordEvent(self, metric_name, value, fields=None):
    """See base class."""
    self._event_metrics[metric_name].Record(value, fields)
[ "def", "RecordEvent", "(", "self", ",", "metric_name", ",", "value", ",", "fields", "=", "None", ")", ":", "self", ".", "_event_metrics", "[", "metric_name", "]", ".", "Record", "(", "value", ",", "fields", ")" ]
See base class.
[ "See", "base", "class", "." ]
python
train
46
nteract/bookstore
bookstore/handlers.py
https://github.com/nteract/bookstore/blob/14d90834cc09e211453dbebf8914b9c0819bdcda/bookstore/handlers.py#L54-L68
async def put(self, path=''):
    """Publish a notebook on a given path.

    The payload directly matches the contents API for PUT.
    """
    self.log.info("Attempt publishing to %s", path)

    if path == '' or path == '/':
        raise web.HTTPError(400, "Must provide a path for publishing")

    model = self.get_json_body()
    if model:
        await self._publish(model, path.lstrip('/'))
    else:
        raise web.HTTPError(400, "Cannot publish an empty model")
[ "async", "def", "put", "(", "self", ",", "path", "=", "''", ")", ":", "self", ".", "log", ".", "info", "(", "\"Attempt publishing to %s\"", ",", "path", ")", "if", "path", "==", "''", "or", "path", "==", "'/'", ":", "raise", "web", ".", "HTTPError", "(", "400", ",", "\"Must provide a path for publishing\"", ")", "model", "=", "self", ".", "get_json_body", "(", ")", "if", "model", ":", "await", "self", ".", "_publish", "(", "model", ",", "path", ".", "lstrip", "(", "'/'", ")", ")", "else", ":", "raise", "web", ".", "HTTPError", "(", "400", ",", "\"Cannot publish an empty model\"", ")" ]
Publish a notebook on a given path. The payload directly matches the contents API for PUT.
[ "Publish", "a", "notebook", "on", "a", "given", "path", "." ]
python
train
33.666667
rene-aguirre/pywinusb
pywinusb/hid/core.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/core.py#L692-L702
def find_any_reports(self, usage_page=0, usage_id=0):
    """Find any report type referencing HID usage control/data item.
    Results are returned in a dictionary mapping report_type to usage
    lists.
    """
    items = [
        (HidP_Input,   self.find_input_reports(usage_page, usage_id)),
        (HidP_Output,  self.find_output_reports(usage_page, usage_id)),
        (HidP_Feature, self.find_feature_reports(usage_page, usage_id)),
    ]
    return dict([(t, r) for t, r in items if r])
[ "def", "find_any_reports", "(", "self", ",", "usage_page", "=", "0", ",", "usage_id", "=", "0", ")", ":", "items", "=", "[", "(", "HidP_Input", ",", "self", ".", "find_input_reports", "(", "usage_page", ",", "usage_id", ")", ")", ",", "(", "HidP_Output", ",", "self", ".", "find_output_reports", "(", "usage_page", ",", "usage_id", ")", ")", ",", "(", "HidP_Feature", ",", "self", ".", "find_feature_reports", "(", "usage_page", ",", "usage_id", ")", ")", ",", "]", "return", "dict", "(", "[", "(", "t", ",", "r", ")", "for", "t", ",", "r", "in", "items", "if", "r", "]", ")" ]
Find any report type referencing HID usage control/data item. Results are returned in a dictionary mapping report_type to usage lists.
[ "Find", "any", "report", "type", "referencing", "HID", "usage", "control", "/", "data", "item", ".", "Results", "are", "returned", "in", "a", "dictionary", "mapping", "report_type", "to", "usage", "lists", "." ]
python
train
49.363636
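A hedged usage sketch (Windows-only, assumes at least one attached HID device; the usage-page/usage values are illustrative):

import pywinusb.hid as hid

device = hid.HidDeviceFilter().get_devices()[0]
device.open()
try:
    # 0x01/0x02 (Generic Desktop / Mouse) are illustrative usage values.
    reports = device.find_any_reports(usage_page=0x01, usage_id=0x02)
    for report_type, report_list in reports.items():
        print(report_type, report_list)
finally:
    device.close()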
IEMLdev/ieml
ieml/distance/projection.py
https://github.com/IEMLdev/ieml/blob/4c842ba7e6165e2f1b4a4e2e98759f9f33af5f25/ieml/distance/projection.py#L25-L53
def project_usls_on_dictionary(usls, allowed_terms=None):
    """`usls` is an iterable of usl.

    return a mapping term -> usl list
    """
    cells_to_usls = defaultdict(set)
    tables = set()

    for u in usls:
        for t in u.objects(Term):
            for c in t.singular_sequences:
                # This is the first time we meet the cell c
                if not cells_to_usls[c]:
                    tables.update(c.relations.contained)
                cells_to_usls[c].add(u)

    if allowed_terms:
        allowed_terms = set(allowed_terms)
        tables = tables & allowed_terms
        cells_to_usls = {c: l for c, l in cells_to_usls.items()
                         if c in allowed_terms}

    tables_to_usls = {
        table: list(set(u for c in table.singular_sequences
                        for u in cells_to_usls[c]))
        for table in tables if not isinstance(table, TableSet)
    }

    return tables_to_usls
[ "def", "project_usls_on_dictionary", "(", "usls", ",", "allowed_terms", "=", "None", ")", ":", "cells_to_usls", "=", "defaultdict", "(", "set", ")", "tables", "=", "set", "(", ")", "for", "u", "in", "usls", ":", "for", "t", "in", "u", ".", "objects", "(", "Term", ")", ":", "for", "c", "in", "t", ".", "singular_sequences", ":", "# This is the first time we meet the cell c", "if", "not", "cells_to_usls", "[", "c", "]", ":", "tables", ".", "update", "(", "c", ".", "relations", ".", "contained", ")", "cells_to_usls", "[", "c", "]", ".", "add", "(", "u", ")", "if", "allowed_terms", ":", "allowed_terms", "=", "set", "(", "allowed_terms", ")", "tables", "=", "tables", "&", "allowed_terms", "cells_to_usls", "=", "{", "c", ":", "l", "for", "c", ",", "l", "in", "cells_to_usls", ".", "items", "(", ")", "if", "c", "in", "allowed_terms", "}", "tables_to_usls", "=", "{", "table", ":", "list", "(", "set", "(", "u", "for", "c", "in", "table", ".", "singular_sequences", "for", "u", "in", "cells_to_usls", "[", "c", "]", ")", ")", "for", "table", "in", "tables", "if", "not", "isinstance", "(", "table", ",", "TableSet", ")", "}", "return", "tables_to_usls" ]
`usls` is an iterable of usl. return a mapping term -> usl list
[ "usls", "is", "an", "iterable", "of", "usl", "." ]
python
test
30.034483
jaredLunde/vital-tools
vital/debug/__init__.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/debug/__init__.py#L1775-L1801
def count(self, flag_message, padding=None, force=False):
    """ Log Level: :attr:COUNT

    @flag_message: time-like #float
    @padding: #str 'top', 'bottom' or 'all', adds a new line to the
        specified area with :func:padd
    @force: #bool whether or not to force the message to log
        in spite of the assigned log level

    ..
        from vital.debug import Logg
        logg = Logg(loglevel="v")

        logg("Total apps").count(3)
        # Total apps (3)

        logg().count([0, 1, 2, 3])
        # (4)
    ..
    """
    if self.should_log(self.COUNT) or force:
        flag_message = flag_message \
            if isinstance(flag_message, (int, float)) else \
            str(len(flag_message))
        self._print_message(
            flag_message=flag_message,
            padding=padding,
            reverse=True,
            color=colors.timing_color)
[ "def", "count", "(", "self", ",", "flag_message", ",", "padding", "=", "None", ",", "force", "=", "False", ")", ":", "if", "self", ".", "should_log", "(", "self", ".", "COUNT", ")", "or", "force", ":", "flag_message", "=", "flag_message", "if", "isinstance", "(", "flag_message", ",", "(", "int", ",", "float", ")", ")", "else", "str", "(", "len", "(", "flag_message", ")", ")", "self", ".", "_print_message", "(", "flag_message", "=", "flag_message", ",", "padding", "=", "padding", ",", "reverse", "=", "True", ",", "color", "=", "colors", ".", "timing_color", ")" ]
Log Level: :attr:COUNT @flag_message: time-like #float @padding: #str 'top', 'bottom' or 'all', adds a new line to the specified area with :func:padd @force: #bool whether or not to force the message to log in spite of the assigned log level .. from vital.debug import Logg logg = Logg(loglevel="v") logg("Total apps").count(3) # Total apps (3) logg().count([0, 1, 2, 3]) # (4) ..
[ "Log", "Level", ":", ":", "attr", ":", "COUNT" ]
python
train
36.037037
heuer/cablemap
cablemap.nlp/cablemap/nlp/corpus.py
https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.nlp/cablemap/nlp/corpus.py#L65-L76
def add_texts(self, reference_id, texts):
    """\
    Adds the words from the provided iterable `texts` to the corpus.

    The strings will be tokenized.

    `reference_id`
        The reference identifier of the cable.
    `texts`
        An iterable of strings.
    """
    self.add_words(reference_id, chain(*(self._tokenize(t) for t in texts)))
[ "def", "add_texts", "(", "self", ",", "reference_id", ",", "texts", ")", ":", "self", ".", "add_words", "(", "reference_id", ",", "chain", "(", "*", "(", "self", ".", "_tokenize", "(", "t", ")", "for", "t", "in", "texts", ")", ")", ")" ]
\ Adds the words from the provided iterable `texts` to the corpus. The strings will be tokenized. `reference_id` The reference identifier of the cable. `texts` An iterable of strings.
[ "\\", "Adds", "the", "words", "from", "the", "provided", "iterable", "texts", "to", "the", "corpus", "." ]
python
train
31.333333
jjgomera/iapws
iapws/humidAir.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/humidAir.py#L729-L761
def _eq(self, T, P):
    """Procedure to calculate the composition in saturation state

    Parameters
    ----------
    T : float
        Temperature [K]
    P : float
        Pressure [MPa]

    Returns
    -------
    Asat : float
        Saturation mass fraction of dry air in humid air [kg/kg]
    """
    if T <= 273.16:
        ice = _Ice(T, P)
        gw = ice["g"]
    else:
        water = IAPWS95(T=T, P=P)
        gw = water.g

    def f(parr):
        rho, a = parr
        if a > 1:
            a = 1
        fa = self._fav(T, rho, a)
        muw = fa["fir"]+rho*fa["fird"]-a*fa["fira"]
        return gw-muw, rho**2*fa["fird"]/1000-P

    rinput = fsolve(f, [1, 0.95], full_output=True)
    Asat = rinput[0][1]
    return Asat
[ "def", "_eq", "(", "self", ",", "T", ",", "P", ")", ":", "if", "T", "<=", "273.16", ":", "ice", "=", "_Ice", "(", "T", ",", "P", ")", "gw", "=", "ice", "[", "\"g\"", "]", "else", ":", "water", "=", "IAPWS95", "(", "T", "=", "T", ",", "P", "=", "P", ")", "gw", "=", "water", ".", "g", "def", "f", "(", "parr", ")", ":", "rho", ",", "a", "=", "parr", "if", "a", ">", "1", ":", "a", "=", "1", "fa", "=", "self", ".", "_fav", "(", "T", ",", "rho", ",", "a", ")", "muw", "=", "fa", "[", "\"fir\"", "]", "+", "rho", "*", "fa", "[", "\"fird\"", "]", "-", "a", "*", "fa", "[", "\"fira\"", "]", "return", "gw", "-", "muw", ",", "rho", "**", "2", "*", "fa", "[", "\"fird\"", "]", "/", "1000", "-", "P", "rinput", "=", "fsolve", "(", "f", ",", "[", "1", ",", "0.95", "]", ",", "full_output", "=", "True", ")", "Asat", "=", "rinput", "[", "0", "]", "[", "1", "]", "return", "Asat" ]
Procedure for calculate the composition in saturation state Parameters ---------- T : float Temperature [K] P : float Pressure [MPa] Returns ------- Asat : float Saturation mass fraction of dry air in humid air [kg/kg]
[ "Procedure", "for", "calculate", "the", "composition", "in", "saturation", "state" ]
python
train
24.939394
twilio/twilio-python
twilio/rest/api/v2010/account/sip/credential_list/credential.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/sip/credential_list/credential.py#L143-L157
def get(self, sid):
    """
    Constructs a CredentialContext

    :param sid: The unique id that identifies the resource to fetch.

    :returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
    :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
    """
    return CredentialContext(
        self._version,
        account_sid=self._solution['account_sid'],
        credential_list_sid=self._solution['credential_list_sid'],
        sid=sid,
    )
[ "def", "get", "(", "self", ",", "sid", ")", ":", "return", "CredentialContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", "credential_list_sid", "=", "self", ".", "_solution", "[", "'credential_list_sid'", "]", ",", "sid", "=", "sid", ",", ")" ]
Constructs a CredentialContext :param sid: The unique id that identifies the resource to fetch. :returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
[ "Constructs", "a", "CredentialContext" ]
python
train
36.866667
westurner/pyrpo
pyrpo/pyrpo.py
https://github.com/westurner/pyrpo/blob/2a910af055dc405b761571a52ef87842397ddadf/pyrpo/pyrpo.py#L1661-L1688
def __last_commit(self):
    """
    Retrieve the most recent commit message (with ``svn info``)

    Returns:
        tuple: (datestr, (revno, user, None, desc))

    $ svn info
    Path: .
    URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
    Repository Root: http://python-dlp.googlecode.com/svn
    Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
    Revision: 378
    Node Kind: directory
    Schedule: normal
    Last Changed Author: chimezie
    Last Changed Rev: 378
    Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
    """
    cmd = ['svn', 'info']
    op = self.sh(cmd, shell=False)
    if not op:
        return None
    author, rev, datestr = op.split('\n')[7:10]
    author = author.split(': ', 1)[1].strip()
    rev = rev.split(': ', 1)[1].strip()
    datestr = datestr.split(': ', 1)[1].split('(', 1)[0].strip()
    return datestr, (rev, author, None, None)
[ "def", "__last_commit", "(", "self", ")", ":", "cmd", "=", "[", "'svn'", ",", "'info'", "]", "op", "=", "self", ".", "sh", "(", "cmd", ",", "shell", "=", "False", ")", "if", "not", "op", ":", "return", "None", "author", ",", "rev", ",", "datestr", "=", "op", ".", "split", "(", "'\\n'", ")", "[", "7", ":", "10", "]", "author", "=", "author", ".", "split", "(", "': '", ",", "1", ")", "[", "1", "]", ".", "strip", "(", ")", "rev", "=", "rev", ".", "split", "(", "': '", ",", "1", ")", "[", "1", "]", ".", "strip", "(", ")", "datestr", "=", "datestr", ".", "split", "(", "': '", ",", "1", ")", "[", "1", "]", ".", "split", "(", "'('", ",", "1", ")", "[", "0", "]", ".", "strip", "(", ")", "return", "datestr", ",", "(", "rev", ",", "author", ",", "None", ",", "None", ")" ]
Retrieve the most recent commit message (with ``svn info``) Returns: tuple: (datestr, (revno, user, None, desc)) $ svn info Path: . URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python Repository Root: http://python-dlp.googlecode.com/svn Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d Revision: 378 Node Kind: directory Schedule: normal Last Changed Author: chimezie Last Changed Rev: 378 Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
[ "Retrieve", "the", "most", "recent", "commit", "message", "(", "with", "svn", "info", ")" ]
python
train
35.321429
pulumi/pulumi
sdk/python/lib/pulumi/runtime/rpc.py
https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/runtime/rpc.py#L274-L292
def translate_output_properties(res: 'Resource', output: Any) -> Any:
    """
    Recursively rewrite keys of objects returned by the engine to conform
    with a naming convention specified by the resource's implementation of
    `translate_output_property`.

    If output is a `dict`, every key is translated using
    `translate_output_property` while every value is transformed by
    recursing.

    If output is a `list`, every value is recursively transformed.

    If output is a primitive (i.e. not a dict or list), the value is
    returned without modification.
    """
    if isinstance(output, dict):
        return {res.translate_output_property(k): translate_output_properties(res, v)
                for k, v in output.items()}

    if isinstance(output, list):
        return [translate_output_properties(res, v) for v in output]

    return output
[ "def", "translate_output_properties", "(", "res", ":", "'Resource'", ",", "output", ":", "Any", ")", "->", "Any", ":", "if", "isinstance", "(", "output", ",", "dict", ")", ":", "return", "{", "res", ".", "translate_output_property", "(", "k", ")", ":", "translate_output_properties", "(", "res", ",", "v", ")", "for", "k", ",", "v", "in", "output", ".", "items", "(", ")", "}", "if", "isinstance", "(", "output", ",", "list", ")", ":", "return", "[", "translate_output_properties", "(", "res", ",", "v", ")", "for", "v", "in", "output", "]", "return", "output" ]
Recursively rewrite keys of objects returned by the engine to conform with a naming convention specified by the resource's implementation of `translate_output_property`. If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed by recursing. If output is a `list`, every value is recursively transformed. If output is a primitive (i.e. not a dict or list), the value is returned without modification.
[ "Recursively", "rewrite", "keys", "of", "objects", "returned", "by", "the", "engine", "to", "conform", "with", "a", "naming", "convention", "specified", "by", "the", "resource", "s", "implementation", "of", "translate_output_property", "." ]
python
train
43.157895
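A self-contained sketch of the same recursion, with a hypothetical snake_case-to-camelCase rule standing in for the resource's translate_output_property:

from typing import Any

def snake_to_camel(key: str) -> str:
    # stand-in translation rule, not pulumi's actual one
    head, *rest = key.split('_')
    return head + ''.join(part.title() for part in rest)

def translate_keys(output: Any) -> Any:
    if isinstance(output, dict):
        # translate every key, recurse into every value
        return {snake_to_camel(k): translate_keys(v) for k, v in output.items()}
    if isinstance(output, list):
        return [translate_keys(v) for v in output]
    return output  # primitives pass through unchanged

print(translate_keys({'instance_type': 't2.micro', 'tags': [{'tag_key': 'env'}]}))
# {'instanceType': 't2.micro', 'tags': [{'tagKey': 'env'}]}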
concordusapps/python-shield
shield/_registry.py
https://github.com/concordusapps/python-shield/blob/3c08d483eaec1ebaa814e31c7de5daf82234b8f7/shield/_registry.py#L129-L135
def retrieve(self, *args, **kwargs):
        """Retrieve the permission function for the provided things.
        """
        lookup, key = self._lookup(*args, **kwargs)
        return lookup[key]
[ "def", "retrieve", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lookup", ",", "key", "=", "self", ".", "_lookup", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "lookup", "[", "key", "]" ]
Retrieve the permission function for the provided things.
[ "Retrieve", "the", "permsission", "function", "for", "the", "provided", "things", "." ]
python
train
27.571429
mbodenhamer/syn
syn/base_utils/py.py
https://github.com/mbodenhamer/syn/blob/aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258/syn/base_utils/py.py#L468-L490
def elog(exc, func, args=None, kwargs=None, str=str, pretty=True, name=''): '''For logging exception-raising function invocations during randomized unit tests. ''' from .str import safe_str args = args if args else () kwargs = kwargs if kwargs else {} name = '{}.{}'.format(get_mod(func), name) if name else full_funcname(func) if pretty: invocation = ', '.join([safe_str(arg) for arg in args]) if kwargs: invocation += ', ' invocation += ', '.join(['{}={}'.format(key, safe_str(value)) for key, value in sorted(kwargs.items())]) else: invocation = 'args={}, kwargs={}'.format(safe_str(args), safe_str(kwargs)) msg = '***{}***: "{}" --- {}({})'.format(get_typename(exc), message(exc), name, invocation) elogger.error(msg)
[ "def", "elog", "(", "exc", ",", "func", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "str", "=", "str", ",", "pretty", "=", "True", ",", "name", "=", "''", ")", ":", "from", ".", "str", "import", "safe_str", "args", "=", "args", "if", "args", "else", "(", ")", "kwargs", "=", "kwargs", "if", "kwargs", "else", "{", "}", "name", "=", "'{}.{}'", ".", "format", "(", "get_mod", "(", "func", ")", ",", "name", ")", "if", "name", "else", "full_funcname", "(", "func", ")", "if", "pretty", ":", "invocation", "=", "', '", ".", "join", "(", "[", "safe_str", "(", "arg", ")", "for", "arg", "in", "args", "]", ")", "if", "kwargs", ":", "invocation", "+=", "', '", "invocation", "+=", "', '", ".", "join", "(", "[", "'{}={}'", ".", "format", "(", "key", ",", "safe_str", "(", "value", ")", ")", "for", "key", ",", "value", "in", "sorted", "(", "kwargs", ".", "items", "(", ")", ")", "]", ")", "else", ":", "invocation", "=", "'args={}, kwargs={}'", ".", "format", "(", "safe_str", "(", "args", ")", ",", "safe_str", "(", "kwargs", ")", ")", "msg", "=", "'***{}***: \"{}\" --- {}({})'", ".", "format", "(", "get_typename", "(", "exc", ")", ",", "message", "(", "exc", ")", ",", "name", ",", "invocation", ")", "elogger", ".", "error", "(", "msg", ")" ]
For logging exception-raising function invocations during randomized unit tests.
[ "For", "logging", "exception", "-", "raising", "function", "invocations", "during", "randomized", "unit", "tests", "." ]
python
train
41.826087
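The pretty-invocation branch is the interesting part; a stripped-down, self-contained sketch of just that formatting (the real helper also resolves the qualified name via get_mod/full_funcname, uses safe_str for robustness, and writes to syn's elogger):

def format_invocation(args, kwargs):
    # mirror elog's pretty branch: positional args first, then sorted kwargs
    invocation = ', '.join(repr(a) for a in args)
    if kwargs:
        invocation += ', ' if invocation else ''
        invocation += ', '.join('{}={!r}'.format(k, v)
                                for k, v in sorted(kwargs.items()))
    return invocation

print(format_invocation((1, 'x'), {'retries': 3}))  # 1, 'x', retries=3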
cloudendpoints/endpoints-python
endpoints/api_config_manager.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config_manager.py#L53-L77
def process_api_config_response(self, config_json): """Parses a JSON API config and registers methods for dispatch. Side effects: Parses method name, etc. for all methods and updates the indexing data structures with the information. Args: config_json: A dict, the JSON body of the getApiConfigs response. """ with self._config_lock: self._add_discovery_config() for config in config_json.get('items', []): lookup_key = config.get('name', ''), config.get('version', '') self._configs[lookup_key] = config for config in self._configs.itervalues(): name = config.get('name', '') api_version = config.get('api_version', '') path_version = config.get('path_version', '') sorted_methods = self._get_sorted_methods(config.get('methods', {})) for method_name, method in sorted_methods: self._save_rest_method(method_name, name, path_version, method)
[ "def", "process_api_config_response", "(", "self", ",", "config_json", ")", ":", "with", "self", ".", "_config_lock", ":", "self", ".", "_add_discovery_config", "(", ")", "for", "config", "in", "config_json", ".", "get", "(", "'items'", ",", "[", "]", ")", ":", "lookup_key", "=", "config", ".", "get", "(", "'name'", ",", "''", ")", ",", "config", ".", "get", "(", "'version'", ",", "''", ")", "self", ".", "_configs", "[", "lookup_key", "]", "=", "config", "for", "config", "in", "self", ".", "_configs", ".", "itervalues", "(", ")", ":", "name", "=", "config", ".", "get", "(", "'name'", ",", "''", ")", "api_version", "=", "config", ".", "get", "(", "'api_version'", ",", "''", ")", "path_version", "=", "config", ".", "get", "(", "'path_version'", ",", "''", ")", "sorted_methods", "=", "self", ".", "_get_sorted_methods", "(", "config", ".", "get", "(", "'methods'", ",", "{", "}", ")", ")", "for", "method_name", ",", "method", "in", "sorted_methods", ":", "self", ".", "_save_rest_method", "(", "method_name", ",", "name", ",", "path_version", ",", "method", ")" ]
Parses a JSON API config and registers methods for dispatch. Side effects: Parses method name, etc. for all methods and updates the indexing data structures with the information. Args: config_json: A dict, the JSON body of the getApiConfigs response.
[ "Parses", "a", "JSON", "API", "config", "and", "registers", "methods", "for", "dispatch", "." ]
python
train
37.8
budacom/trading-bots
trading_bots/core/utils.py
https://github.com/budacom/trading-bots/blob/8cb68bb8d0b5f822108db1cc5dae336e3d3c3452/trading_bots/core/utils.py#L7-L12
def load_class_by_name(name: str): """Given a dotted path, returns the class""" mod_path, _, cls_name = name.rpartition('.') mod = importlib.import_module(mod_path) cls = getattr(mod, cls_name) return cls
[ "def", "load_class_by_name", "(", "name", ":", "str", ")", ":", "mod_path", ",", "_", ",", "cls_name", "=", "name", ".", "rpartition", "(", "'.'", ")", "mod", "=", "importlib", ".", "import_module", "(", "mod_path", ")", "cls", "=", "getattr", "(", "mod", ",", "cls_name", ")", "return", "cls" ]
Given a dotted path, returns the class
[ "Given", "a", "dotted", "path", "returns", "the", "class" ]
python
train
36.5
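The helper is small enough to demonstrate end to end: rpartition splits the dotted path at the last dot, import_module loads the module, and getattr pulls the class out of it:

import importlib

def load_class_by_name(name: str):
    mod_path, _, cls_name = name.rpartition('.')
    return getattr(importlib.import_module(mod_path), cls_name)

OrderedDict = load_class_by_name('collections.OrderedDict')
print(OrderedDict([('a', 1)]))  # OrderedDict([('a', 1)])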
gabstopper/smc-python
smc/base/model.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/model.py#L760-L781
def export(self, filename='element.zip'): """ Export this element. Usage:: engine = Engine('myfirewall') extask = engine.export(filename='fooexport.zip') while not extask.done(): extask.wait(3) print("Finished download task: %s" % extask.message()) print("File downloaded to: %s" % extask.filename) :param str filename: filename to store exported element :raises TaskRunFailed: invalid permissions, invalid directory, or this element is a system element and cannot be exported. :return: DownloadTask .. note:: It is not possible to export system elements """ from smc.administration.tasks import Task return Task.download(self, 'export', filename)
[ "def", "export", "(", "self", ",", "filename", "=", "'element.zip'", ")", ":", "from", "smc", ".", "administration", ".", "tasks", "import", "Task", "return", "Task", ".", "download", "(", "self", ",", "'export'", ",", "filename", ")" ]
Export this element. Usage:: engine = Engine('myfirewall') extask = engine.export(filename='fooexport.zip') while not extask.done(): extask.wait(3) print("Finished download task: %s" % extask.message()) print("File downloaded to: %s" % extask.filename) :param str filename: filename to store exported element :raises TaskRunFailed: invalid permissions, invalid directory, or this element is a system element and cannot be exported. :return: DownloadTask .. note:: It is not possible to export system elements
[ "Export", "this", "element", "." ]
python
train
36.636364
sammchardy/python-binance
binance/client.py
https://github.com/sammchardy/python-binance/blob/31c0d0a32f9edd528c6c2c1dd3044d9a34ce43cc/binance/client.py#L1188-L1222
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params): """Send in a new limit buy order Any order with an icebergQty MUST have timeInForce set to GTC. :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param price: required :type price: str :param timeInForce: default Good till cancelled :type timeInForce: str :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param stopPrice: Used with stop orders :type stopPrice: decimal :param icebergQty: Used with iceberg orders :type icebergQty: decimal :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException """ params.update({ 'side': self.SIDE_BUY, }) return self.order_limit(timeInForce=timeInForce, **params)
[ "def", "order_limit_buy", "(", "self", ",", "timeInForce", "=", "TIME_IN_FORCE_GTC", ",", "*", "*", "params", ")", ":", "params", ".", "update", "(", "{", "'side'", ":", "self", ".", "SIDE_BUY", ",", "}", ")", "return", "self", ".", "order_limit", "(", "timeInForce", "=", "timeInForce", ",", "*", "*", "params", ")" ]
Send in a new limit buy order Any order with an icebergQty MUST have timeInForce set to GTC. :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param price: required :type price: str :param timeInForce: default Good till cancelled :type timeInForce: str :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param stopPrice: Used with stop orders :type stopPrice: decimal :param icebergQty: Used with iceberg orders :type icebergQty: decimal :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
[ "Send", "in", "a", "new", "limit", "buy", "order" ]
python
train
41.228571
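Hedged usage sketch, mirroring the library's documented call shape (the credentials and symbol below are placeholders, and a live Binance account plus network access are required for this to execute):

from binance.client import Client

client = Client('api_key', 'api_secret')  # placeholder credentials
order = client.order_limit_buy(
    symbol='BNBBTC',
    quantity=100,
    price='0.00001',  # prices are passed as strings
)
print(order['orderId'])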
facelessuser/pyspelling
pyspelling/util/__init__.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/util/__init__.py#L52-L74
def get_process(cmd): """Get a command process.""" if sys.platform.startswith('win'): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW process = subprocess.Popen( cmd, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, shell=False ) else: process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE, shell=False ) return process
[ "def", "get_process", "(", "cmd", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "startupinfo", "=", "subprocess", ".", "STARTUPINFO", "(", ")", "startupinfo", ".", "dwFlags", "|=", "subprocess", ".", "STARTF_USESHOWWINDOW", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "startupinfo", "=", "startupinfo", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ")", "else", ":", "process", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "False", ")", "return", "process" ]
Get a command process.
[ "Get", "a", "command", "process", "." ]
python
train
27.826087
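A minimal demonstration of the POSIX branch above (the Windows branch only adds the STARTUPINFO flags to hide the console window):

import subprocess
import sys

process = subprocess.Popen(
    [sys.executable, '-c', 'print("hello")'],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # merge stderr into stdout
    stdin=subprocess.PIPE,
    shell=False
)
output, _ = process.communicate()
print(output.decode().strip())  # hello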
jobovy/galpy
galpy/util/bovy_plot.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/util/bovy_plot.py#L688-L750
def bovy_print(fig_width=5,fig_height=5,axes_labelsize=16, text_fontsize=11,legend_fontsize=12, xtick_labelsize=10,ytick_labelsize=10, xtick_minor_size=2,ytick_minor_size=2, xtick_major_size=4,ytick_major_size=4): """ NAME: bovy_print PURPOSE: setup a figure for plotting INPUT: fig_width - width in inches fig_height - height in inches axes_labelsize - size of the axis-labels text_fontsize - font-size of the text (if any) legend_fontsize - font-size of the legend (if any) xtick_labelsize - size of the x-axis labels ytick_labelsize - size of the y-axis labels xtick_minor_size - size of the minor x-ticks ytick_minor_size - size of the minor y-ticks OUTPUT: (none) HISTORY: 2009-12-23 - Written - Bovy (NYU) """ fig_size = [fig_width,fig_height] params = {'axes.labelsize': axes_labelsize, 'font.size': text_fontsize, 'legend.fontsize': legend_fontsize, 'xtick.labelsize':xtick_labelsize, 'ytick.labelsize':ytick_labelsize, 'text.usetex': True, 'figure.figsize': fig_size, 'xtick.major.size' : xtick_major_size, 'ytick.major.size' : ytick_major_size, 'xtick.minor.size' : xtick_minor_size, 'ytick.minor.size' : ytick_minor_size, 'legend.numpoints':1, 'xtick.top': True, 'xtick.direction': 'in', 'ytick.right': True, 'ytick.direction': 'in'} pyplot.rcParams.update(params) rc('text.latex', preamble=r'\usepackage{amsmath}'+'\n' +r'\usepackage{amssymb}')
[ "def", "bovy_print", "(", "fig_width", "=", "5", ",", "fig_height", "=", "5", ",", "axes_labelsize", "=", "16", ",", "text_fontsize", "=", "11", ",", "legend_fontsize", "=", "12", ",", "xtick_labelsize", "=", "10", ",", "ytick_labelsize", "=", "10", ",", "xtick_minor_size", "=", "2", ",", "ytick_minor_size", "=", "2", ",", "xtick_major_size", "=", "4", ",", "ytick_major_size", "=", "4", ")", ":", "fig_size", "=", "[", "fig_width", ",", "fig_height", "]", "params", "=", "{", "'axes.labelsize'", ":", "axes_labelsize", ",", "'font.size'", ":", "text_fontsize", ",", "'legend.fontsize'", ":", "legend_fontsize", ",", "'xtick.labelsize'", ":", "xtick_labelsize", ",", "'ytick.labelsize'", ":", "ytick_labelsize", ",", "'text.usetex'", ":", "True", ",", "'figure.figsize'", ":", "fig_size", ",", "'xtick.major.size'", ":", "xtick_major_size", ",", "'ytick.major.size'", ":", "ytick_major_size", ",", "'xtick.minor.size'", ":", "xtick_minor_size", ",", "'ytick.minor.size'", ":", "ytick_minor_size", ",", "'legend.numpoints'", ":", "1", ",", "'xtick.top'", ":", "True", ",", "'xtick.direction'", ":", "'in'", ",", "'ytick.right'", ":", "True", ",", "'ytick.direction'", ":", "'in'", "}", "pyplot", ".", "rcParams", ".", "update", "(", "params", ")", "rc", "(", "'text.latex'", ",", "preamble", "=", "r'\\usepackage{amsmath}'", "+", "'\\n'", "+", "r'\\usepackage{amssymb}'", ")" ]
NAME: bovy_print PURPOSE: setup a figure for plotting INPUT: fig_width - width in inches fig_height - height in inches axes_labelsize - size of the axis-labels text_fontsize - font-size of the text (if any) legend_fontsize - font-size of the legend (if any) xtick_labelsize - size of the x-axis labels ytick_labelsize - size of the y-axis labels xtick_minor_size - size of the minor x-ticks ytick_minor_size - size of the minor y-ticks OUTPUT: (none) HISTORY: 2009-12-23 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
27.380952
sorgerlab/indra
indra/preassembler/grounding_mapper.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/preassembler/grounding_mapper.py#L592-L614
def save_base_map(filename, grouped_by_text): """Dump a list of agents along with groundings and counts into a csv file Parameters ---------- filename : str Filepath for output file grouped_by_text : list of tuple List of tuples of the form output by agent_texts_with_grounding """ rows = [] for group in grouped_by_text: text_string = group[0] for db, db_id, count in group[1]: if db == 'UP': name = uniprot_client.get_mnemonic(db_id) else: name = '' row = [text_string, db, db_id, count, name] rows.append(row) write_unicode_csv(filename, rows, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
[ "def", "save_base_map", "(", "filename", ",", "grouped_by_text", ")", ":", "rows", "=", "[", "]", "for", "group", "in", "grouped_by_text", ":", "text_string", "=", "group", "[", "0", "]", "for", "db", ",", "db_id", ",", "count", "in", "group", "[", "1", "]", ":", "if", "db", "==", "'UP'", ":", "name", "=", "uniprot_client", ".", "get_mnemonic", "(", "db_id", ")", "else", ":", "name", "=", "''", "row", "=", "[", "text_string", ",", "db", ",", "db_id", ",", "count", ",", "name", "]", "rows", ".", "append", "(", "row", ")", "write_unicode_csv", "(", "filename", ",", "rows", ",", "delimiter", "=", "','", ",", "quotechar", "=", "'\"'", ",", "quoting", "=", "csv", ".", "QUOTE_MINIMAL", ",", "lineterminator", "=", "'\\r\\n'", ")" ]
Dump a list of agents along with groundings and counts into a csv file Parameters ---------- filename : str Filepath for output file grouped_by_text : list of tuple List of tuples of the form output by agent_texts_with_grounding
[ "Dump", "a", "list", "of", "agents", "along", "with", "groundings", "and", "counts", "into", "a", "csv", "file" ]
python
train
33.73913
hannes-brt/hebel
hebel/pycuda_ops/cudart.py
https://github.com/hannes-brt/hebel/blob/1e2c3a9309c2646103901b26a55be4e312dd5005/hebel/pycuda_ops/cudart.py#L600-L620
def cudaMemcpy_dtoh(dst, src, count): """ Copy memory from device to host. Copy data from device memory to host memory. Parameters ---------- dst : ctypes pointer Host memory pointer. src : ctypes pointer Device memory pointer. count : int Number of bytes to copy. """ status = _libcudart.cudaMemcpy(dst, src, ctypes.c_size_t(count), cudaMemcpyDeviceToHost) cudaCheckStatus(status)
[ "def", "cudaMemcpy_dtoh", "(", "dst", ",", "src", ",", "count", ")", ":", "status", "=", "_libcudart", ".", "cudaMemcpy", "(", "dst", ",", "src", ",", "ctypes", ".", "c_size_t", "(", "count", ")", ",", "cudaMemcpyDeviceToHost", ")", "cudaCheckStatus", "(", "status", ")" ]
Copy memory from device to host. Copy data from device memory to host memory. Parameters ---------- dst : ctypes pointer Host memory pointer. src : ctypes pointer Device memory pointer. count : int Number of bytes to copy.
[ "Copy", "memory", "from", "device", "to", "host", "." ]
python
train
23.952381
Autodesk/pyccc
pyccc/python.py
https://github.com/Autodesk/pyccc/blob/011698e78d49a83ac332e0578a4a2a865b75ef8d/pyccc/python.py#L278-L290
def run(self, func=None): """ Evaluates the packaged function as func(*self.args,**self.kwargs) If func is a method of an object, it's accessed as getattr(self.obj,__name__). If it's a user-defined function, it needs to be passed in here because it can't be serialized. Returns: object: function's return value """ to_run = self.prepare_namespace(func) result = to_run(*self.args, **self.kwargs) return result
[ "def", "run", "(", "self", ",", "func", "=", "None", ")", ":", "to_run", "=", "self", ".", "prepare_namespace", "(", "func", ")", "result", "=", "to_run", "(", "*", "self", ".", "args", ",", "*", "*", "self", ".", "kwargs", ")", "return", "result" ]
Evaluates the packaged function as func(*self.args,**self.kwargs) If func is a method of an object, it's accessed as getattr(self.obj,__name__). If it's a user-defined function, it needs to be passed in here because it can't be serialized. Returns: object: function's return value
[ "Evaluates", "the", "packaged", "function", "as", "func", "(", "*", "self", ".", "args", "**", "self", ".", "kwargs", ")", "If", "func", "is", "a", "method", "of", "an", "object", "it", "s", "accessed", "as", "getattr", "(", "self", ".", "obj", "__name__", ")", ".", "If", "it", "s", "a", "user", "-", "defined", "function", "it", "needs", "to", "be", "passed", "in", "here", "because", "it", "can", "t", "be", "serialized", "." ]
python
train
37.692308
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/context.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/context.py#L225-L236
def add_ref(self, name, ref): """ Add a reference for the backend object that gives access to the low level context. Used in vispy.app.canvas.backends. The given name must match with that of previously added references. """ if self._name is None: self._name = name elif name != self._name: raise RuntimeError('Contexts can only share between backends of ' 'the same type') self._refs.append(weakref.ref(ref))
[ "def", "add_ref", "(", "self", ",", "name", ",", "ref", ")", ":", "if", "self", ".", "_name", "is", "None", ":", "self", ".", "_name", "=", "name", "elif", "name", "!=", "self", ".", "_name", ":", "raise", "RuntimeError", "(", "'Contexts can only share between backends of '", "'the same type'", ")", "self", ".", "_refs", ".", "append", "(", "weakref", ".", "ref", "(", "ref", ")", ")" ]
Add a reference for the backend object that gives access to the low level context. Used in vispy.app.canvas.backends. The given name must match with that of previously added references.
[ "Add", "a", "reference", "for", "the", "backend", "object", "that", "gives", "access", "to", "the", "low", "level", "context", ".", "Used", "in", "vispy", ".", "app", ".", "canvas", ".", "backends", ".", "The", "given", "name", "must", "match", "with", "that", "of", "previously", "added", "references", "." ]
python
train
43
bhmm/bhmm
bhmm/estimators/bayesian_sampling.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/bayesian_sampling.py#L337-L369
def _updateTransitionMatrix(self): """ Updates the hidden-state transition matrix and the initial distribution """ # TRANSITION MATRIX C = self.model.count_matrix() + self.prior_C # posterior count matrix # check if we work with these options if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True): raise NotImplementedError('Encountered disconnected count matrix with sampling option reversible:\n ' + str(C) + '\nUse prior to ensure connectivity or use reversible=False.') # ensure consistent sparsity pattern (P0 might have additional zeros because of underflows) # TODO: these steps work around a bug in msmtools. Should be fixed there P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=10000, warn_not_converged=False) zeros = np.where(P0 + P0.T == 0) C[zeros] = 0 # run sampler Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps, reversible=self.reversible) # INITIAL DISTRIBUTION if self.stationary: # p0 is consistent with P p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C) else: n0 = self.model.count_init().astype(float) first_timestep_counts_with_prior = n0 + self.prior_n0 positive = first_timestep_counts_with_prior > 0 p0 = np.zeros_like(n0) p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior[positive]) # sample p0 from posterior # update HMM with new sample self.model.update(p0, Tij)
[ "def", "_updateTransitionMatrix", "(", "self", ")", ":", "# TRANSITION MATRIX", "C", "=", "self", ".", "model", ".", "count_matrix", "(", ")", "+", "self", ".", "prior_C", "# posterior count matrix", "# check if we work with these options", "if", "self", ".", "reversible", "and", "not", "_tmatrix_disconnected", ".", "is_connected", "(", "C", ",", "strong", "=", "True", ")", ":", "raise", "NotImplementedError", "(", "'Encountered disconnected count matrix with sampling option reversible:\\n '", "+", "str", "(", "C", ")", "+", "'\\nUse prior to ensure connectivity or use reversible=False.'", ")", "# ensure consistent sparsity pattern (P0 might have additional zeros because of underflows)", "# TODO: these steps work around a bug in msmtools. Should be fixed there", "P0", "=", "msmest", ".", "transition_matrix", "(", "C", ",", "reversible", "=", "self", ".", "reversible", ",", "maxiter", "=", "10000", ",", "warn_not_converged", "=", "False", ")", "zeros", "=", "np", ".", "where", "(", "P0", "+", "P0", ".", "T", "==", "0", ")", "C", "[", "zeros", "]", "=", "0", "# run sampler", "Tij", "=", "msmest", ".", "sample_tmatrix", "(", "C", ",", "nsample", "=", "1", ",", "nsteps", "=", "self", ".", "transition_matrix_sampling_steps", ",", "reversible", "=", "self", ".", "reversible", ")", "# INITIAL DISTRIBUTION", "if", "self", ".", "stationary", ":", "# p0 is consistent with P", "p0", "=", "_tmatrix_disconnected", ".", "stationary_distribution", "(", "Tij", ",", "C", "=", "C", ")", "else", ":", "n0", "=", "self", ".", "model", ".", "count_init", "(", ")", ".", "astype", "(", "float", ")", "first_timestep_counts_with_prior", "=", "n0", "+", "self", ".", "prior_n0", "positive", "=", "first_timestep_counts_with_prior", ">", "0", "p0", "=", "np", ".", "zeros_like", "(", "n0", ")", "p0", "[", "positive", "]", "=", "np", ".", "random", ".", "dirichlet", "(", "first_timestep_counts_with_prior", "[", "positive", "]", ")", "# sample p0 from posterior", "# update HMM with new sample", "self", ".", "model", ".", "update", "(", "p0", ",", "Tij", ")" ]
Updates the hidden-state transition matrix and the initial distribution
[ "Updates", "the", "hidden", "-", "state", "transition", "matrix", "and", "the", "initial", "distribution" ]
python
train
51.242424
OpenMath/py-openmath
openmath/helpers.py
https://github.com/OpenMath/py-openmath/blob/4906aa9ccf606f533675c28823772e07c30fd220/openmath/helpers.py#L289-L307
def convertAsOpenMath(term, converter): """ Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method """ # if we already have openmath, or have some of our magic helpers, use interpretAsOpenMath if hasattr(term, "_ishelper") and term._ishelper or isinstance(term, om.OMAny): return interpretAsOpenMath(term) # next try to convert using the converter if converter is not None: try: _converted = converter.to_openmath(term) except Exception as e: _converted = None if isinstance(_converted, om.OMAny): return _converted # fallback to the openmath helper return interpretAsOpenMath(term)
[ "def", "convertAsOpenMath", "(", "term", ",", "converter", ")", ":", "# if we already have openmath, or have some of our magic helpers, use interpretAsOpenMath", "if", "hasattr", "(", "term", ",", "\"_ishelper\"", ")", "and", "term", ".", "_ishelper", "or", "isinstance", "(", "term", ",", "om", ".", "OMAny", ")", ":", "return", "interpretAsOpenMath", "(", "term", ")", "# next try to convert using the converter", "if", "converter", "is", "not", "None", ":", "try", ":", "_converted", "=", "converter", ".", "to_openmath", "(", "term", ")", "except", "Exception", "as", "e", ":", "_converted", "=", "None", "if", "isinstance", "(", "_converted", ",", "om", ".", "OMAny", ")", ":", "return", "_converted", "# fallback to the openmath helper", "return", "interpretAsOpenMath", "(", "term", ")" ]
Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method
[ "Converts", "a", "term", "into", "OpenMath", "using", "either", "a", "converter", "or", "the", "interpretAsOpenMath", "method" ]
python
test
38.052632
StorjOld/heartbeat
heartbeat/Merkle/Merkle.py
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/Merkle/Merkle.py#L75-L83
def fromdict(dict): """Takes a dictionary as an argument and returns a new Challenge object from the dictionary. :param dict: the dictionary to convert """ seed = hb_decode(dict['seed']) index = dict['index'] return Challenge(seed, index)
[ "def", "fromdict", "(", "dict", ")", ":", "seed", "=", "hb_decode", "(", "dict", "[", "'seed'", "]", ")", "index", "=", "dict", "[", "'index'", "]", "return", "Challenge", "(", "seed", ",", "index", ")" ]
Takes a dictionary as an argument and returns a new Challenge object from the dictionary. :param dict: the dictionary to convert
[ "Takes", "a", "dictionary", "as", "an", "argument", "and", "returns", "a", "new", "Challenge", "object", "from", "the", "dictionary", "." ]
python
train
31.888889
sangoma/pysensu
pysensu/api.py
https://github.com/sangoma/pysensu/blob/dc6799edbf2635247aec61fcf45b04ddec1beb49/pysensu/api.py#L88-L93
def get_client_data(self, client): """ Returns a client. """ data = self._request('GET', '/clients/{}'.format(client)) return data.json()
[ "def", "get_client_data", "(", "self", ",", "client", ")", ":", "data", "=", "self", ".", "_request", "(", "'GET'", ",", "'/clients/{}'", ".", "format", "(", "client", ")", ")", "return", "data", ".", "json", "(", ")" ]
Returns a client.
[ "Returns", "a", "client", "." ]
python
train
28.666667
clalancette/pycdlib
pycdlib/rockridge.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L3151-L3197
def add_entry(self, length): # type: (int) -> int ''' Add a new entry to this Rock Ridge Continuation Block. This method attempts to find a gap that fits the new length anywhere within this Continuation Block. If successful, it returns the offset at which it placed this entry. If unsuccessful, it returns None. Parameters: length - The length of the entry to find a gap for. Returns: The offset the entry was placed at, or None if no gap was found. ''' offset = -1 # Need to find a gap for index, entry in enumerate(self._entries): if index == 0: if entry.offset != 0 and length <= entry.offset: # We can put it at the beginning! offset = 0 break else: lastentry = self._entries[index - 1] lastend = lastentry.offset + lastentry.length - 1 gapsize = entry.offset - lastend - 1 if gapsize >= length: # We found a spot for it! offset = lastend + 1 break else: # We reached the end without finding a gap for it. Look at the last # entry and see if there is room at the end. if self._entries: lastentry = self._entries[-1] lastend = lastentry.offset + lastentry.length - 1 left = self._max_block_size - lastend - 1 if left >= length: offset = lastend + 1 else: if self._max_block_size >= length: offset = 0 if offset >= 0: bisect.insort_left(self._entries, RockRidgeContinuationEntry(offset, length)) return offset
[ "def", "add_entry", "(", "self", ",", "length", ")", ":", "# type: (int) -> int", "offset", "=", "-", "1", "# Need to find a gap", "for", "index", ",", "entry", "in", "enumerate", "(", "self", ".", "_entries", ")", ":", "if", "index", "==", "0", ":", "if", "entry", ".", "offset", "!=", "0", "and", "length", "<=", "entry", ".", "offset", ":", "# We can put it at the beginning!", "offset", "=", "0", "break", "else", ":", "lastentry", "=", "self", ".", "_entries", "[", "index", "-", "1", "]", "lastend", "=", "lastentry", ".", "offset", "+", "lastentry", ".", "length", "-", "1", "gapsize", "=", "entry", ".", "offset", "-", "lastend", "-", "1", "if", "gapsize", ">=", "length", ":", "# We found a spot for it!", "offset", "=", "lastend", "+", "1", "break", "else", ":", "# We reached the end without finding a gap for it. Look at the last", "# entry and see if there is room at the end.", "if", "self", ".", "_entries", ":", "lastentry", "=", "self", ".", "_entries", "[", "-", "1", "]", "lastend", "=", "lastentry", ".", "offset", "+", "lastentry", ".", "length", "-", "1", "left", "=", "self", ".", "_max_block_size", "-", "lastend", "-", "1", "if", "left", ">=", "length", ":", "offset", "=", "lastend", "+", "1", "else", ":", "if", "self", ".", "_max_block_size", ">=", "length", ":", "offset", "=", "0", "if", "offset", ">=", "0", ":", "bisect", ".", "insort_left", "(", "self", ".", "_entries", ",", "RockRidgeContinuationEntry", "(", "offset", ",", "length", ")", ")", "return", "offset" ]
Add a new entry to this Rock Ridge Continuation Block. This method attempts to find a gap that fits the new length anywhere within this Continuation Block. If successful, it returns the offset at which it placed this entry. If unsuccessful, it returns None. Parameters: length - The length of the entry to find a gap for. Returns: The offset the entry was placed at, or None if no gap was found.
[ "Add", "a", "new", "entry", "to", "this", "Rock", "Ridge", "Continuation", "Block", ".", "This", "method", "attempts", "to", "find", "a", "gap", "that", "fits", "the", "new", "length", "anywhere", "within", "this", "Continuation", "Block", ".", "If", "successful", "it", "returns", "the", "offset", "at", "which", "it", "placed", "this", "entry", ".", "If", "unsuccessful", "it", "returns", "None", "." ]
python
train
39.255319
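The gap search is a first-fit allocator over entries kept sorted by offset; a self-contained sketch with plain (offset, length) tuples standing in for RockRidgeContinuationEntry (2048 is an assumed block size):

import bisect

def add_entry(entries, length, max_block_size=2048):
    """First-fit offset for `length` bytes in sorted (offset, length) entries; -1 if none."""
    offset = -1
    for index, (e_off, e_len) in enumerate(entries):
        if index == 0:
            if e_off != 0 and length <= e_off:
                offset = 0  # room before the first entry
                break
        else:
            last_off, last_len = entries[index - 1]
            lastend = last_off + last_len - 1
            if e_off - lastend - 1 >= length:
                offset = lastend + 1  # gap between neighbours fits
                break
    else:
        # no interior gap found; try after the last entry (or an empty block)
        if entries:
            last_off, last_len = entries[-1]
            lastend = last_off + last_len - 1
            if max_block_size - lastend - 1 >= length:
                offset = lastend + 1
        elif max_block_size >= length:
            offset = 0
    if offset >= 0:
        bisect.insort_left(entries, (offset, length))
    return offset

entries = []
print(add_entry(entries, 100))  # 0
print(add_entry(entries, 50))   # 100
print(entries)                  # [(0, 100), (100, 50)]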
DeV1doR/aioethereum
aioethereum/management/db.py
https://github.com/DeV1doR/aioethereum/blob/85eb46550d862b3ccc309914ea871ca1c7b42157/aioethereum/management/db.py#L29-L37
def db_putHex(self, db_name, key, value): """https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex DEPRECATED """ warnings.warn('deprecated', DeprecationWarning) if not value.startswith('0x'): value = add_0x(value) return (yield from self.rpc_call('db_putHex', [db_name, key, value]))
[ "def", "db_putHex", "(", "self", ",", "db_name", ",", "key", ",", "value", ")", ":", "warnings", ".", "warn", "(", "'deprecated'", ",", "DeprecationWarning", ")", "if", "not", "value", ".", "startswith", "(", "'0x'", ")", ":", "value", "=", "add_0x", "(", "value", ")", "return", "(", "yield", "from", "self", ".", "rpc_call", "(", "'db_putHex'", ",", "[", "db_name", ",", "key", ",", "value", "]", ")", ")" ]
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex DEPRECATED
[ "https", ":", "//", "github", ".", "com", "/", "ethereum", "/", "wiki", "/", "wiki", "/", "JSON", "-", "RPC#db_puthex" ]
python
train
37.777778
PyCQA/astroid
astroid/rebuilder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L445-L454
def visit_comprehension(self, node, parent): """visit a Comprehension node by returning a fresh instance of it""" newnode = nodes.Comprehension(parent) newnode.postinit( self.visit(node.target, newnode), self.visit(node.iter, newnode), [self.visit(child, newnode) for child in node.ifs], getattr(node, "is_async", None), ) return newnode
[ "def", "visit_comprehension", "(", "self", ",", "node", ",", "parent", ")", ":", "newnode", "=", "nodes", ".", "Comprehension", "(", "parent", ")", "newnode", ".", "postinit", "(", "self", ".", "visit", "(", "node", ".", "target", ",", "newnode", ")", ",", "self", ".", "visit", "(", "node", ".", "iter", ",", "newnode", ")", ",", "[", "self", ".", "visit", "(", "child", ",", "newnode", ")", "for", "child", "in", "node", ".", "ifs", "]", ",", "getattr", "(", "node", ",", "\"is_async\"", ",", "None", ")", ",", ")", "return", "newnode" ]
visit a Comprehension node by returning a fresh instance of it
[ "visit", "a", "Comprehension", "node", "by", "returning", "a", "fresh", "instance", "of", "it" ]
python
train
41.6
idlesign/django-sitemessage
sitemessage/toolbox.py
https://github.com/idlesign/django-sitemessage/blob/25b179b798370354c5988042ec209e255d23793f/sitemessage/toolbox.py#L89-L122
def check_undelivered(to=None): """Sends a notification email if any undelivered dispatches. Returns undelivered (failed) dispatches count. :param str|unicode to: Recipient address. If not set Django ADMINS setting is used. :rtype: int """ failed_count = Dispatch.objects.filter(dispatch_status=Dispatch.DISPATCH_STATUS_FAILED).count() if failed_count: from sitemessage.shortcuts import schedule_email from sitemessage.messages.email import EmailTextMessage if to is None: admins = settings.ADMINS if admins: to = list(dict(admins).values()) if to: priority = 999 register_message_types(EmailTextMessage) schedule_email( 'You have %s undelivered dispatch(es) at %s' % (failed_count, get_site_url()), subject='[SITEMESSAGE] Undelivered dispatches', to=to, priority=priority) send_scheduled_messages(priority=priority) return failed_count
[ "def", "check_undelivered", "(", "to", "=", "None", ")", ":", "failed_count", "=", "Dispatch", ".", "objects", ".", "filter", "(", "dispatch_status", "=", "Dispatch", ".", "DISPATCH_STATUS_FAILED", ")", ".", "count", "(", ")", "if", "failed_count", ":", "from", "sitemessage", ".", "shortcuts", "import", "schedule_email", "from", "sitemessage", ".", "messages", ".", "email", "import", "EmailTextMessage", "if", "to", "is", "None", ":", "admins", "=", "settings", ".", "ADMINS", "if", "admins", ":", "to", "=", "list", "(", "dict", "(", "admins", ")", ".", "values", "(", ")", ")", "if", "to", ":", "priority", "=", "999", "register_message_types", "(", "EmailTextMessage", ")", "schedule_email", "(", "'You have %s undelivered dispatch(es) at %s'", "%", "(", "failed_count", ",", "get_site_url", "(", ")", ")", ",", "subject", "=", "'[SITEMESSAGE] Undelivered dispatches'", ",", "to", "=", "to", ",", "priority", "=", "priority", ")", "send_scheduled_messages", "(", "priority", "=", "priority", ")", "return", "failed_count" ]
Sends a notification email if any undelivered dispatches. Returns undelivered (failed) dispatches count. :param str|unicode to: Recipient address. If not set Django ADMINS setting is used. :rtype: int
[ "Sends", "a", "notification", "email", "if", "any", "undelivered", "dispatches", "." ]
python
train
29.823529
ga4gh/ga4gh-server
ga4gh/server/backend.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/backend.py#L910-L917
def runSearchReads(self, request): """ Runs the specified SearchReadsRequest. """ return self.runSearchRequest( request, protocol.SearchReadsRequest, protocol.SearchReadsResponse, self.readsGenerator)
[ "def", "runSearchReads", "(", "self", ",", "request", ")", ":", "return", "self", ".", "runSearchRequest", "(", "request", ",", "protocol", ".", "SearchReadsRequest", ",", "protocol", ".", "SearchReadsResponse", ",", "self", ".", "readsGenerator", ")" ]
Runs the specified SearchReadsRequest.
[ "Runs", "the", "specified", "SearchReadsRequest", "." ]
python
train
32.625
materialsproject/pymatgen
pymatgen/analysis/elasticity/elastic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L155-L161
def g_voigt(self): """ returns the G_v shear modulus """ return (2. * self.voigt[:3, :3].trace() - np.triu(self.voigt[:3, :3]).sum() + 3 * self.voigt[3:, 3:].trace()) / 15.
[ "def", "g_voigt", "(", "self", ")", ":", "return", "(", "2.", "*", "self", ".", "voigt", "[", ":", "3", ",", ":", "3", "]", ".", "trace", "(", ")", "-", "np", ".", "triu", "(", "self", ".", "voigt", "[", ":", "3", ",", ":", "3", "]", ")", ".", "sum", "(", ")", "+", "3", "*", "self", ".", "voigt", "[", "3", ":", ",", "3", ":", "]", ".", "trace", "(", ")", ")", "/", "15." ]
returns the G_v shear modulus
[ "returns", "the", "G_v", "shear", "modulus" ]
python
train
32.857143
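For reference, with the 6x6 Voigt stiffness matrix written as $C_{ij}$, the arithmetic above is the standard Voigt shear average: since triu includes the diagonal, $2\,\mathrm{tr}(C_{1:3,1:3}) - \mathrm{triu}(C_{1:3,1:3})$ summed reduces to $(C_{11}+C_{22}+C_{33}) - (C_{12}+C_{13}+C_{23})$, giving

$$G_V = \frac{(C_{11}+C_{22}+C_{33}) - (C_{12}+C_{13}+C_{23}) + 3\,(C_{44}+C_{55}+C_{66})}{15}.$$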
bwohlberg/sporco
sporco/admm/cbpdntv.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/cbpdntv.py#L1259-L1269
def cnst_A1(self, X, Xf=None): r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots )^T \mathbf{x}`. """ if Xf is None: Xf = sl.rfftn(X, axes=self.cri.axisN) return sl.irfftn(sl.inner( self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM), self.cri.Nv, self.cri.axisN)
[ "def", "cnst_A1", "(", "self", ",", "X", ",", "Xf", "=", "None", ")", ":", "if", "Xf", "is", "None", ":", "Xf", "=", "sl", ".", "rfftn", "(", "X", ",", "axes", "=", "self", ".", "cri", ".", "axisN", ")", "return", "sl", ".", "irfftn", "(", "sl", ".", "inner", "(", "self", ".", "GDf", ",", "Xf", "[", "...", ",", "np", ".", "newaxis", "]", ",", "axis", "=", "self", ".", "cri", ".", "axisM", ")", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")" ]
r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
[ "r", "Compute", ":", "math", ":", "A_1", "\\", "mathbf", "{", "x", "}", "component", "of", "ADMM", "problem", "constraint", ".", "In", "this", "case", ":", "math", ":", "A_1", "\\", "mathbf", "{", "x", "}", "=", "(", "\\", "Gamma_0^T", "\\", ";", "\\", ";", "\\", "Gamma_1^T", "\\", ";", "\\", ";", "\\", "ldots", ")", "^T", "\\", "mathbf", "{", "x", "}", "." ]
python
train
41
jaraco/keyrings.alt
keyrings/alt/file.py
https://github.com/jaraco/keyrings.alt/blob/5b71223d12bf9ac6abd05b1b395f1efccb5ea660/keyrings/alt/file.py#L47-L54
def _create_cipher(self, password, salt, IV): """ Create the cipher object to encrypt or decrypt a payload. """ from Crypto.Protocol.KDF import PBKDF2 from Crypto.Cipher import AES pw = PBKDF2(password, salt, dkLen=self.block_size) return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
[ "def", "_create_cipher", "(", "self", ",", "password", ",", "salt", ",", "IV", ")", ":", "from", "Crypto", ".", "Protocol", ".", "KDF", "import", "PBKDF2", "from", "Crypto", ".", "Cipher", "import", "AES", "pw", "=", "PBKDF2", "(", "password", ",", "salt", ",", "dkLen", "=", "self", ".", "block_size", ")", "return", "AES", ".", "new", "(", "pw", "[", ":", "self", ".", "block_size", "]", ",", "AES", ".", "MODE_CFB", ",", "IV", ")" ]
Create the cipher object to encrypt or decrypt a payload.
[ "Create", "the", "cipher", "object", "to", "encrypt", "or", "decrypt", "a", "payload", "." ]
python
train
41.875
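A standalone sketch of the same PBKDF2 + AES-CFB construction under pycryptodome, with an assumed block_size of 32 (the real keyring derives these values from its own settings):

import os
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES

block_size = 32  # assumed; yields an AES-256 key
password, salt, IV = b'secret', os.urandom(16), os.urandom(16)

pw = PBKDF2(password, salt, dkLen=block_size)      # derive key material
cipher = AES.new(pw[:block_size], AES.MODE_CFB, IV)
ciphertext = cipher.encrypt(b'payload')
print(len(ciphertext))  # 7 -- CFB preserves payload length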
Scifabric/pbs
helpers.py
https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L348-L362
def find_project_by_short_name(short_name, pbclient, all=None): """Return project by short_name.""" try: response = pbclient.find_project(short_name=short_name, all=all) check_api_error(response) if (len(response) == 0): msg = '%s not found! You can use the all=1 argument to \ search in all the server.' error = 'Project Not Found' raise ProjectNotFound(msg, error) return response[0] except exceptions.ConnectionError: raise except ProjectNotFound: raise
[ "def", "find_project_by_short_name", "(", "short_name", ",", "pbclient", ",", "all", "=", "None", ")", ":", "try", ":", "response", "=", "pbclient", ".", "find_project", "(", "short_name", "=", "short_name", ",", "all", "=", "all", ")", "check_api_error", "(", "response", ")", "if", "(", "len", "(", "response", ")", "==", "0", ")", ":", "msg", "=", "'%s not found! You can use the all=1 argument to \\\n search in all the server.'", "error", "=", "'Project Not Found'", "raise", "ProjectNotFound", "(", "msg", ",", "error", ")", "return", "response", "[", "0", "]", "except", "exceptions", ".", "ConnectionError", ":", "raise", "except", "ProjectNotFound", ":", "raise" ]
Return project by short_name.
[ "Return", "project", "by", "short_name", "." ]
python
train
37.4
O365/python-o365
O365/utils/attachment.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/attachment.py#L383-L419
def download_attachments(self): """ Downloads this message attachments into memory. Need a call to 'attachment.save' to save them on disk. :return: Success / Failure :rtype: bool """ if not self._parent.has_attachments: log.debug( 'Parent {} has no attachments, skipping out early.'.format( self._parent.__class__.__name__)) return False if not self._parent.object_id: raise RuntimeError( 'Attempted to download attachments of an unsaved {}'.format( self._parent.__class__.__name__)) url = self.build_url(self._endpoints.get('attachments').format( id=self._parent.object_id)) response = self._parent.con.get(url) if not response: return False attachments = response.json().get('value', []) # Everything received from cloud must be passed as self._cloud_data_key self.untrack = True self.add({self._cloud_data_key: attachments}) self.untrack = False # TODO: when it's a item attachment the attachment itself # is not downloaded. We must download it... # TODO: idea: retrieve the attachments ids' only with # select and then download one by one. return True
[ "def", "download_attachments", "(", "self", ")", ":", "if", "not", "self", ".", "_parent", ".", "has_attachments", ":", "log", ".", "debug", "(", "'Parent {} has no attachments, skipping out early.'", ".", "format", "(", "self", ".", "_parent", ".", "__class__", ".", "__name__", ")", ")", "return", "False", "if", "not", "self", ".", "_parent", ".", "object_id", ":", "raise", "RuntimeError", "(", "'Attempted to download attachments of an unsaved {}'", ".", "format", "(", "self", ".", "_parent", ".", "__class__", ".", "__name__", ")", ")", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'attachments'", ")", ".", "format", "(", "id", "=", "self", ".", "_parent", ".", "object_id", ")", ")", "response", "=", "self", ".", "_parent", ".", "con", ".", "get", "(", "url", ")", "if", "not", "response", ":", "return", "False", "attachments", "=", "response", ".", "json", "(", ")", ".", "get", "(", "'value'", ",", "[", "]", ")", "# Everything received from cloud must be passed as self._cloud_data_key", "self", ".", "untrack", "=", "True", "self", ".", "add", "(", "{", "self", ".", "_cloud_data_key", ":", "attachments", "}", ")", "self", ".", "untrack", "=", "False", "# TODO: when it's a item attachment the attachment itself", "# is not downloaded. We must download it...", "# TODO: idea: retrieve the attachments ids' only with", "# select and then download one by one.", "return", "True" ]
Downloads this message attachments into memory. Need a call to 'attachment.save' to save them on disk. :return: Success / Failure :rtype: bool
[ "Downloads", "this", "message", "attachments", "into", "memory", ".", "Need", "a", "call", "to", "attachment", ".", "save", "to", "save", "them", "on", "disk", "." ]
python
train
35.621622
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L6173-L6204
def new_ele_description(**kwargs): ''' from elist.elist import * from elist.jprint import pobj root_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[]) pobj(root_desc) #None means not handled ''' desc = { 'leaf':None, 'depth':None, 'breadth':None, 'breadth_path':None, 'sib_seq':None, 'path':None, 'parent_path':None, 'parent_breadth_path':None, 'lsib_path':None, 'rsib_path':None, 'lcin_path':None, 'rcin_path':None, 'sons_count':None, 'leaf_son_paths':None, 'non_leaf_son_paths':None, 'leaf_descendant_paths':None, 'non_leaf_descendant_paths':None, 'flat_offset':None, 'flat_len':None } for key in kwargs: desc[key.lower()] = kwargs[key] return(desc)
[ "def", "new_ele_description", "(", "*", "*", "kwargs", ")", ":", "desc", "=", "{", "'leaf'", ":", "None", ",", "'depth'", ":", "None", ",", "'breadth'", ":", "None", ",", "'breadth_path'", ":", "None", ",", "'sib_seq'", ":", "None", ",", "'path'", ":", "None", ",", "'parent_path'", ":", "None", ",", "'parent_breadth_path'", ":", "None", ",", "'lsib_path'", ":", "None", ",", "'rsib_path'", ":", "None", ",", "'lcin_path'", ":", "None", ",", "'rcin_path'", ":", "None", ",", "'sons_count'", ":", "None", ",", "'leaf_son_paths'", ":", "None", ",", "'non_leaf_son_paths'", ":", "None", ",", "'leaf_descendant_paths'", ":", "None", ",", "'non_leaf_descendant_paths'", ":", "None", ",", "'flat_offset'", ":", "None", ",", "'flat_len'", ":", "None", "}", "for", "key", "in", "kwargs", ":", "desc", "[", "key", ".", "lower", "(", ")", "]", "=", "kwargs", "[", "key", "]", "return", "(", "desc", ")" ]
from elist.elist import * from elist.jprint import pobj root_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[]) pobj(root_desc) #None means not handled
[ "from", "elist", ".", "elist", "import", "*", "from", "elist", ".", "jprint", "import", "pobj", "root_desc", "=", "new_ele_description", "(", "leaf", "=", "False", "depth", "=", "0", "breadth_path", "=", "[]", "path", "=", "[]", "parent_path", "=", "[]", "parent_breadth_path", "=", "[]", ")", "pobj", "(", "root_desc", ")", "#None", "means", "not", "handled" ]
python
valid
28.1875
inveniosoftware/invenio-communities
invenio_communities/tasks.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/tasks.py#L38-L44
def delete_marked_communities(): """Delete communities after holdout time.""" # TODO: Delete the community ID from all records metadata first raise NotImplementedError() Community.query.filter_by( Community.delete_time > datetime.utcnow()).delete() db.session.commit()
[ "def", "delete_marked_communities", "(", ")", ":", "# TODO: Delete the community ID from all records metadata first", "raise", "NotImplementedError", "(", ")", "Community", ".", "query", ".", "filter_by", "(", "Community", ".", "delete_time", ">", "datetime", ".", "utcnow", "(", ")", ")", ".", "delete", "(", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Delete communities after holdout time.
[ "Delete", "communities", "after", "holdout", "time", "." ]
python
train
41.428571
helium/helium-python
helium/exceptions.py
https://github.com/helium/helium-python/blob/db73480b143da4fc48e95c4414bd69c576a3a390/helium/exceptions.py#L58-L66
def error_for(response): """Return the appropriate initialized exception class for a response.""" klass = error_classes.get(response.status) if klass is None: if 400 <= response.status < 500: klass = ClientError if 500 <= response.status < 600: klass = ServerError # pragma: no cover return klass(response)
[ "def", "error_for", "(", "response", ")", ":", "klass", "=", "error_classes", ".", "get", "(", "response", ".", "status", ")", "if", "klass", "is", "None", ":", "if", "400", "<=", "response", ".", "status", "<", "500", ":", "klass", "=", "ClientError", "if", "500", "<=", "response", ".", "status", "<", "600", ":", "klass", "=", "ServerError", "# pragma: no cover", "return", "klass", "(", "response", ")" ]
Return the appropriate initialized exception class for a response.
[ "Return", "the", "appropriate", "initialized", "exception", "class", "for", "a", "response", "." ]
python
train
39.444444
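The dispatch pattern reduced to a runnable sketch, with hypothetical stand-in classes (the real module maps HTTP status codes to its own exception hierarchy):

class ClientError(Exception): pass
class ServerError(Exception): pass

# explicit mapping wins; ranges are the fallback
error_classes = {404: type('NotFound', (ClientError,), {})}

def error_for(status):
    klass = error_classes.get(status)
    if klass is None:
        if 400 <= status < 500:
            klass = ClientError
        elif 500 <= status < 600:
            klass = ServerError
    return klass(status)

print(type(error_for(404)).__name__)  # NotFound
print(type(error_for(500)).__name__)  # ServerError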
kejbaly2/metrique
metrique/parse.py
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/parse.py#L290-L336
def parse(table, query=None, date=None, fields=None, distinct=False,
          limit=None, alias=None):
    '''
    Given a SQLAlchemy Table() instance, generate a SQLAlchemy
    Query() instance with the given parameters.

    :param table: SQLAlchemy Table() instance
    :param query: MQL query
    :param date: metrique date range query
    :param fields: list of field names to return as columns
    :param distinct: apply DISTINCT to this query
    :param limit: apply LIMIT to this query
    :param alias: apply ALIAS AS to this query
    '''
    date = date_range(date)
    limit = int(limit or -1)

    if query and date:
        query = '%s and %s' % (query, date)
    elif date:
        query = date
    elif query:
        pass
    else:
        # date is null, query is not
        query = None

    fields = parse_fields(fields=fields) or None
    # we must pass in the table column objects themselves to ensure
    # our bind / result processors are mapped properly
    fields = fields if fields else table.columns

    msg = 'parse(query=%s, fields=%s)' % (query, fields)
    #msg = re.sub(' in \[[^\]]+\]', ' in [...]', msg)
    logger.debug(msg)

    kwargs = {}
    if query:
        interpreter = MQLInterpreter(table)
        query = interpreter.parse(query)
        kwargs['whereclause'] = query
    if distinct:
        kwargs['distinct'] = distinct

    query = select(fields, from_obj=table, **kwargs)
    if limit >= 1:
        query = query.limit(limit)
    if alias:
        query = query.alias(alias)
    return query
[ "def", "parse", "(", "table", ",", "query", "=", "None", ",", "date", "=", "None", ",", "fields", "=", "None", ",", "distinct", "=", "False", ",", "limit", "=", "None", ",", "alias", "=", "None", ")", ":", "date", "=", "date_range", "(", "date", ")", "limit", "=", "int", "(", "limit", "or", "-", "1", ")", "if", "query", "and", "date", ":", "query", "=", "'%s and %s'", "%", "(", "query", ",", "date", ")", "elif", "date", ":", "query", "=", "date", "elif", "query", ":", "pass", "else", ":", "# date is null, query is not", "query", "=", "None", "fields", "=", "parse_fields", "(", "fields", "=", "fields", ")", "or", "None", "# we must pass in the table column objects themselves to ensure", "# our bind / result processors are mapped properly", "fields", "=", "fields", "if", "fields", "else", "table", ".", "columns", "msg", "=", "'parse(query=%s, fields=%s)'", "%", "(", "query", ",", "fields", ")", "#msg = re.sub(' in \\[[^\\]]+\\]', ' in [...]', msg)", "logger", ".", "debug", "(", "msg", ")", "kwargs", "=", "{", "}", "if", "query", ":", "interpreter", "=", "MQLInterpreter", "(", "table", ")", "query", "=", "interpreter", ".", "parse", "(", "query", ")", "kwargs", "[", "'whereclause'", "]", "=", "query", "if", "distinct", ":", "kwargs", "[", "'distinct'", "]", "=", "distinct", "query", "=", "select", "(", "fields", ",", "from_obj", "=", "table", ",", "*", "*", "kwargs", ")", "if", "limit", ">=", "1", ":", "query", "=", "query", ".", "limit", "(", "limit", ")", "if", "alias", ":", "query", "=", "query", ".", "alias", "(", "alias", ")", "return", "query" ]
Given a SQLAlchemy Table() instance, generate a SQLAlchemy Query() instance with the given parameters. :param table: SQLAlchemy Table() instance :param query: MQL query :param date: metrique date range query :param fields: list of field names to return as columns :param distinct: apply DISTINCT to this query :param limit: apply LIMIT to this query :param alias: apply ALIAS AS to this query
[ "Given", "a", "SQLAlchemy", "Table", "()", "instance", "generate", "a", "SQLAlchemy", "Query", "()", "instance", "with", "the", "given", "parameters", "." ]
python
train
32.787234
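The select construction above uses SQLAlchemy's legacy 1.x calling convention (a columns list plus whereclause/from_obj keywords, removed in 2.0); a generic, self-contained illustration of those steps:

from sqlalchemy import Column, Integer, MetaData, String, Table, select

table = Table('t', MetaData(), Column('id', Integer), Column('name', String))

# 1.x legacy signature: select(columns, whereclause=..., from_obj=..., distinct=...)
query = select([table.c.id, table.c.name], from_obj=table,
               whereclause=table.c.id > 1, distinct=True).limit(10)
print(query)
# SELECT DISTINCT t.id, t.name FROM t WHERE t.id > :id_1 LIMIT :param_1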
singularityhub/sregistry-cli
sregistry/client/shell.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/client/shell.py#L47-L56
def ipython(args): '''give the user an ipython shell, optionally with an endpoint of choice. ''' # The client will announce itself (backend/database) unless it's get from sregistry.main import get_client client = get_client(args.endpoint) client.announce(args.command) from IPython import embed embed()
[ "def", "ipython", "(", "args", ")", ":", "# The client will announce itself (backend/database) unless it's get", "from", "sregistry", ".", "main", "import", "get_client", "client", "=", "get_client", "(", "args", ".", "endpoint", ")", "client", ".", "announce", "(", "args", ".", "command", ")", "from", "IPython", "import", "embed", "embed", "(", ")" ]
give the user an ipython shell, optionally with an endpoint of choice.
[ "give", "the", "user", "an", "ipython", "shell", "optionally", "with", "an", "endpoint", "of", "choice", "." ]
python
test
32.6
cdgriffith/Box
box.py
https://github.com/cdgriffith/Box/blob/5f09df824022127e7e335e3d993f7ddc1ed97fce/box.py#L871-L892
def to_json(self, filename=None, encoding="utf-8", errors="strict", multiline=False, **json_kwargs): """ Transform the BoxList object into a JSON string. :param filename: If provided will save to file :param encoding: File encoding :param errors: How to handle encoding errors :param multiline: Put each item in list onto it's own line :param json_kwargs: additional arguments to pass to json.dump(s) :return: string of JSON or return of `json.dump` """ if filename and multiline: lines = [_to_json(item, filename=False, encoding=encoding, errors=errors, **json_kwargs) for item in self] with open(filename, 'w', encoding=encoding, errors=errors) as f: f.write("\n".join(lines).decode('utf-8') if sys.version_info < (3, 0) else "\n".join(lines)) else: return _to_json(self.to_list(), filename=filename, encoding=encoding, errors=errors, **json_kwargs)
[ "def", "to_json", "(", "self", ",", "filename", "=", "None", ",", "encoding", "=", "\"utf-8\"", ",", "errors", "=", "\"strict\"", ",", "multiline", "=", "False", ",", "*", "*", "json_kwargs", ")", ":", "if", "filename", "and", "multiline", ":", "lines", "=", "[", "_to_json", "(", "item", ",", "filename", "=", "False", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "*", "*", "json_kwargs", ")", "for", "item", "in", "self", "]", "with", "open", "(", "filename", ",", "'w'", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ")", "as", "f", ":", "f", ".", "write", "(", "\"\\n\"", ".", "join", "(", "lines", ")", ".", "decode", "(", "'utf-8'", ")", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", "else", "\"\\n\"", ".", "join", "(", "lines", ")", ")", "else", ":", "return", "_to_json", "(", "self", ".", "to_list", "(", ")", ",", "filename", "=", "filename", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "*", "*", "json_kwargs", ")" ]
Transform the BoxList object into a JSON string.

:param filename: If provided will save to file
:param encoding: File encoding
:param errors: How to handle encoding errors
:param multiline: Put each item in list onto its own line
:param json_kwargs: additional arguments to pass to json.dump(s)
:return: string of JSON or return of `json.dump`
[ "Transform", "the", "BoxList", "object", "into", "a", "JSON", "string", "." ]
python
train
49.318182
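A short usage sketch for to_json; BoxList is this library's public class, and only the file name is an arbitrary choice:

from box import BoxList

bl = BoxList([{'a': 1}, {'b': 2}])
print(bl.to_json())                                   # single JSON array string
bl.to_json(filename='items.jsonl', multiline=True)    # one JSON object per line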
olgabot/prettyplotlib
prettyplotlib/_beeswarm.py
https://github.com/olgabot/prettyplotlib/blob/aa964ff777e60d26f078d8ace386936bf41cbd15/prettyplotlib/_beeswarm.py#L278-L346
def beeswarm(*args, **kwargs):
    """
    Create an R-like beeswarm plot showing the mean and datapoints.
    The difference from matplotlib is only the left axis line is shown,
    and ticklabels labeling each category of data can be added.

    @param ax:
    @param x:
    @param kwargs: Besides xticklabels, which is a prettyplotlib-specific
    argument which will label each individual beeswarm, many arguments for
    matplotlib.pyplot.boxplot will be accepted:
    http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
    Additional arguments include:

    *median_color* : (default gray)
        The color of median lines

    *median_linewidth* : (default 2)
        Median line width

    *colors* : (default None)
        Colors to use when painting a dataseries, for example
        list1 = [1,2,3]
        list2 = [5,6,7]
        ppl.beeswarm([list1, list2], colors=["red", "blue"],
                     xticklabels=["data1", "data2"])

    @return:
    """
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    # If no ticklabels are specified, don't draw any
    xticklabels = kwargs.pop('xticklabels', None)
    colors = kwargs.pop('colors', None)
    fontsize = kwargs.pop('fontsize', 10)

    gray = _colors.set1[8]
    red = _colors.set1[0]
    blue = kwargs.pop('color', _colors.set1[1])
    kwargs.setdefault('widths', 0.25)
    kwargs.setdefault('sym', "o")

    bp = _beeswarm(ax, *args, **kwargs)

    kwargs.setdefault("median_color", gray)
    kwargs.setdefault("median_linewidth", 2)

    if xticklabels:
        ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)

    show_caps = kwargs.pop('show_caps', True)
    show_ticks = kwargs.pop('show_ticks', False)

    remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)
    linewidth = 0.75

    plt.setp(bp['boxes'], color=blue, linewidth=linewidth)
    plt.setp(bp['medians'], color=kwargs.pop("median_color"),
             linewidth=kwargs.pop("median_linewidth"))
    #plt.setp(bp['whiskers'], color=blue, linestyle='solid',
    #         linewidth=linewidth)
    if colors:  # colors defaults to None; zip() would fail on it
        for color, flier in zip(colors, bp['fliers']):
            plt.setp(flier, color=color)
    #if show_caps:
    #    plt.setp(bp['caps'], color=blue, linewidth=linewidth)
    #else:
    #    plt.setp(bp['caps'], color='none')
    ax.spines['left']._linewidth = 0.5
    return bp
[ "def", "beeswarm", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ax", ",", "args", ",", "kwargs", "=", "maybe_get_ax", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# If no ticklabels are specified, don't draw any", "xticklabels", "=", "kwargs", ".", "pop", "(", "'xticklabels'", ",", "None", ")", "colors", "=", "kwargs", ".", "pop", "(", "'colors'", ",", "None", ")", "fontsize", "=", "kwargs", ".", "pop", "(", "'fontsize'", ",", "10", ")", "gray", "=", "_colors", ".", "set1", "[", "8", "]", "red", "=", "_colors", ".", "set1", "[", "0", "]", "blue", "=", "kwargs", ".", "pop", "(", "'color'", ",", "_colors", ".", "set1", "[", "1", "]", ")", "kwargs", ".", "setdefault", "(", "'widths'", ",", "0.25", ")", "kwargs", ".", "setdefault", "(", "'sym'", ",", "\"o\"", ")", "bp", "=", "_beeswarm", "(", "ax", ",", "*", "args", ",", "*", "*", "kwargs", ")", "kwargs", ".", "setdefault", "(", "\"median_color\"", ",", "gray", ")", "kwargs", ".", "setdefault", "(", "\"median_linewidth\"", ",", "2", ")", "if", "xticklabels", ":", "ax", ".", "xaxis", ".", "set_ticklabels", "(", "xticklabels", ",", "fontsize", "=", "fontsize", ")", "show_caps", "=", "kwargs", ".", "pop", "(", "'show_caps'", ",", "True", ")", "show_ticks", "=", "kwargs", ".", "pop", "(", "'show_ticks'", ",", "False", ")", "remove_chartjunk", "(", "ax", ",", "[", "'top'", ",", "'right'", ",", "'bottom'", "]", ",", "show_ticks", "=", "show_ticks", ")", "linewidth", "=", "0.75", "plt", ".", "setp", "(", "bp", "[", "'boxes'", "]", ",", "color", "=", "blue", ",", "linewidth", "=", "linewidth", ")", "plt", ".", "setp", "(", "bp", "[", "'medians'", "]", ",", "color", "=", "kwargs", ".", "pop", "(", "\"median_color\"", ")", ",", "linewidth", "=", "kwargs", ".", "pop", "(", "\"median_linewidth\"", ")", ")", "#plt.setp(bp['whiskers'], color=blue, linestyle='solid',", "# linewidth=linewidth)", "for", "color", ",", "flier", "in", "zip", "(", "colors", ",", "bp", "[", "'fliers'", "]", ")", ":", "plt", ".", "setp", "(", "flier", ",", "color", "=", "color", ")", "#if show_caps:", "# plt.setp(bp['caps'], color=blue, linewidth=linewidth)", "#else:", "# plt.setp(bp['caps'], color='none')", "ax", ".", "spines", "[", "'left'", "]", ".", "_linewidth", "=", "0.5", "return", "bp" ]
Create an R-like beeswarm plot showing the mean and datapoints.
The difference from matplotlib is only the left axis line is shown,
and ticklabels labeling each category of data can be added.

@param ax:
@param x:
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument which will label each individual beeswarm, many arguments for
matplotlib.pyplot.boxplot will be accepted:
http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
Additional arguments include:

*median_color* : (default gray)
    The color of median lines

*median_linewidth* : (default 2)
    Median line width

*colors* : (default None)
    Colors to use when painting a dataseries, for example
    list1 = [1,2,3]
    list2 = [5,6,7]
    ppl.beeswarm([list1, list2], colors=["red", "blue"],
                 xticklabels=["data1", "data2"])

@return:
[ "Create", "a", "R", "-", "like", "beeswarm", "plot", "showing", "the", "mean", "and", "datapoints", ".", "The", "difference", "from", "matplotlib", "is", "only", "the", "left", "axis", "line", "is", "shown", "and", "ticklabels", "labeling", "each", "category", "of", "data", "can", "be", "added", "." ]
python
train
33.507246
mgedmin/check-manifest
check_manifest.py
https://github.com/mgedmin/check-manifest/blob/7f787e8272f56c5750670bfb3223509e0df72708/check_manifest.py#L423-L462
def is_interesting(entry): """Is this entry interesting? ``entry`` is an XML node representing one entry of the svn status XML output. It looks like this:: <entry path="unchanged.txt"> <wc-status item="normal" revision="1" props="none"> <commit revision="1"> <author>mg</author> <date>2015-02-06T07:52:38.163516Z</date> </commit> </wc-status> </entry> <entry path="added-but-not-committed.txt"> <wc-status item="added" revision="-1" props="none"></wc-status> </entry> <entry path="ext"> <wc-status item="external" props="none"></wc-status> </entry> <entry path="unknown.txt"> <wc-status props="none" item="unversioned"></wc-status> </entry> """ if entry.get('path') == '.': return False status = entry.find('wc-status') if status is None: warning('svn status --xml parse error: <entry path="%s"> without' ' <wc-status>' % entry.get('path')) return False # For SVN externals we get two entries: one mentioning the # existence of the external, and one about the status of the external. if status.get('item') in ('unversioned', 'external'): return False return True
[ "def", "is_interesting", "(", "entry", ")", ":", "if", "entry", ".", "get", "(", "'path'", ")", "==", "'.'", ":", "return", "False", "status", "=", "entry", ".", "find", "(", "'wc-status'", ")", "if", "status", "is", "None", ":", "warning", "(", "'svn status --xml parse error: <entry path=\"%s\"> without'", "' <wc-status>'", "%", "entry", ".", "get", "(", "'path'", ")", ")", "return", "False", "# For SVN externals we get two entries: one mentioning the", "# existence of the external, and one about the status of the external.", "if", "status", ".", "get", "(", "'item'", ")", "in", "(", "'unversioned'", ",", "'external'", ")", ":", "return", "False", "return", "True" ]
Is this entry interesting? ``entry`` is an XML node representing one entry of the svn status XML output. It looks like this:: <entry path="unchanged.txt"> <wc-status item="normal" revision="1" props="none"> <commit revision="1"> <author>mg</author> <date>2015-02-06T07:52:38.163516Z</date> </commit> </wc-status> </entry> <entry path="added-but-not-committed.txt"> <wc-status item="added" revision="-1" props="none"></wc-status> </entry> <entry path="ext"> <wc-status item="external" props="none"></wc-status> </entry> <entry path="unknown.txt"> <wc-status props="none" item="unversioned"></wc-status> </entry>
[ "Is", "this", "entry", "interesting?" ]
python
train
35.5
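A sketch of driving is_interesting() with the XML shapes from its own docstring (the parsing setup is illustrative):

import xml.etree.ElementTree as ET

status_xml = '''<target>
  <entry path="unknown.txt"><wc-status props="none" item="unversioned"/></entry>
  <entry path="added.txt"><wc-status item="added" revision="-1" props="none"/></entry>
</target>'''
root = ET.fromstring(status_xml)
print([e.get('path') for e in root.findall('entry') if is_interesting(e)])
# ['added.txt']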
mardix/flask-cloudy
flask_cloudy.py
https://github.com/mardix/flask-cloudy/blob/8085d8fbbafec6c358f0d307bfcb795de50d4acb/flask_cloudy.py#L294-L376
def upload(self, file, name=None, prefix=None, extensions=None, overwrite=False, public=False, random_name=False, **kwargs):
    """
    To upload file
    :param file: FileStorage object or string location
    :param name: The name of the object.
    :param prefix: A prefix for the object. Can be in the form of directory tree
    :param extensions: list of extensions to allow. If empty, it will use all extensions.
    :param overwrite: bool - To overwrite if file exists
    :param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
    :param random_name: If True and name is None, it will create a random name. Otherwise it will use the file name. `name` will always take precedence
    :param kwargs: extra params, e.g. acl, meta_data, etc.
    :return: Object
    """
    tmp_file = None
    try:
        if "acl" not in kwargs:
            kwargs["acl"] = "public-read" if public else "private"
        extra = kwargs

        # It seems like this is a url, we'll try to download it first
        if isinstance(file, string_types) and re.match(URL_REGEXP, file):
            tmp_file = self._download_from_url(file)
            file = tmp_file

        # Create a random name
        if not name and random_name:
            name = uuid.uuid4().hex

        # coming from a flask, or upload object
        if isinstance(file, FileStorage):
            extension = get_file_extension(file.filename)
            if not name:
                fname = get_file_name(file.filename).split("." + extension)[0]
                name = slugify.slugify(fname)
        else:
            extension = get_file_extension(file)
            if not name:
                name = get_file_name(file)

        if len(get_file_extension(name).strip()) == 0:
            name += "." + extension

        name = name.strip("/").strip()

        if isinstance(self.driver, local.LocalStorageDriver):
            name = secure_filename(name)

        if prefix:
            name = prefix.lstrip("/") + name

        if not overwrite:
            name = self._safe_object_name(name)

        # For backwards compatibility, kwargs now holds `allowed_extensions`
        allowed_extensions = extensions or kwargs.get("allowed_extensions")
        if not allowed_extensions:
            allowed_extensions = self.allowed_extensions
        if extension.lower() not in allowed_extensions:
            raise InvalidExtensionError("Invalid file extension: '.%s' " % extension)

        if isinstance(file, FileStorage):
            obj = self.container.upload_object_via_stream(iterator=file.stream,
                                                          object_name=name,
                                                          extra=extra)
        else:
            obj = self.container.upload_object(file_path=file,
                                               object_name=name,
                                               extra=extra)
        return Object(obj=obj)

    except Exception as e:
        raise e
    finally:
        if tmp_file and os.path.isfile(tmp_file):
            os.remove(tmp_file)
[ "def", "upload", "(", "self", ",", "file", ",", "name", "=", "None", ",", "prefix", "=", "None", ",", "extensions", "=", "None", ",", "overwrite", "=", "False", ",", "public", "=", "False", ",", "random_name", "=", "False", ",", "*", "*", "kwargs", ")", ":", "tmp_file", "=", "None", "try", ":", "if", "\"acl\"", "not", "in", "kwargs", ":", "kwargs", "[", "\"acl\"", "]", "=", "\"public-read\"", "if", "public", "else", "\"private\"", "extra", "=", "kwargs", "# It seems like this is a url, we'll try to download it first", "if", "isinstance", "(", "file", ",", "string_types", ")", "and", "re", ".", "match", "(", "URL_REGEXP", ",", "file", ")", ":", "tmp_file", "=", "self", ".", "_download_from_url", "(", "file", ")", "file", "=", "tmp_file", "# Create a random name", "if", "not", "name", "and", "random_name", ":", "name", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "# coming from a flask, or upload object", "if", "isinstance", "(", "file", ",", "FileStorage", ")", ":", "extension", "=", "get_file_extension", "(", "file", ".", "filename", ")", "if", "not", "name", ":", "fname", "=", "get_file_name", "(", "file", ".", "filename", ")", ".", "split", "(", "\".\"", "+", "extension", ")", "[", "0", "]", "name", "=", "slugify", ".", "slugify", "(", "fname", ")", "else", ":", "extension", "=", "get_file_extension", "(", "file", ")", "if", "not", "name", ":", "name", "=", "get_file_name", "(", "file", ")", "if", "len", "(", "get_file_extension", "(", "name", ")", ".", "strip", "(", ")", ")", "==", "0", ":", "name", "+=", "\".\"", "+", "extension", "name", "=", "name", ".", "strip", "(", "\"/\"", ")", ".", "strip", "(", ")", "if", "isinstance", "(", "self", ".", "driver", ",", "local", ".", "LocalStorageDriver", ")", ":", "name", "=", "secure_filename", "(", "name", ")", "if", "prefix", ":", "name", "=", "prefix", ".", "lstrip", "(", "\"/\"", ")", "+", "name", "if", "not", "overwrite", ":", "name", "=", "self", ".", "_safe_object_name", "(", "name", ")", "# For backwards compatibility, kwargs now holds `allowed_extensions`", "allowed_extensions", "=", "extensions", "or", "kwargs", ".", "get", "(", "\"allowed_extensions\"", ")", "if", "not", "allowed_extensions", ":", "allowed_extensions", "=", "self", ".", "allowed_extensions", "if", "extension", ".", "lower", "(", ")", "not", "in", "allowed_extensions", ":", "raise", "InvalidExtensionError", "(", "\"Invalid file extension: '.%s' \"", "%", "extension", ")", "if", "isinstance", "(", "file", ",", "FileStorage", ")", ":", "obj", "=", "self", ".", "container", ".", "upload_object_via_stream", "(", "iterator", "=", "file", ".", "stream", ",", "object_name", "=", "name", ",", "extra", "=", "extra", ")", "else", ":", "obj", "=", "self", ".", "container", ".", "upload_object", "(", "file_path", "=", "file", ",", "object_name", "=", "name", ",", "extra", "=", "extra", ")", "return", "Object", "(", "obj", "=", "obj", ")", "except", "Exception", "as", "e", ":", "raise", "e", "finally", ":", "if", "tmp_file", "and", "os", ".", "path", ".", "isfile", "(", "tmp_file", ")", ":", "os", ".", "remove", "(", "tmp_file", ")" ]
To upload file
:param file: FileStorage object or string location
:param name: The name of the object.
:param prefix: A prefix for the object. Can be in the form of directory tree
:param extensions: list of extensions to allow. If empty, it will use all extensions.
:param overwrite: bool - To overwrite if file exists
:param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
:param random_name: If True and name is None, it will create a random name. Otherwise it will use the file name. `name` will always take precedence
:param kwargs: extra params, e.g. acl, meta_data, etc.
:return: Object
[ "To", "upload", "file", ":", "param", "file", ":", "FileStorage", "object", "or", "string", "location", ":", "param", "name", ":", "The", "name", "of", "the", "object", ".", ":", "param", "prefix", ":", "A", "prefix", "for", "the", "object", ".", "Can", "be", "in", "the", "form", "of", "directory", "tree", ":", "param", "extensions", ":", "list", "of", "extensions", "to", "allow", ".", "If", "empty", "it", "will", "use", "all", "extension", ".", ":", "param", "overwrite", ":", "bool", "-", "To", "overwrite", "if", "file", "exists", ":", "param", "public", ":", "bool", "-", "To", "set", "acl", "to", "private", "or", "public", "-", "read", ".", "Having", "acl", "in", "kwargs", "will", "override", "it", ":", "param", "random_name", "-", "If", "True", "and", "Name", "is", "None", "it", "will", "create", "a", "random", "name", ".", "Otherwise", "it", "will", "use", "the", "file", "name", ".", "name", "will", "always", "take", "precedence", ":", "param", "kwargs", ":", "extra", "params", ":", "ie", ":", "acl", "meta_data", "etc", ".", ":", "return", ":", "Object" ]
python
train
41.39759
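A hedged usage sketch; Storage is flask-cloudy's public entry point, but the exact config keys and the returned object's attributes are assumptions based on the library's README:

from flask import Flask
from flask_cloudy import Storage

app = Flask(__name__)
app.config.update(STORAGE_PROVIDER='LOCAL',          # config keys assumed
                  STORAGE_CONTAINER='./uploads',
                  STORAGE_ALLOWED_EXTENSIONS=['txt'])
storage = Storage(app)

obj = storage.upload('notes.txt', prefix='docs/', public=True)
print(obj.name)   # e.g. 'docs/notes.txt' (or a de-duplicated variant)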
napalm-automation/napalm-logs
napalm_logs/base.py
https://github.com/napalm-automation/napalm-logs/blob/4b89100a6e4f994aa004f3ea42a06dc803a7ccb0/napalm_logs/base.py#L613-L662
def start_engine(self): ''' Start the child processes (one per device OS) ''' if self.disable_security is True: log.warning('***Not starting the authenticator process due to disable_security being set to True***') else: log.debug('Generating the private key') self.__priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE) log.debug('Generating the signing key') self.__signing_key = nacl.signing.SigningKey.generate() # start the keepalive thread for the auth sub-process self._processes.append(self._start_auth_proc()) log.debug('Starting the internal proxy') proc = self._start_pub_px_proc() self._processes.append(proc) # publisher process start pub_id = 0 for pub in self.publisher: publisher_type, publisher_opts = list(pub.items())[0] proc = self._start_pub_proc(publisher_type, publisher_opts, pub_id) self._processes.append(proc) pub_id += 1 # device process start log.info('Starting child processes for each device type') started_os_proc = [] for device_os, device_config in self.config_dict.items(): if not self._whitelist_blacklist(device_os): log.debug('Not starting process for %s (whitelist-blacklist logic)', device_os) # Ignore devices that are not in the whitelist (if defined), # or those operating systems that are on the blacklist. # This way we can prevent starting unwanted sub-processes. continue log.debug('Will start %d worker process(es) for %s', self.device_worker_processes, device_os) for proc_index in range(self.device_worker_processes): self._processes.append(self._start_dev_proc(device_os, device_config)) started_os_proc.append(device_os) # start the server process self._processes.append(self._start_srv_proc(started_os_proc)) # start listener process for lst in self.listener: listener_type, listener_opts = list(lst.items())[0] proc = self._start_lst_proc(listener_type, listener_opts) self._processes.append(proc) thread = threading.Thread(target=self._check_children) thread.start()
[ "def", "start_engine", "(", "self", ")", ":", "if", "self", ".", "disable_security", "is", "True", ":", "log", ".", "warning", "(", "'***Not starting the authenticator process due to disable_security being set to True***'", ")", "else", ":", "log", ".", "debug", "(", "'Generating the private key'", ")", "self", ".", "__priv_key", "=", "nacl", ".", "utils", ".", "random", "(", "nacl", ".", "secret", ".", "SecretBox", ".", "KEY_SIZE", ")", "log", ".", "debug", "(", "'Generating the signing key'", ")", "self", ".", "__signing_key", "=", "nacl", ".", "signing", ".", "SigningKey", ".", "generate", "(", ")", "# start the keepalive thread for the auth sub-process", "self", ".", "_processes", ".", "append", "(", "self", ".", "_start_auth_proc", "(", ")", ")", "log", ".", "debug", "(", "'Starting the internal proxy'", ")", "proc", "=", "self", ".", "_start_pub_px_proc", "(", ")", "self", ".", "_processes", ".", "append", "(", "proc", ")", "# publisher process start", "pub_id", "=", "0", "for", "pub", "in", "self", ".", "publisher", ":", "publisher_type", ",", "publisher_opts", "=", "list", "(", "pub", ".", "items", "(", ")", ")", "[", "0", "]", "proc", "=", "self", ".", "_start_pub_proc", "(", "publisher_type", ",", "publisher_opts", ",", "pub_id", ")", "self", ".", "_processes", ".", "append", "(", "proc", ")", "pub_id", "+=", "1", "# device process start", "log", ".", "info", "(", "'Starting child processes for each device type'", ")", "started_os_proc", "=", "[", "]", "for", "device_os", ",", "device_config", "in", "self", ".", "config_dict", ".", "items", "(", ")", ":", "if", "not", "self", ".", "_whitelist_blacklist", "(", "device_os", ")", ":", "log", ".", "debug", "(", "'Not starting process for %s (whitelist-blacklist logic)'", ",", "device_os", ")", "# Ignore devices that are not in the whitelist (if defined),", "# or those operating systems that are on the blacklist.", "# This way we can prevent starting unwanted sub-processes.", "continue", "log", ".", "debug", "(", "'Will start %d worker process(es) for %s'", ",", "self", ".", "device_worker_processes", ",", "device_os", ")", "for", "proc_index", "in", "range", "(", "self", ".", "device_worker_processes", ")", ":", "self", ".", "_processes", ".", "append", "(", "self", ".", "_start_dev_proc", "(", "device_os", ",", "device_config", ")", ")", "started_os_proc", ".", "append", "(", "device_os", ")", "# start the server process", "self", ".", "_processes", ".", "append", "(", "self", ".", "_start_srv_proc", "(", "started_os_proc", ")", ")", "# start listener process", "for", "lst", "in", "self", ".", "listener", ":", "listener_type", ",", "listener_opts", "=", "list", "(", "lst", ".", "items", "(", ")", ")", "[", "0", "]", "proc", "=", "self", ".", "_start_lst_proc", "(", "listener_type", ",", "listener_opts", ")", "self", ".", "_processes", ".", "append", "(", "proc", ")", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_check_children", ")", "thread", ".", "start", "(", ")" ]
Start the child processes (one per device OS)
[ "Start", "the", "child", "processes", "(", "one", "per", "device", "OS", ")" ]
python
train
50.82
vertexproject/synapse
synapse/cortex.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L1597-L1601
async def nodes(self, text, opts=None, user=None): ''' A simple non-streaming way to return a list of nodes. ''' return [n async for n in self.eval(text, opts=opts, user=user)]
[ "async", "def", "nodes", "(", "self", ",", "text", ",", "opts", "=", "None", ",", "user", "=", "None", ")", ":", "return", "[", "n", "async", "for", "n", "in", "self", ".", "eval", "(", "text", ",", "opts", "=", "opts", ",", "user", "=", "user", ")", "]" ]
A simple non-streaming way to return a list of nodes.
[ "A", "simple", "non", "-", "streaming", "way", "to", "return", "a", "list", "of", "nodes", "." ]
python
train
40.8
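nodes() is a coroutine on a Cortex, so a call looks roughly like this (the temp-cortex setup and the storm query are illustrative):

import asyncio
import synapse.cortex as s_cortex

async def main():
    async with await s_cortex.Cortex.anit('/tmp/demo-cortex') as core:
        nodes = await core.nodes('[ inet:ipv4=1.2.3.4 ]')
        print(len(nodes))   # 1

asyncio.run(main())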
Qiskit/qiskit-terra
qiskit/extensions/standard/ch.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/extensions/standard/ch.py#L33-L65
def _define(self): """ gate ch a,b { h b; sdg b; cx a,b; h b; t b; cx a,b; t b; h b; s b; x b; s a;} """ definition = [] q = QuantumRegister(2, "q") rule = [ (HGate(), [q[1]], []), (SdgGate(), [q[1]], []), (CnotGate(), [q[0], q[1]], []), (HGate(), [q[1]], []), (TGate(), [q[1]], []), (CnotGate(), [q[0], q[1]], []), (TGate(), [q[1]], []), (HGate(), [q[1]], []), (SGate(), [q[1]], []), (XGate(), [q[1]], []), (SGate(), [q[0]], []) ] for inst in rule: definition.append(inst) self.definition = definition
[ "def", "_define", "(", "self", ")", ":", "definition", "=", "[", "]", "q", "=", "QuantumRegister", "(", "2", ",", "\"q\"", ")", "rule", "=", "[", "(", "HGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "SdgGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "CnotGate", "(", ")", ",", "[", "q", "[", "0", "]", ",", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "HGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "TGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "CnotGate", "(", ")", ",", "[", "q", "[", "0", "]", ",", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "TGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "HGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "SGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "XGate", "(", ")", ",", "[", "q", "[", "1", "]", "]", ",", "[", "]", ")", ",", "(", "SGate", "(", ")", ",", "[", "q", "[", "0", "]", "]", ",", "[", "]", ")", "]", "for", "inst", "in", "rule", ":", "definition", ".", "append", "(", "inst", ")", "self", ".", "definition", "=", "definition" ]
gate ch a,b { h b; sdg b; cx a,b; h b; t b; cx a,b; t b; h b; s b; x b; s a;}
[ "gate", "ch", "a", "b", "{", "h", "b", ";", "sdg", "b", ";", "cx", "a", "b", ";", "h", "b", ";", "t", "b", ";", "cx", "a", "b", ";", "t", "b", ";", "h", "b", ";", "s", "b", ";", "x", "b", ";", "s", "a", ";", "}" ]
python
test
23.454545
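From user code, the same decomposition can be inspected without calling _define directly (circuit construction per Qiskit Terra of this vintage):

from qiskit import QuantumCircuit, QuantumRegister

q = QuantumRegister(2, 'q')
circ = QuantumCircuit(q)
circ.ch(q[0], q[1])           # controlled-Hadamard on q[1], controlled by q[0]
print(circ.decompose())       # expands into the h/sdg/cx/t/s/x rule above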
muckamuck/stackility
stackility/CloudStackUtility.py
https://github.com/muckamuck/stackility/blob/b1696f02661134d31b99b4dea7c0d21d09482d33/stackility/CloudStackUtility.py#L453-L498
def _fill_parameters(self):
    """
    Fill in the _parameters dict from the properties file.

    Args:
        None

    Returns:
        True

    Todo:
        Figure out what could go wrong and at least acknowledge the
        fact that Murphy was an optimist.
    """
    self._parameters = self._config.get('parameters', {})
    self._fill_defaults()

    for k in self._parameters.keys():
        try:
            if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
                parts = self._parameters[k].split(':')
                tmp = parts[1].replace(']', '')
                val = self._get_ssm_parameter(tmp)
                if val:
                    self._parameters[k] = val
                else:
                    logging.error('SSM parameter {} not found'.format(tmp))
                    return False
            elif self._parameters[k] == self.ASK:
                val = None
                a1 = '__x___'
                a2 = '__y___'
                prompt1 = "Enter value for '{}': ".format(k)
                prompt2 = "Confirm value for '{}': ".format(k)
                while a1 != a2:
                    a1 = getpass.getpass(prompt=prompt1)
                    a2 = getpass.getpass(prompt=prompt2)
                    if a1 == a2:
                        val = a1
                    else:
                        print('values do not match, try again')
                self._parameters[k] = val
        except Exception:
            # non-string values (and anything else odd) are left untouched
            pass

    return True
[ "def", "_fill_parameters", "(", "self", ")", ":", "self", ".", "_parameters", "=", "self", ".", "_config", ".", "get", "(", "'parameters'", ",", "{", "}", ")", "self", ".", "_fill_defaults", "(", ")", "for", "k", "in", "self", ".", "_parameters", ".", "keys", "(", ")", ":", "try", ":", "if", "self", ".", "_parameters", "[", "k", "]", ".", "startswith", "(", "self", ".", "SSM", ")", "and", "self", ".", "_parameters", "[", "k", "]", ".", "endswith", "(", "']'", ")", ":", "parts", "=", "self", ".", "_parameters", "[", "k", "]", ".", "split", "(", "':'", ")", "tmp", "=", "parts", "[", "1", "]", ".", "replace", "(", "']'", ",", "''", ")", "val", "=", "self", ".", "_get_ssm_parameter", "(", "tmp", ")", "if", "val", ":", "self", ".", "_parameters", "[", "k", "]", "=", "val", "else", ":", "logging", ".", "error", "(", "'SSM parameter {} not found'", ".", "format", "(", "tmp", ")", ")", "return", "False", "elif", "self", ".", "_parameters", "[", "k", "]", "==", "self", ".", "ASK", ":", "val", "=", "None", "a1", "=", "'__x___'", "a2", "=", "'__y___'", "prompt1", "=", "\"Enter value for '{}': \"", ".", "format", "(", "k", ")", "prompt2", "=", "\"Confirm value for '{}': \"", ".", "format", "(", "k", ")", "while", "a1", "!=", "a2", ":", "a1", "=", "getpass", ".", "getpass", "(", "prompt", "=", "prompt1", ")", "a2", "=", "getpass", ".", "getpass", "(", "prompt", "=", "prompt2", ")", "if", "a1", "==", "a2", ":", "val", "=", "a1", "else", ":", "print", "(", "'values do not match, try again'", ")", "self", ".", "_parameters", "[", "k", "]", "=", "val", "except", ":", "pass", "return", "True" ]
Fill in the _parameters dict from the properties file.

Args:
    None

Returns:
    True

Todo:
    Figure out what could go wrong and at least acknowledge the
    fact that Murphy was an optimist.
[ "Fill", "in", "the", "_parameters", "dict", "from", "the", "properties", "file", "." ]
python
train
35.521739
SHTOOLS/SHTOOLS
pyshtools/shclasses/shmagcoeffs.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shmagcoeffs.py#L1344-L1383
def change_ref(self, r0=None, lmax=None):
    """
    Return a new SHMagCoeffs class instance with a different reference r0.

    Usage
    -----
    clm = x.change_ref([r0, lmax])

    Returns
    -------
    clm : SHMagCoeffs class instance.

    Parameters
    ----------
    r0 : float, optional, default = self.r0
        The reference radius of the spherical harmonic coefficients.
    lmax : int, optional, default = self.lmax
        Maximum spherical harmonic degree to output.

    Description
    -----------
    This method returns a new class instance of the magnetic potential,
    but using a different reference r0. When changing the reference
    radius r0, the spherical harmonic coefficients will be upward or
    downward continued under the assumption that the reference radius is
    exterior to the body.
    """
    if lmax is None:
        lmax = self.lmax

    clm = self.pad(lmax)

    if r0 is not None and r0 != self.r0:
        for l in _np.arange(lmax+1):
            clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**(l+2)
            if self.errors is not None:
                clm.errors[:, l, :l+1] *= (self.r0 / r0)**(l+2)
        clm.r0 = r0

    return clm
[ "def", "change_ref", "(", "self", ",", "r0", "=", "None", ",", "lmax", "=", "None", ")", ":", "if", "lmax", "is", "None", ":", "lmax", "=", "self", ".", "lmax", "clm", "=", "self", ".", "pad", "(", "lmax", ")", "if", "r0", "is", "not", "None", "and", "r0", "!=", "self", ".", "r0", ":", "for", "l", "in", "_np", ".", "arange", "(", "lmax", "+", "1", ")", ":", "clm", ".", "coeffs", "[", ":", ",", "l", ",", ":", "l", "+", "1", "]", "*=", "(", "self", ".", "r0", "/", "r0", ")", "**", "(", "l", "+", "2", ")", "if", "self", ".", "errors", "is", "not", "None", ":", "clm", ".", "errors", "[", ":", ",", "l", ",", ":", "l", "+", "1", "]", "*=", "(", "self", ".", "r0", "/", "r0", ")", "**", "(", "l", "+", "2", ")", "clm", ".", "r0", "=", "r0", "return", "clm" ]
Return a new SHMagCoeffs class instance with a different reference r0.

Usage
-----
clm = x.change_ref([r0, lmax])

Returns
-------
clm : SHMagCoeffs class instance.

Parameters
----------
r0 : float, optional, default = self.r0
    The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
    Maximum spherical harmonic degree to output.

Description
-----------
This method returns a new class instance of the magnetic potential,
but using a different reference r0. When changing the reference
radius r0, the spherical harmonic coefficients will be upward or
downward continued under the assumption that the reference radius is
exterior to the body.
[ "Return", "a", "new", "SHMagCoeffs", "class", "instance", "with", "a", "different", "reference", "r0", "." ]
python
train
31.9
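A minimal sketch of change_ref, assuming SHMagCoeffs.from_array accepts a (2, lmax+1, lmax+1) array plus an r0 keyword; the coefficient values are placeholders:

import numpy as np
import pyshtools as pysh

lmax = 4
coeffs = np.zeros((2, lmax + 1, lmax + 1))
coeffs[0, 1, 0] = 30000.0                       # rough dipole term in nT
clm = pysh.SHMagCoeffs.from_array(coeffs, r0=6371.2e3)
clm_up = clm.change_ref(r0=6371.2e3 + 400e3)    # upward-continue by ~400 km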
drewsonne/aws-autodiscovery-templater
awsautodiscoverytemplater/command.py
https://github.com/drewsonne/aws-autodiscovery-templater/blob/9ef2edd6a373aeb5d343b841550c210966efe079/awsautodiscoverytemplater/command.py#L247-L260
def _parse_cli_filters(filters):
    """
    Parse the filters from the CLI and turn them into a filter dict for boto.
    :param filters: iterable of filter strings in the form 'Name=<name>,Values=[<v1>,<v2>,...]'
    :return: list of {'Name': ..., 'Values': [...]} filter dicts
    """
    parsed_filters = []
    for filter_entry in filters:
        filter_parts = re.match('^Name=(?P<name_value>[^,]+),Values=\[?(?P<key_values>[^\]]+)\]?', filter_entry)
        parsed_filters.append({
            'Name': filter_parts.group('name_value'),
            'Values': filter_parts.group('key_values').split(',')
        })
    return parsed_filters
[ "def", "_parse_cli_filters", "(", "filters", ")", ":", "parsed_filters", "=", "[", "]", "for", "filter_entry", "in", "filters", ":", "filter_parts", "=", "re", ".", "match", "(", "'^Name=(?P<name_value>[^,]+),Values=\\[?(?P<key_values>[^\\]]+)\\]?'", ",", "filter_entry", ")", "parsed_filters", ".", "append", "(", "{", "'Name'", ":", "filter_parts", ".", "group", "(", "'name_value'", ")", ",", "'Values'", ":", "filter_parts", ".", "group", "(", "'key_values'", ")", ".", "split", "(", "','", ")", "}", ")", "return", "parsed_filters" ]
Parse the filters from the CLI and turn them into a filter dict for boto.
:param filters: iterable of filter strings in the form 'Name=<name>,Values=[<v1>,<v2>,...]'
:return: list of {'Name': ..., 'Values': [...]} filter dicts
[ "Parse", "the", "filters", "from", "the", "CLI", "and", "turn", "them", "into", "a", "filter", "dict", "for", "boto", ".", ":", "param", "filters", ":", ":", "return", ":" ]
python
train
39.785714
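Given the regex, a call sketch (assuming the function is reachable as a plain function or staticmethod):

filters = ['Name=instance-state-name,Values=[running,stopped]',
           'Name=tag:role,Values=web']
print(_parse_cli_filters(filters))
# [{'Name': 'instance-state-name', 'Values': ['running', 'stopped']},
#  {'Name': 'tag:role', 'Values': ['web']}]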
AlexMathew/scrapple
scrapple/cmd.py
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/cmd.py#L49-L67
def runCLI():
    """
    The starting point for the execution of the Scrapple command line tool.

    runCLI uses the docstring as the usage description for the scrapple command.
    The class for the required command is selected by a dynamic dispatch, and the
    command is executed through the execute_command() method of the command class.
    """
    args = docopt(__doc__, version='0.3.0')
    try:
        check_arguments(args)
        command_list = ['genconfig', 'run', 'generate']
        select = itemgetter('genconfig', 'run', 'generate')
        selectedCommand = command_list[select(args).index(True)]
        cmdClass = get_command_class(selectedCommand)
        obj = cmdClass(args)
        obj.execute_command()
    except POSSIBLE_EXCEPTIONS as e:
        print('\n', e, '\n')
[ "def", "runCLI", "(", ")", ":", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "'0.3.0'", ")", "try", ":", "check_arguments", "(", "args", ")", "command_list", "=", "[", "'genconfig'", ",", "'run'", ",", "'generate'", "]", "select", "=", "itemgetter", "(", "'genconfig'", ",", "'run'", ",", "'generate'", ")", "selectedCommand", "=", "command_list", "[", "select", "(", "args", ")", ".", "index", "(", "True", ")", "]", "cmdClass", "=", "get_command_class", "(", "selectedCommand", ")", "obj", "=", "cmdClass", "(", "args", ")", "obj", ".", "execute_command", "(", ")", "except", "POSSIBLE_EXCEPTIONS", "as", "e", ":", "print", "(", "'\\n'", ",", "e", ",", "'\\n'", ")" ]
The starting point for the execution of the Scrapple command line tool.

runCLI uses the docstring as the usage description for the scrapple command.
The class for the required command is selected by a dynamic dispatch, and the
command is executed through the execute_command() method of the command class.
[ "The", "starting", "point", "for", "the", "execution", "of", "the", "Scrapple", "command", "line", "tool", "." ]
python
train
41.105263
PySimpleGUI/PySimpleGUI
DemoPrograms/Demo_DOC_Viewer_PIL.py
https://github.com/PySimpleGUI/PySimpleGUI/blob/08184197f5bd4580ab5e5aca28bdda30f87b86fc/DemoPrograms/Demo_DOC_Viewer_PIL.py#L75-L118
def get_page(pno, zoom = False, max_size = None, first = False): """Return a PNG image for a document page number. """ dlist = dlist_tab[pno] # get display list of page number if not dlist: # create if not yet there dlist_tab[pno] = doc[pno].getDisplayList() dlist = dlist_tab[pno] r = dlist.rect # the page rectangle clip = r # ensure image fits screen: # exploit, but do not exceed width or height zoom_0 = 1 if max_size: zoom_0 = min(1, max_size[0] / r.width, max_size[1] / r.height) if zoom_0 == 1: zoom_0 = min(max_size[0] / r.width, max_size[1] / r.height) mat_0 = fitz.Matrix(zoom_0, zoom_0) if not zoom: # show total page pix = dlist.getPixmap(matrix = mat_0, alpha=False) else: mp = r.tl + (r.br - r.tl) * 0.5 # page rect center w2 = r.width / 2 h2 = r.height / 2 clip = r * 0.5 tl = zoom[0] # old top-left tl.x += zoom[1] * (w2 / 2) tl.x = max(0, tl.x) tl.x = min(w2, tl.x) tl.y += zoom[2] * (h2 / 2) tl.y = max(0, tl.y) tl.y = min(h2, tl.y) clip = fitz.Rect(tl, tl.x + w2, tl.y + h2) mat = mat_0 * fitz.Matrix(2, 2) # zoom matrix pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip) if first: # first call: tkinter still inactive img = pix.getPNGData() # so use fitz png output else: # else take tk photo image pilimg = Image.frombytes("RGB", [pix.width, pix.height], pix.samples) img = ImageTk.PhotoImage(pilimg) return img, clip.tl
[ "def", "get_page", "(", "pno", ",", "zoom", "=", "False", ",", "max_size", "=", "None", ",", "first", "=", "False", ")", ":", "dlist", "=", "dlist_tab", "[", "pno", "]", "# get display list of page number", "if", "not", "dlist", ":", "# create if not yet there", "dlist_tab", "[", "pno", "]", "=", "doc", "[", "pno", "]", ".", "getDisplayList", "(", ")", "dlist", "=", "dlist_tab", "[", "pno", "]", "r", "=", "dlist", ".", "rect", "# the page rectangle", "clip", "=", "r", "# ensure image fits screen:", "# exploit, but do not exceed width or height", "zoom_0", "=", "1", "if", "max_size", ":", "zoom_0", "=", "min", "(", "1", ",", "max_size", "[", "0", "]", "/", "r", ".", "width", ",", "max_size", "[", "1", "]", "/", "r", ".", "height", ")", "if", "zoom_0", "==", "1", ":", "zoom_0", "=", "min", "(", "max_size", "[", "0", "]", "/", "r", ".", "width", ",", "max_size", "[", "1", "]", "/", "r", ".", "height", ")", "mat_0", "=", "fitz", ".", "Matrix", "(", "zoom_0", ",", "zoom_0", ")", "if", "not", "zoom", ":", "# show total page", "pix", "=", "dlist", ".", "getPixmap", "(", "matrix", "=", "mat_0", ",", "alpha", "=", "False", ")", "else", ":", "mp", "=", "r", ".", "tl", "+", "(", "r", ".", "br", "-", "r", ".", "tl", ")", "*", "0.5", "# page rect center", "w2", "=", "r", ".", "width", "/", "2", "h2", "=", "r", ".", "height", "/", "2", "clip", "=", "r", "*", "0.5", "tl", "=", "zoom", "[", "0", "]", "# old top-left", "tl", ".", "x", "+=", "zoom", "[", "1", "]", "*", "(", "w2", "/", "2", ")", "tl", ".", "x", "=", "max", "(", "0", ",", "tl", ".", "x", ")", "tl", ".", "x", "=", "min", "(", "w2", ",", "tl", ".", "x", ")", "tl", ".", "y", "+=", "zoom", "[", "2", "]", "*", "(", "h2", "/", "2", ")", "tl", ".", "y", "=", "max", "(", "0", ",", "tl", ".", "y", ")", "tl", ".", "y", "=", "min", "(", "h2", ",", "tl", ".", "y", ")", "clip", "=", "fitz", ".", "Rect", "(", "tl", ",", "tl", ".", "x", "+", "w2", ",", "tl", ".", "y", "+", "h2", ")", "mat", "=", "mat_0", "*", "fitz", ".", "Matrix", "(", "2", ",", "2", ")", "# zoom matrix", "pix", "=", "dlist", ".", "getPixmap", "(", "alpha", "=", "False", ",", "matrix", "=", "mat", ",", "clip", "=", "clip", ")", "if", "first", ":", "# first call: tkinter still inactive", "img", "=", "pix", ".", "getPNGData", "(", ")", "# so use fitz png output", "else", ":", "# else take tk photo image", "pilimg", "=", "Image", ".", "frombytes", "(", "\"RGB\"", ",", "[", "pix", ".", "width", ",", "pix", ".", "height", "]", ",", "pix", ".", "samples", ")", "img", "=", "ImageTk", ".", "PhotoImage", "(", "pilimg", ")", "return", "img", ",", "clip", ".", "tl" ]
Return a PNG image for a document page number.
[ "Return", "a", "PNG", "image", "for", "a", "document", "page", "number", "." ]
python
train
37.772727
Phyks/libbmc
libbmc/isbn.py
https://github.com/Phyks/libbmc/blob/9ef1a29d2514157d1edd6c13ecbd61b07ae9315e/libbmc/isbn.py#L67-L85
def get_bibtex(isbn_identifier): """ Get a BibTeX string for the given ISBN. :param isbn_identifier: ISBN to fetch BibTeX entry for. :returns: A BibTeX string or ``None`` if could not fetch it. >>> get_bibtex('9783161484100') '@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}' """ # Try to find the BibTeX using associated DOIs bibtex = doi.get_bibtex(to_doi(isbn_identifier)) if bibtex is None: # In some cases, there are no DOIs for a given ISBN. In this case, try # to fetch bibtex directly from the ISBN, using a combination of # Google Books and worldcat.org results. bibtex = isbnlib.registry.bibformatters['bibtex']( isbnlib.meta(isbn_identifier, 'default')) return bibtex
[ "def", "get_bibtex", "(", "isbn_identifier", ")", ":", "# Try to find the BibTeX using associated DOIs", "bibtex", "=", "doi", ".", "get_bibtex", "(", "to_doi", "(", "isbn_identifier", ")", ")", "if", "bibtex", "is", "None", ":", "# In some cases, there are no DOIs for a given ISBN. In this case, try", "# to fetch bibtex directly from the ISBN, using a combination of", "# Google Books and worldcat.org results.", "bibtex", "=", "isbnlib", ".", "registry", ".", "bibformatters", "[", "'bibtex'", "]", "(", "isbnlib", ".", "meta", "(", "isbn_identifier", ",", "'default'", ")", ")", "return", "bibtex" ]
Get a BibTeX string for the given ISBN. :param isbn_identifier: ISBN to fetch BibTeX entry for. :returns: A BibTeX string or ``None`` if could not fetch it. >>> get_bibtex('9783161484100') '@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
[ "Get", "a", "BibTeX", "string", "for", "the", "given", "ISBN", "." ]
python
train
48.631579
SeabornGames/RequestClient
example_bindings/account.py
https://github.com/SeabornGames/RequestClient/blob/21aeb951ddfdb6ee453ad0edc896ff224e06425d/example_bindings/account.py#L49-L55
def put(self, name=None, user_ids=None): """ :param name: str of name for the account, defaults to the created timestamp :param user_ids: list of int of users to give access to this account defaults to current user :return: Account dict created """ return self.connection.put('account', data=dict(name=name, user_ids=user_ids))
[ "def", "put", "(", "self", ",", "name", "=", "None", ",", "user_ids", "=", "None", ")", ":", "return", "self", ".", "connection", ".", "put", "(", "'account'", ",", "data", "=", "dict", "(", "name", "=", "name", ",", "user_ids", "=", "user_ids", ")", ")" ]
:param name: str of name for the account, defaults to the created timestamp :param user_ids: list of int of users to give access to this account defaults to current user :return: Account dict created
[ ":", "param", "name", ":", "str", "of", "name", "for", "the", "account", "defaults", "to", "the", "created", "timestamp", ":", "param", "user_ids", ":", "list", "of", "int", "of", "users", "to", "give", "access", "to", "this", "account", "defaults", "to", "current", "user", ":", "return", ":", "Account", "dict", "created" ]
python
train
54.428571
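A rough call sketch; how the binding's connection is constructed is not shown in the record, so it is left abstract here:

account = Account(connection)                     # connection setup assumed
created = account.put(name='team-prod', user_ids=[3, 7])
print(created)                                    # dict describing the account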
sorgerlab/indra
indra/tools/assemble_corpus.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/assemble_corpus.py#L742-L783
def filter_belief(stmts_in, belief_cutoff, **kwargs): """Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements. """ dump_pkl = kwargs.get('save') logger.info('Filtering %d statements to above %f belief' % (len(stmts_in), belief_cutoff)) # The first round of filtering is in the top-level list stmts_out = [] # Now we eliminate supports/supported-by for stmt in stmts_in: if stmt.belief < belief_cutoff: continue stmts_out.append(stmt) supp_by = [] supp = [] for st in stmt.supports: if st.belief >= belief_cutoff: supp.append(st) for st in stmt.supported_by: if st.belief >= belief_cutoff: supp_by.append(st) stmt.supports = supp stmt.supported_by = supp_by logger.info('%d statements after filter...' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
[ "def", "filter_belief", "(", "stmts_in", ",", "belief_cutoff", ",", "*", "*", "kwargs", ")", ":", "dump_pkl", "=", "kwargs", ".", "get", "(", "'save'", ")", "logger", ".", "info", "(", "'Filtering %d statements to above %f belief'", "%", "(", "len", "(", "stmts_in", ")", ",", "belief_cutoff", ")", ")", "# The first round of filtering is in the top-level list", "stmts_out", "=", "[", "]", "# Now we eliminate supports/supported-by", "for", "stmt", "in", "stmts_in", ":", "if", "stmt", ".", "belief", "<", "belief_cutoff", ":", "continue", "stmts_out", ".", "append", "(", "stmt", ")", "supp_by", "=", "[", "]", "supp", "=", "[", "]", "for", "st", "in", "stmt", ".", "supports", ":", "if", "st", ".", "belief", ">=", "belief_cutoff", ":", "supp", ".", "append", "(", "st", ")", "for", "st", "in", "stmt", ".", "supported_by", ":", "if", "st", ".", "belief", ">=", "belief_cutoff", ":", "supp_by", ".", "append", "(", "st", ")", "stmt", ".", "supports", "=", "supp", "stmt", ".", "supported_by", "=", "supp_by", "logger", ".", "info", "(", "'%d statements after filter...'", "%", "len", "(", "stmts_out", ")", ")", "if", "dump_pkl", ":", "dump_statements", "(", "stmts_out", ",", "dump_pkl", ")", "return", "stmts_out" ]
Filter to statements with belief above a given cutoff. Parameters ---------- stmts_in : list[indra.statements.Statement] A list of statements to filter. belief_cutoff : float Only statements with belief above the belief_cutoff will be returned. Here 0 < belief_cutoff < 1. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of filtered statements.
[ "Filter", "to", "statements", "with", "belief", "above", "a", "given", "cutoff", "." ]
python
train
33.666667
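A small usage sketch with hand-built statements (Agent and Phosphorylation are real INDRA classes; the belief values are made up):

from indra.statements import Agent, Phosphorylation
from indra.tools import assemble_corpus as ac

hi = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
hi.belief = 0.95
lo = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'))
lo.belief = 0.40

kept = ac.filter_belief([hi, lo], 0.9)
print(len(kept))   # 1 -- only the high-belief statement survives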
PmagPy/PmagPy
dialogs/magic_grid3.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/dialogs/magic_grid3.py#L345-L361
def update_changes_after_row_delete(self, row_num): """ Update self.changes so that row numbers for edited rows are still correct. I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3. This function updates self.changes to reflect that. """ if row_num in self.changes.copy(): self.changes.remove(row_num) updated_rows = [] for changed_row in self.changes: if changed_row == -1: updated_rows.append(-1) if changed_row > row_num: updated_rows.append(changed_row - 1) if changed_row < row_num: updated_rows.append(changed_row) self.changes = set(updated_rows)
[ "def", "update_changes_after_row_delete", "(", "self", ",", "row_num", ")", ":", "if", "row_num", "in", "self", ".", "changes", ".", "copy", "(", ")", ":", "self", ".", "changes", ".", "remove", "(", "row_num", ")", "updated_rows", "=", "[", "]", "for", "changed_row", "in", "self", ".", "changes", ":", "if", "changed_row", "==", "-", "1", ":", "updated_rows", ".", "append", "(", "-", "1", ")", "if", "changed_row", ">", "row_num", ":", "updated_rows", ".", "append", "(", "changed_row", "-", "1", ")", "if", "changed_row", "<", "row_num", ":", "updated_rows", ".", "append", "(", "changed_row", ")", "self", ".", "changes", "=", "set", "(", "updated_rows", ")" ]
Update self.changes so that row numbers for edited rows are still correct. I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3. This function updates self.changes to reflect that.
[ "Update", "self", ".", "changes", "so", "that", "row", "numbers", "for", "edited", "rows", "are", "still", "correct", ".", "I", ".", "e", ".", "if", "row", "4", "was", "edited", "and", "then", "row", "2", "was", "deleted", "row", "4", "becomes", "row", "3", ".", "This", "function", "updates", "self", ".", "changes", "to", "reflect", "that", "." ]
python
train
42.882353
gbowerman/azurerm
azurerm/computerp.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/computerp.py#L379-L396
def get_vmss(access_token, subscription_id, resource_group, vmss_name): '''Get virtual machine scale set details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of scale set properties. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name, '?api-version=', COMP_API]) return do_get(endpoint, access_token)
[ "def", "get_vmss", "(", "access_token", ",", "subscription_id", ",", "resource_group", ",", "vmss_name", ")", ":", "endpoint", "=", "''", ".", "join", "(", "[", "get_rm_endpoint", "(", ")", ",", "'/subscriptions/'", ",", "subscription_id", ",", "'/resourceGroups/'", ",", "resource_group", ",", "'/providers/Microsoft.Compute/virtualMachineScaleSets/'", ",", "vmss_name", ",", "'?api-version='", ",", "COMP_API", "]", ")", "return", "do_get", "(", "endpoint", ",", "access_token", ")" ]
Get virtual machine scale set details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. Returns: HTTP response. JSON body of scale set properties.
[ "Get", "virtual", "machine", "scale", "set", "details", "." ]
python
train
43.277778
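A call sketch; get_access_token is another helper in this package (signature assumed), and all identifiers below are placeholders:

import azurerm

token = azurerm.get_access_token('tenant-id', 'app-id', 'app-secret')
vmss = azurerm.get_vmss(token, 'subscription-id', 'my-rg', 'my-vmss')
print(vmss['properties']['provisioningState'])    # key path assumed from ARM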
Microsoft/ApplicationInsights-Python
applicationinsights/flask/ext.py
https://github.com/Microsoft/ApplicationInsights-Python/blob/8452ab7126f9bb6964637d4aa1258c2af17563d6/applicationinsights/flask/ext.py#L155-L183
def _init_exception_logging(self, app):
    """
    Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
    is set in the Flask config.

    Args:
        app (flask.Flask): the Flask application for which to initialize the extension.
    """
    enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)

    if not enabled:
        return

    exception_telemetry_client = TelemetryClient(
        self._key, telemetry_channel=self._channel)

    @app.errorhandler(Exception)
    def exception_handler(exception):
        if HTTPException and isinstance(exception, HTTPException):
            return exception

        try:
            raise exception
        except Exception:
            exception_telemetry_client.track_exception()
        finally:
            raise exception

    self._exception_telemetry_client = exception_telemetry_client
[ "def", "_init_exception_logging", "(", "self", ",", "app", ")", ":", "enabled", "=", "not", "app", ".", "config", ".", "get", "(", "CONF_DISABLE_EXCEPTION_LOGGING", ",", "False", ")", "if", "not", "enabled", ":", "return", "exception_telemetry_client", "=", "TelemetryClient", "(", "self", ".", "_key", ",", "telemetry_channel", "=", "self", ".", "_channel", ")", "@", "app", ".", "errorhandler", "(", "Exception", ")", "def", "exception_handler", "(", "exception", ")", ":", "if", "HTTPException", "and", "isinstance", "(", "exception", ",", "HTTPException", ")", ":", "return", "exception", "try", ":", "raise", "exception", "except", "Exception", ":", "exception_telemetry_client", ".", "track_exception", "(", ")", "finally", ":", "raise", "exception", "self", ".", "_exception_telemetry_client", "=", "exception_telemetry_client" ]
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.

Args:
    app (flask.Flask): the Flask application for which to initialize the extension.
[ "Sets", "up", "exception", "logging", "unless", "APPINSIGHTS_DISABLE_EXCEPTION_LOGGING", "is", "set", "in", "the", "Flask", "config", "." ]
python
train
32.448276
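This method is wired up by the extension itself; from application code the setup looks roughly like this (AppInsights and the config key are this extension's documented surface, the key value is a placeholder):

from flask import Flask
from applicationinsights.flask.ext import AppInsights

app = Flask(__name__)
app.config['APPINSIGHTS_INSTRUMENTATIONKEY'] = '00000000-0000-0000-0000-000000000000'
appinsights = AppInsights(app)   # installs the Exception errorhandler above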
kstaniek/condoor
condoor/device.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/device.py#L106-L141
def connect(self, ctrl): """Connect to the device.""" if self.prompt: self.prompt_re = self.driver.make_dynamic_prompt(self.prompt) else: self.prompt_re = self.driver.prompt_re self.ctrl = ctrl if self.protocol.connect(self.driver): if self.protocol.authenticate(self.driver): self.ctrl.try_read_prompt(1) if not self.prompt: self.prompt = self.ctrl.detect_prompt() if self.is_target: self.update_config_mode() if self.mode is not None and self.mode != 'global': self.last_error_msg = "Device is not in global mode. Disconnected." self.chain.disconnect() return False self.prompt_re = self.driver.make_dynamic_prompt(self.prompt) self.connected = True if self.is_target is False: if self.os_version is None: self.update_os_version() self.update_hostname() else: self._connected_to_target() return True else: self.connected = False return False
[ "def", "connect", "(", "self", ",", "ctrl", ")", ":", "if", "self", ".", "prompt", ":", "self", ".", "prompt_re", "=", "self", ".", "driver", ".", "make_dynamic_prompt", "(", "self", ".", "prompt", ")", "else", ":", "self", ".", "prompt_re", "=", "self", ".", "driver", ".", "prompt_re", "self", ".", "ctrl", "=", "ctrl", "if", "self", ".", "protocol", ".", "connect", "(", "self", ".", "driver", ")", ":", "if", "self", ".", "protocol", ".", "authenticate", "(", "self", ".", "driver", ")", ":", "self", ".", "ctrl", ".", "try_read_prompt", "(", "1", ")", "if", "not", "self", ".", "prompt", ":", "self", ".", "prompt", "=", "self", ".", "ctrl", ".", "detect_prompt", "(", ")", "if", "self", ".", "is_target", ":", "self", ".", "update_config_mode", "(", ")", "if", "self", ".", "mode", "is", "not", "None", "and", "self", ".", "mode", "!=", "'global'", ":", "self", ".", "last_error_msg", "=", "\"Device is not in global mode. Disconnected.\"", "self", ".", "chain", ".", "disconnect", "(", ")", "return", "False", "self", ".", "prompt_re", "=", "self", ".", "driver", ".", "make_dynamic_prompt", "(", "self", ".", "prompt", ")", "self", ".", "connected", "=", "True", "if", "self", ".", "is_target", "is", "False", ":", "if", "self", ".", "os_version", "is", "None", ":", "self", ".", "update_os_version", "(", ")", "self", ".", "update_hostname", "(", ")", "else", ":", "self", ".", "_connected_to_target", "(", ")", "return", "True", "else", ":", "self", ".", "connected", "=", "False", "return", "False" ]
Connect to the device.
[ "Connect", "to", "the", "device", "." ]
python
train
35.166667
dgomes/pyipma
pyipma/api.py
https://github.com/dgomes/pyipma/blob/cd808abeb70dca0e336afdf55bef3f73973eaa71/pyipma/api.py#L47-L74
async def stations(self):
    """Retrieve stations."""

    data = await self.retrieve(API_DISTRITS)

    Station = namedtuple('Station', ['latitude', 'longitude',
                                     'idAreaAviso', 'idConcelho',
                                     'idDistrito', 'idRegiao',
                                     'globalIdLocal', 'local'])
    _stations = []

    for station in data['data']:
        _station = Station(
            self._to_number(station['latitude']),
            self._to_number(station['longitude']),
            station['idAreaAviso'],
            station['idConcelho'],
            station['idDistrito'],
            station['idRegiao'],
            station['globalIdLocal']//100 * 100,
            station['local'],
        )
        _stations.append(_station)

    return _stations
[ "async", "def", "stations", "(", "self", ")", ":", "data", "=", "await", "self", ".", "retrieve", "(", "API_DISTRITS", ")", "Station", "=", "namedtuple", "(", "'Station'", ",", "[", "'latitude'", ",", "'longitude'", ",", "'idAreaAviso'", ",", "'idConselho'", ",", "'idDistrito'", ",", "'idRegiao'", ",", "'globalIdLocal'", ",", "'local'", "]", ")", "_stations", "=", "[", "]", "for", "station", "in", "data", "[", "'data'", "]", ":", "_station", "=", "Station", "(", "self", ".", "_to_number", "(", "station", "[", "'latitude'", "]", ")", ",", "self", ".", "_to_number", "(", "station", "[", "'longitude'", "]", ")", ",", "station", "[", "'idAreaAviso'", "]", ",", "station", "[", "'idConcelho'", "]", ",", "station", "[", "'idDistrito'", "]", ",", "station", "[", "'idRegiao'", "]", ",", "station", "[", "'globalIdLocal'", "]", "//", "100", "*", "100", ",", "station", "[", "'local'", "]", ",", ")", "_stations", ".", "append", "(", "_station", ")", "return", "_stations" ]
Retrieve stations.
[ "Retrieve", "stations", "." ]
python
train
31.535714
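stations() is a coroutine that needs an aiohttp session; a sketch (the API class name is assumed from this module):

import asyncio
import aiohttp
from pyipma.api import IPMA_API   # class name assumed

async def main():
    async with aiohttp.ClientSession() as session:
        api = IPMA_API(session)
        stations = await api.stations()
        print(stations[0].local, stations[0].latitude)

asyncio.run(main())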
alan-turing-institute/topic-modelling-tools
topicmodels/LDA/gibbs.py
https://github.com/alan-turing-institute/topic-modelling-tools/blob/f0cf90cdd06f1072e824b446f201c7469b9de5df/topicmodels/LDA/gibbs.py#L137-L151
def dt_comp(self, sampled_topics): """ Compute document-topic matrix from sampled_topics. """ samples = sampled_topics.shape[0] dt = np.zeros((self.D, self.K, samples)) for s in range(samples): dt[:, :, s] = \ samplers_lda.dt_comp(self.docid, sampled_topics[s, :], self.N, self.K, self.D, self.alpha) return dt
[ "def", "dt_comp", "(", "self", ",", "sampled_topics", ")", ":", "samples", "=", "sampled_topics", ".", "shape", "[", "0", "]", "dt", "=", "np", ".", "zeros", "(", "(", "self", ".", "D", ",", "self", ".", "K", ",", "samples", ")", ")", "for", "s", "in", "range", "(", "samples", ")", ":", "dt", "[", ":", ",", ":", ",", "s", "]", "=", "samplers_lda", ".", "dt_comp", "(", "self", ".", "docid", ",", "sampled_topics", "[", "s", ",", ":", "]", ",", "self", ".", "N", ",", "self", ".", "K", ",", "self", ".", "D", ",", "self", ".", "alpha", ")", "return", "dt" ]
Compute document-topic matrix from sampled_topics.
[ "Compute", "document", "-", "topic", "matrix", "from", "sampled_topics", "." ]
python
train
29.666667
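A shape-level sketch of dt_comp; 'lda' stands for a fitted sampler from this module, and the random assignments are purely illustrative (the cython kernel may expect a specific integer dtype):

import numpy as np

S = 5                                             # number of saved Gibbs samples
sampled_topics = np.random.randint(0, lda.K, size=(S, lda.N))
dt = lda.dt_comp(sampled_topics)                  # shape (D, K, S)
print(dt.shape)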
kubernetes-client/python
kubernetes/client/apis/core_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L12730-L12757
def list_namespaced_resource_quota(self, namespace, **kwargs):
    """
    list or watch objects of kind ResourceQuota
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_namespaced_resource_quota(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1ResourceQuotaList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs)
    else:
        (data) = self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs)
        return data
[ "def", "list_namespaced_resource_quota", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_namespaced_resource_quota_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "list_namespaced_resource_quota_with_http_info", "(", "namespace", ",", "*", "*", "kwargs", ")", "return", "data" ]
list or watch objects of kind ResourceQuota This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_namespaced_resource_quota(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1ResourceQuotaList If the method is called asynchronously, returns the request thread.
[ "list", "or", "watch", "objects", "of", "kind", "ResourceQuota", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "list_namespaced_resource_quota", "(", "namespace", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
167.571429
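A short usage sketch for this endpoint; the kubeconfig-based setup is the standard pattern for this client, while the namespace and page size below are placeholders.

from kubernetes import client, config

config.load_kube_config()          # reads ~/.kube/config; assumes a reachable cluster
v1 = client.CoreV1Api()

# List resource quotas in a placeholder namespace, one page of up to 10 items.
quotas = v1.list_namespaced_resource_quota("default", limit=10)
for quota in quotas.items:
    print(quota.metadata.name, quota.status.hard)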
acutesoftware/AIKIF
aikif/environments/happiness.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/environments/happiness.py#L129-L153
def solve(self, max_worlds=10000, silent=False):
    """
    find the best world to make people happy
    """
    self.num_worlds = 0
    num_unhappy = 0
    for tax_rate in range(self.tax_range[0],self.tax_range[1]):
        for equity in range(self.equity_range[0],self.equity_range[1]):
            for tradition in range(self.tradition_range[0],self.tradition_range[1]):
                self.num_worlds += 1
                if self.num_worlds > max_worlds:
                    break
                w = World(str(self.num_worlds).zfill(6), [5000, tax_rate/10, tradition/10, equity/10])
                world_happiness = 0
                num_unhappy = 0
                for person in self.all_people:
                    wh = Happiness(person, w)
                    world_happiness += wh.rating
                    if wh.rating < 0:
                        num_unhappy += 1
                if world_happiness > self.net_happiness:
                    self.net_happiness = world_happiness
                    self.unhappy_people = num_unhappy
                    if not silent:
                        print('found better world - ' + w.nme + ' = ' + str(world_happiness) + ' - total unhappy_people = ' + str(self.unhappy_people))
[ "def", "solve", "(", "self", ",", "max_worlds", "=", "10000", ",", "silent", "=", "False", ")", ":", "self", ".", "num_worlds", "=", "0", "num_unhappy", "=", "0", "for", "tax_rate", "in", "range", "(", "self", ".", "tax_range", "[", "0", "]", ",", "self", ".", "tax_range", "[", "1", "]", ")", ":", "for", "equity", "in", "range", "(", "self", ".", "equity_range", "[", "0", "]", ",", "self", ".", "equity_range", "[", "1", "]", ")", ":", "for", "tradition", "in", "range", "(", "self", ".", "tradition_range", "[", "0", "]", ",", "self", ".", "tradition_range", "[", "1", "]", ")", ":", "self", ".", "num_worlds", "+=", "1", "if", "self", ".", "num_worlds", ">", "max_worlds", ":", "break", "w", "=", "World", "(", "str", "(", "self", ".", "num_worlds", ")", ".", "zfill", "(", "6", ")", ",", "[", "5000", ",", "tax_rate", "/", "10", ",", "tradition", "/", "10", ",", "equity", "/", "10", "]", ")", "world_happiness", "=", "0", "num_unhappy", "=", "0", "for", "person", "in", "self", ".", "all_people", ":", "wh", "=", "Happiness", "(", "person", ",", "w", ")", "world_happiness", "+=", "wh", ".", "rating", "if", "wh", ".", "rating", "<", "0", ":", "num_unhappy", "+=", "1", "if", "world_happiness", ">", "self", ".", "net_happiness", ":", "self", ".", "net_happiness", "=", "world_happiness", "self", ".", "unhappy_people", "=", "num_unhappy", "if", "not", "silent", ":", "print", "(", "'found better world - '", "+", "w", ".", "nme", "+", "' = '", "+", "str", "(", "world_happiness", ")", "+", "' - total unhappy_people = '", "+", "str", "(", "self", ".", "unhappy_people", ")", ")" ]
find the best world to make people happy
[ "find", "the", "best", "world", "to", "make", "people", "happy" ]
python
train
52.2
useblocks/sphinxcontrib-needs
sphinxcontrib/needs/directives/need.py
https://github.com/useblocks/sphinxcontrib-needs/blob/f49af4859a74e9fe76de5b9133c01335ac6ae191/sphinxcontrib/needs/directives/need.py#L518-L614
def construct_meta(need_data, env):
    """
    Constructs the node-structure for the status container
    :param need_data: need_info container
    :return: node
    """
    hide_options = env.config.needs_hide_options
    if not isinstance(hide_options, list):
        raise SphinxError('Config parameter needs_hide_options must be of type list')

    node_meta = nodes.line_block(classes=['needs_meta'])
    # need parameters
    param_status = "status: "
    param_tags = "tags: "

    if need_data["status"] is not None and 'status' not in hide_options:
        status_line = nodes.line(classes=['status'])
        # node_status = nodes.line(param_status, param_status, classes=['status'])
        node_status = nodes.inline(param_status, param_status, classes=['status'])
        status_line.append(node_status)
        status_line.append(nodes.inline(need_data["status"], need_data["status"],
                                        classes=["needs-status", str(need_data['status'])]))
        node_meta.append(status_line)

    if need_data["tags"] and 'tags' not in hide_options:
        tag_line = nodes.line(classes=['tags'])
        # node_tags = nodes.line(param_tags, param_tags, classes=['tags'])
        node_tags = nodes.inline(param_tags, param_tags, classes=['tags'])
        tag_line.append(node_tags)
        for tag in need_data['tags']:
            # node_tags.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
            # node_tags.append(nodes.inline(' ', ' '))
            tag_line.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
            tag_line.append(nodes.inline(' ', ' '))
        node_meta.append(tag_line)

    # Links incoming
    if need_data['links_back'] and 'links_back' not in hide_options:
        node_incoming_line = nodes.line(classes=['links', 'incoming'])
        prefix = "links incoming: "
        node_incoming_prefix = nodes.inline(prefix, prefix)
        node_incoming_line.append(node_incoming_prefix)
        node_incoming_links = Need_incoming(reftarget=need_data['id'])
        node_incoming_links.append(nodes.inline(need_data['id'], need_data['id']))
        node_incoming_line.append(node_incoming_links)
        node_meta.append(node_incoming_line)

    # # Links outgoing
    if need_data['links'] and 'links' not in hide_options:
        node_outgoing_line = nodes.line(classes=['links', 'outgoing'])
        prefix = "links outgoing: "
        node_outgoing_prefix = nodes.inline(prefix, prefix)
        node_outgoing_line.append(node_outgoing_prefix)
        node_outgoing_links = Need_outgoing(reftarget=need_data['id'])
        node_outgoing_links.append(nodes.inline(need_data['id'], need_data['id']))
        node_outgoing_line.append(node_outgoing_links)
        node_meta.append(node_outgoing_line)

    extra_options = getattr(env.config, 'needs_extra_options', {})
    node_extra_options = []
    for key, value in extra_options.items():
        if key in hide_options:
            continue
        param_data = need_data[key]
        if param_data is None or not param_data:
            continue
        param_option = '{}: '.format(key)
        option_line = nodes.line(classes=['extra_option'])
        option_line.append(nodes.inline(param_option, param_option, classes=['extra_option']))
        option_line.append(nodes.inline(param_data, param_data,
                                        classes=["needs-extra-option", str(key)]))
        node_extra_options.append(option_line)

    node_meta += node_extra_options

    global_options = getattr(env.config, 'needs_global_options', {})
    node_global_options = []
    for key, value in global_options.items():
        # If a global option got locally overwritten, it must already be part of extra_options.
        # In this case skip output, as this is done during extra_option handling
        if key in extra_options or key in hide_options:
            continue
        param_data = need_data[key]
        if param_data is None or not param_data:
            continue
        param_option = '{}: '.format(key)
        global_option_line = nodes.line(classes=['global_option'])
        global_option_line.append(nodes.inline(param_option, param_option, classes=['global_option']))
        global_option_line.append(nodes.inline(param_data, param_data,
                                               classes=["needs-global-option", str(key)]))
        node_global_options.append(global_option_line)

    node_meta += node_global_options

    return node_meta
[ "def", "construct_meta", "(", "need_data", ",", "env", ")", ":", "hide_options", "=", "env", ".", "config", ".", "needs_hide_options", "if", "not", "isinstance", "(", "hide_options", ",", "list", ")", ":", "raise", "SphinxError", "(", "'Config parameter needs_hide_options must be of type list'", ")", "node_meta", "=", "nodes", ".", "line_block", "(", "classes", "=", "[", "'needs_meta'", "]", ")", "# need parameters", "param_status", "=", "\"status: \"", "param_tags", "=", "\"tags: \"", "if", "need_data", "[", "\"status\"", "]", "is", "not", "None", "and", "'status'", "not", "in", "hide_options", ":", "status_line", "=", "nodes", ".", "line", "(", "classes", "=", "[", "'status'", "]", ")", "# node_status = nodes.line(param_status, param_status, classes=['status'])", "node_status", "=", "nodes", ".", "inline", "(", "param_status", ",", "param_status", ",", "classes", "=", "[", "'status'", "]", ")", "status_line", ".", "append", "(", "node_status", ")", "status_line", ".", "append", "(", "nodes", ".", "inline", "(", "need_data", "[", "\"status\"", "]", ",", "need_data", "[", "\"status\"", "]", ",", "classes", "=", "[", "\"needs-status\"", ",", "str", "(", "need_data", "[", "'status'", "]", ")", "]", ")", ")", "node_meta", ".", "append", "(", "status_line", ")", "if", "need_data", "[", "\"tags\"", "]", "and", "'tags'", "not", "in", "hide_options", ":", "tag_line", "=", "nodes", ".", "line", "(", "classes", "=", "[", "'tags'", "]", ")", "# node_tags = nodes.line(param_tags, param_tags, classes=['tags'])", "node_tags", "=", "nodes", ".", "inline", "(", "param_tags", ",", "param_tags", ",", "classes", "=", "[", "'tags'", "]", ")", "tag_line", ".", "append", "(", "node_tags", ")", "for", "tag", "in", "need_data", "[", "'tags'", "]", ":", "# node_tags.append(nodes.inline(tag, tag, classes=[\"needs-tag\", str(tag)]))", "# node_tags.append(nodes.inline(' ', ' '))", "tag_line", ".", "append", "(", "nodes", ".", "inline", "(", "tag", ",", "tag", ",", "classes", "=", "[", "\"needs-tag\"", ",", "str", "(", "tag", ")", "]", ")", ")", "tag_line", ".", "append", "(", "nodes", ".", "inline", "(", "' '", ",", "' '", ")", ")", "node_meta", ".", "append", "(", "tag_line", ")", "# Links incoming", "if", "need_data", "[", "'links_back'", "]", "and", "'links_back'", "not", "in", "hide_options", ":", "node_incoming_line", "=", "nodes", ".", "line", "(", "classes", "=", "[", "'links'", ",", "'incoming'", "]", ")", "prefix", "=", "\"links incoming: \"", "node_incoming_prefix", "=", "nodes", ".", "inline", "(", "prefix", ",", "prefix", ")", "node_incoming_line", ".", "append", "(", "node_incoming_prefix", ")", "node_incoming_links", "=", "Need_incoming", "(", "reftarget", "=", "need_data", "[", "'id'", "]", ")", "node_incoming_links", ".", "append", "(", "nodes", ".", "inline", "(", "need_data", "[", "'id'", "]", ",", "need_data", "[", "'id'", "]", ")", ")", "node_incoming_line", ".", "append", "(", "node_incoming_links", ")", "node_meta", ".", "append", "(", "node_incoming_line", ")", "# # Links outgoing", "if", "need_data", "[", "'links'", "]", "and", "'links'", "not", "in", "hide_options", ":", "node_outgoing_line", "=", "nodes", ".", "line", "(", "classes", "=", "[", "'links'", ",", "'outgoing'", "]", ")", "prefix", "=", "\"links outgoing: \"", "node_outgoing_prefix", "=", "nodes", ".", "inline", "(", "prefix", ",", "prefix", ")", "node_outgoing_line", ".", "append", "(", "node_outgoing_prefix", ")", "node_outgoing_links", "=", "Need_outgoing", "(", "reftarget", "=", "need_data", "[", "'id'", "]", ")", 
"node_outgoing_links", ".", "append", "(", "nodes", ".", "inline", "(", "need_data", "[", "'id'", "]", ",", "need_data", "[", "'id'", "]", ")", ")", "node_outgoing_line", ".", "append", "(", "node_outgoing_links", ")", "node_meta", ".", "append", "(", "node_outgoing_line", ")", "extra_options", "=", "getattr", "(", "env", ".", "config", ",", "'needs_extra_options'", ",", "{", "}", ")", "node_extra_options", "=", "[", "]", "for", "key", ",", "value", "in", "extra_options", ".", "items", "(", ")", ":", "if", "key", "in", "hide_options", ":", "continue", "param_data", "=", "need_data", "[", "key", "]", "if", "param_data", "is", "None", "or", "not", "param_data", ":", "continue", "param_option", "=", "'{}: '", ".", "format", "(", "key", ")", "option_line", "=", "nodes", ".", "line", "(", "classes", "=", "[", "'extra_option'", "]", ")", "option_line", ".", "append", "(", "nodes", ".", "inline", "(", "param_option", ",", "param_option", ",", "classes", "=", "[", "'extra_option'", "]", ")", ")", "option_line", ".", "append", "(", "nodes", ".", "inline", "(", "param_data", ",", "param_data", ",", "classes", "=", "[", "\"needs-extra-option\"", ",", "str", "(", "key", ")", "]", ")", ")", "node_extra_options", ".", "append", "(", "option_line", ")", "node_meta", "+=", "node_extra_options", "global_options", "=", "getattr", "(", "env", ".", "config", ",", "'needs_global_options'", ",", "{", "}", ")", "node_global_options", "=", "[", "]", "for", "key", ",", "value", "in", "global_options", ".", "items", "(", ")", ":", "# If a global option got locally overwritten, it must already part of extra_options.", "# In this skipp output, as this is done during extra_option handling", "if", "key", "in", "extra_options", "or", "key", "in", "hide_options", ":", "continue", "param_data", "=", "need_data", "[", "key", "]", "if", "param_data", "is", "None", "or", "not", "param_data", ":", "continue", "param_option", "=", "'{}: '", ".", "format", "(", "key", ")", "global_option_line", "=", "nodes", ".", "line", "(", "classes", "=", "[", "'global_option'", "]", ")", "global_option_line", ".", "append", "(", "nodes", ".", "inline", "(", "param_option", ",", "param_option", ",", "classes", "=", "[", "'global_option'", "]", ")", ")", "global_option_line", ".", "append", "(", "nodes", ".", "inline", "(", "param_data", ",", "param_data", ",", "classes", "=", "[", "\"needs-global-option\"", ",", "str", "(", "key", ")", "]", ")", ")", "node_global_options", ".", "append", "(", "global_option_line", ")", "node_meta", "+=", "node_global_options", "return", "node_meta" ]
Constructs the node-structure for the status container :param need_data: need_info container :return: node
[ "Constructs", "the", "node", "-", "structure", "for", "the", "status", "container", ":", "param", "need_data", ":", "need_info", "container", ":", "return", ":", "node" ]
python
train
45.587629
openvax/mhctools
mhctools/cli/parsing_helpers.py
https://github.com/openvax/mhctools/blob/b329b4dccd60fae41296816b8cbfe15d6ca07e67/mhctools/cli/parsing_helpers.py#L17-L37
def parse_int_list(string):
    """
    Parses a string of numbers and ranges into a list of integers. Ranges
    are separated by dashes and inclusive of both the start and end number.

    Example:
        parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
    """
    integers = []
    for comma_part in string.split(","):
        for substring in comma_part.split(" "):
            if len(substring) == 0:
                continue
            if "-" in substring:
                left, right = substring.split("-")
                left_val = int(left.strip())
                right_val = int(right.strip())
                integers.extend(range(left_val, right_val + 1))
            else:
                integers.append(int(substring.strip()))
    return integers
[ "def", "parse_int_list", "(", "string", ")", ":", "integers", "=", "[", "]", "for", "comma_part", "in", "string", ".", "split", "(", "\",\"", ")", ":", "for", "substring", "in", "comma_part", ".", "split", "(", "\" \"", ")", ":", "if", "len", "(", "substring", ")", "==", "0", ":", "continue", "if", "\"-\"", "in", "substring", ":", "left", ",", "right", "=", "substring", ".", "split", "(", "\"-\"", ")", "left_val", "=", "int", "(", "left", ".", "strip", "(", ")", ")", "right_val", "=", "int", "(", "right", ".", "strip", "(", ")", ")", "integers", ".", "extend", "(", "range", "(", "left_val", ",", "right_val", "+", "1", ")", ")", "else", ":", "integers", ".", "append", "(", "int", "(", "substring", ".", "strip", "(", ")", ")", ")", "return", "integers" ]
Parses a string of numbers and ranges into a list of integers. Ranges are separated by dashes and inclusive of both the start and end number. Example: parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
[ "Parses", "a", "string", "of", "numbers", "and", "ranges", "into", "a", "list", "of", "integers", ".", "Ranges", "are", "separated", "by", "dashes", "and", "inclusive", "of", "both", "the", "start", "and", "end", "number", "." ]
python
valid
35.666667
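A quick usage check, lifted directly from the docstring example plus one extra range case:

from mhctools.cli.parsing_helpers import parse_int_list

assert parse_int_list("8 9 10,11-13") == [8, 9, 10, 11, 12, 13]
assert parse_int_list("1-3") == [1, 2, 3]  # dash ranges are inclusive on both ends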
cytoscape/py2cytoscape
py2cytoscape/util/util_networkx.py
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/util/util_networkx.py#L120-L151
def to_networkx(cyjs, directed=True):
    """
    Convert Cytoscape.js-style JSON object into NetworkX object.

    By default, data will be handled as a directed graph.
    """

    if directed:
        g = nx.MultiDiGraph()
    else:
        g = nx.MultiGraph()

    network_data = cyjs[DATA]
    if network_data is not None:
        for key in network_data.keys():
            g.graph[key] = network_data[key]

    nodes = cyjs[ELEMENTS][NODES]
    edges = cyjs[ELEMENTS][EDGES]

    for node in nodes:
        data = node[DATA]
        g.add_node(data[ID], attr_dict=data)

    for edge in edges:
        data = edge[DATA]
        source = data[SOURCE]
        target = data[TARGET]

        g.add_edge(source, target, attr_dict=data)

    return g
[ "def", "to_networkx", "(", "cyjs", ",", "directed", "=", "True", ")", ":", "if", "directed", ":", "g", "=", "nx", ".", "MultiDiGraph", "(", ")", "else", ":", "g", "=", "nx", ".", "MultiGraph", "(", ")", "network_data", "=", "cyjs", "[", "DATA", "]", "if", "network_data", "is", "not", "None", ":", "for", "key", "in", "network_data", ".", "keys", "(", ")", ":", "g", ".", "graph", "[", "key", "]", "=", "network_data", "[", "key", "]", "nodes", "=", "cyjs", "[", "ELEMENTS", "]", "[", "NODES", "]", "edges", "=", "cyjs", "[", "ELEMENTS", "]", "[", "EDGES", "]", "for", "node", "in", "nodes", ":", "data", "=", "node", "[", "DATA", "]", "g", ".", "add_node", "(", "data", "[", "ID", "]", ",", "attr_dict", "=", "data", ")", "for", "edge", "in", "edges", ":", "data", "=", "edge", "[", "DATA", "]", "source", "=", "data", "[", "SOURCE", "]", "target", "=", "data", "[", "TARGET", "]", "g", ".", "add_edge", "(", "source", ",", "target", ",", "attr_dict", "=", "data", ")", "return", "g" ]
Convert Cytoscape.js-style JSON object into NetworkX object. By default, data will be handled as a directed graph.
[ "Convert", "Cytoscape", ".", "js", "-", "style", "JSON", "object", "into", "NetworkX", "object", "." ]
python
train
22.53125
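A minimal round-trip sketch. The element dictionary below follows the standard Cytoscape.js JSON shape that the module constants (DATA, ELEMENTS, NODES, EDGES, ID, SOURCE, TARGET) evidently map to; that mapping is an inference from this record, and note the function targets the NetworkX 1.x API (it passes attr_dict=, which NetworkX 2.x removed).

import networkx as nx
from py2cytoscape.util.util_networkx import to_networkx

cyjs = {
    'data': {'name': 'toy network'},
    'elements': {
        'nodes': [
            {'data': {'id': 'a'}},
            {'data': {'id': 'b'}},
        ],
        'edges': [
            {'data': {'id': 'ab', 'source': 'a', 'target': 'b'}},
        ],
    },
}

g = to_networkx(cyjs)            # directed=True by default -> MultiDiGraph
assert isinstance(g, nx.MultiDiGraph)
assert g.graph['name'] == 'toy network'   # network-level data copied onto g.graph
assert g.has_edge('a', 'b')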
RudolfCardinal/pythonlib
cardinal_pythonlib/fileops.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/fileops.py#L155-L179
def copyglob(src: str, dest: str, allow_nothing: bool = False,
             allow_nonfiles: bool = False) -> None:
    """
    Copies files whose filenames match the glob "src" into the directory
    "dest". Raises an error if no files are copied, unless allow_nothing is
    True.

    Args:
        src: source glob (e.g. ``/somewhere/*.txt``)
        dest: destination directory
        allow_nothing: don't raise an exception if no files are found
        allow_nonfiles: copy things that are not files too (as judged by
            :func:`os.path.isfile`).

    Raises:
        ValueError: if no files are found and ``allow_nothing`` is not set
    """
    something = False
    for filename in glob.glob(src):
        if allow_nonfiles or os.path.isfile(filename):
            shutil.copy(filename, dest)
            something = True
    if something or allow_nothing:
        return
    raise ValueError("No files found matching: {}".format(src))
[ "def", "copyglob", "(", "src", ":", "str", ",", "dest", ":", "str", ",", "allow_nothing", ":", "bool", "=", "False", ",", "allow_nonfiles", ":", "bool", "=", "False", ")", "->", "None", ":", "something", "=", "False", "for", "filename", "in", "glob", ".", "glob", "(", "src", ")", ":", "if", "allow_nonfiles", "or", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "shutil", ".", "copy", "(", "filename", ",", "dest", ")", "something", "=", "True", "if", "something", "or", "allow_nothing", ":", "return", "raise", "ValueError", "(", "\"No files found matching: {}\"", ".", "format", "(", "src", ")", ")" ]
Copies files whose filenames match the glob "src" into the directory "dest". Raises an error if no files are copied, unless allow_nothing is True. Args: src: source glob (e.g. ``/somewhere/*.txt``) dest: destination directory allow_nothing: don't raise an exception if no files are found allow_nonfiles: copy things that are not files too (as judged by :func:`os.path.isfile`). Raises: ValueError: if no files are found and ``allow_nothing`` is not set
[ "Copies", "files", "whose", "filenames", "match", "the", "glob", "src", "into", "the", "directory", "dest", ".", "Raises", "an", "error", "if", "no", "files", "are", "copied", "unless", "allow_nothing", "is", "True", "." ]
python
train
37.12
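A self-contained usage sketch with temporary directories:

import os
import tempfile
from cardinal_pythonlib.fileops import copyglob

src_dir = tempfile.mkdtemp()
dest_dir = tempfile.mkdtemp()
open(os.path.join(src_dir, "a.txt"), "w").close()

copyglob(os.path.join(src_dir, "*.txt"), dest_dir)
assert os.path.isfile(os.path.join(dest_dir, "a.txt"))

# With no matches, allow_nothing=True suppresses the ValueError.
copyglob(os.path.join(src_dir, "*.csv"), dest_dir, allow_nothing=True)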
chakki-works/seqeval
seqeval/callbacks.py
https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L57-L76
def convert_idx_to_name(self, y, lens):
    """Convert label index to name.

    Args:
        y (list): label index list.
        lens (list): true length of y.

    Returns:
        y: label name list.

    Examples:
        >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
        >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
        >>> lens = [1, 2, 3]
        >>> self.convert_idx_to_name(y, lens)
        [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
    """
    y = [[self.id2label[idx] for idx in row[:l]]
         for row, l in zip(y, lens)]
    return y
[ "def", "convert_idx_to_name", "(", "self", ",", "y", ",", "lens", ")", ":", "y", "=", "[", "[", "self", ".", "id2label", "[", "idx", "]", "for", "idx", "in", "row", "[", ":", "l", "]", "]", "for", "row", ",", "l", "in", "zip", "(", "y", ",", "lens", ")", "]", "return", "y" ]
Convert label index to name. Args: y (list): label index list. lens (list): true length of y. Returns: y: label name list. Examples: >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'} >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]] >>> lens = [1, 2, 3] >>> self.convert_idx_to_name(y, lens) [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
[ "Convert", "label", "index", "to", "name", "." ]
python
train
31.35
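The conversion is a single comprehension over (row, length) pairs; restated standalone with the mapping from the docstring example (index 0 is padding and is sliced away by lens, so it needs no entry):

id2label = {1: 'B-LOC', 2: 'I-LOC'}

y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
lens = [1, 2, 3]

# Truncate each row to its true length, then map indices to label names.
names = [[id2label[idx] for idx in row[:l]] for row, l in zip(y, lens)]
assert names == [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]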
merll/docker-fabric
dockerfabric/utils/users.py
https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/utils/users.py#L101-L122
def get_or_create_group(groupname, gid_preset, system=False, id_dependent=True):
    """
    Returns the id for the given group, and creates it first in case it does not exist.

    :param groupname: Group name.
    :type groupname: unicode
    :param gid_preset: Group id to set if a new group is created.
    :type gid_preset: int or unicode
    :param system: Create a system group.
    :type system: bool
    :param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
    :type id_dependent: bool
    :return: Group id of the existing or new group.
    :rtype: int
    """
    gid = get_group_id(groupname)
    if gid is None:
        create_group(groupname, gid_preset, system)
        return gid_preset
    elif id_dependent and gid != gid_preset:
        error("Present group id '{0}' does not match the required id of the environment '{1}'.".format(gid, gid_preset))
    return gid
[ "def", "get_or_create_group", "(", "groupname", ",", "gid_preset", ",", "system", "=", "False", ",", "id_dependent", "=", "True", ")", ":", "gid", "=", "get_group_id", "(", "groupname", ")", "if", "gid", "is", "None", ":", "create_group", "(", "groupname", ",", "gid_preset", ",", "system", ")", "return", "gid_preset", "elif", "id_dependent", "and", "gid", "!=", "gid_preset", ":", "error", "(", "\"Present group id '{0}' does not match the required id of the environment '{1}'.\"", ".", "format", "(", "gid", ",", "gid_preset", ")", ")", "return", "gid" ]
Returns the id for the given group, and creates it first in case it does not exist. :param groupname: Group name. :type groupname: unicode :param gid_preset: Group id to set if a new group is created. :type gid_preset: int or unicode :param system: Create a system group. :type system: bool :param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown. :type id_dependent: bool :return: Group id of the existing or new group. :rtype: int
[ "Returns", "the", "id", "for", "the", "given", "group", "and", "creates", "it", "first", "in", "case", "it", "does", "not", "exist", "." ]
python
train
41.409091
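A hedged usage sketch: this helper executes remote commands via Fabric, so it only works inside a Fabric task with an active host connection; the group name and id below are placeholders.

from dockerfabric.utils.users import get_or_create_group

# Inside a Fabric task targeting a host (placeholder values):
gid = get_or_create_group('service_grp', 2000, system=True)
# Returns 2000 whether the group was just created or already existed with that id;
# with id_dependent=True (the default), a pre-existing group with a different id calls error().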
saltstack/salt
salt/modules/nftables.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nftables.py#L337-L367
def save(filename=None, family='ipv4'):
    '''
    Save the current in-memory rules to disk

    CLI Example:

    .. code-block:: bash

        salt '*' nftables.save /etc/nftables
    '''
    if _conf() and not filename:
        filename = _conf()

    nft_families = ['ip', 'ip6', 'arp', 'bridge']
    rules = "#! nft -f\n"

    for family in nft_families:
        out = get_rules(family)
        if out:
            rules += '\n'
        rules = rules + '\n'.join(out)
    rules = rules + '\n'

    try:
        with salt.utils.files.fopen(filename, 'wb') as _fh:
            # Write out any changes
            _fh.writelines(salt.utils.data.encode(rules))
    except (IOError, OSError) as exc:
        raise CommandExecutionError(
            'Problem writing to configuration file: {0}'.format(exc)
        )
    return rules
[ "def", "save", "(", "filename", "=", "None", ",", "family", "=", "'ipv4'", ")", ":", "if", "_conf", "(", ")", "and", "not", "filename", ":", "filename", "=", "_conf", "(", ")", "nft_families", "=", "[", "'ip'", ",", "'ip6'", ",", "'arp'", ",", "'bridge'", "]", "rules", "=", "\"#! nft -f\\n\"", "for", "family", "in", "nft_families", ":", "out", "=", "get_rules", "(", "family", ")", "if", "out", ":", "rules", "+=", "'\\n'", "rules", "=", "rules", "+", "'\\n'", ".", "join", "(", "out", ")", "rules", "=", "rules", "+", "'\\n'", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "filename", ",", "'wb'", ")", "as", "_fh", ":", "# Write out any changes", "_fh", ".", "writelines", "(", "salt", ".", "utils", ".", "data", ".", "encode", "(", "rules", ")", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Problem writing to configuration file: {0}'", ".", "format", "(", "exc", ")", ")", "return", "rules" ]
Save the current in-memory rules to disk CLI Example: .. code-block:: bash salt '*' nftables.save /etc/nftables
[ "Save", "the", "current", "in", "-", "memory", "rules", "to", "disk" ]
python
train
25.870968
etcher-be/emiz
emiz/avwx/__init__.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/avwx/__init__.py#L115-L121
def speech(self) -> str:
    """
    Report summary designed to be read by a text-to-speech program
    """
    if not self.data:
        self.update()
    return speech.metar(self.data, self.units)
[ "def", "speech", "(", "self", ")", "->", "str", ":", "if", "not", "self", ".", "data", ":", "self", ".", "update", "(", ")", "return", "speech", ".", "metar", "(", "self", ".", "data", ",", "self", ".", "units", ")" ]
Report summary designed to be read by a text-to-speech program
[ "Report", "summary", "designed", "to", "be", "read", "by", "a", "text", "-", "to", "-", "speech", "program" ]
python
train
30.857143
allenai/allennlp
allennlp/common/util.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/util.py#L177-L206
def prepare_environment(params: Params):
    """
    Sets random seeds for reproducible experiments. This may not work as expected
    if you use this from within a python project in which you have already imported Pytorch.
    If you use the scripts/run_model.py entry point to training models with this library,
    your experiments should be reasonably reproducible. If you are using this from your own
    project, you will want to call this function before importing Pytorch. Complete determinism
    is very difficult to achieve with libraries doing optimized linear algebra due to massively
    parallel execution, which is exacerbated by using GPUs.

    Parameters
    ----------
    params: Params object or dict, required.
        A ``Params`` object or dict holding the json parameters.
    """
    seed = params.pop_int("random_seed", 13370)
    numpy_seed = params.pop_int("numpy_seed", 1337)
    torch_seed = params.pop_int("pytorch_seed", 133)

    if seed is not None:
        random.seed(seed)
    if numpy_seed is not None:
        numpy.random.seed(numpy_seed)
    if torch_seed is not None:
        torch.manual_seed(torch_seed)
        # Seed all GPUs with the same seed if available.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(torch_seed)

    log_pytorch_version_info()
[ "def", "prepare_environment", "(", "params", ":", "Params", ")", ":", "seed", "=", "params", ".", "pop_int", "(", "\"random_seed\"", ",", "13370", ")", "numpy_seed", "=", "params", ".", "pop_int", "(", "\"numpy_seed\"", ",", "1337", ")", "torch_seed", "=", "params", ".", "pop_int", "(", "\"pytorch_seed\"", ",", "133", ")", "if", "seed", "is", "not", "None", ":", "random", ".", "seed", "(", "seed", ")", "if", "numpy_seed", "is", "not", "None", ":", "numpy", ".", "random", ".", "seed", "(", "numpy_seed", ")", "if", "torch_seed", "is", "not", "None", ":", "torch", ".", "manual_seed", "(", "torch_seed", ")", "# Seed all GPUs with the same seed if available.", "if", "torch", ".", "cuda", ".", "is_available", "(", ")", ":", "torch", ".", "cuda", ".", "manual_seed_all", "(", "torch_seed", ")", "log_pytorch_version_info", "(", ")" ]
Sets random seeds for reproducible experiments. This may not work as expected if you use this from within a python project in which you have already imported Pytorch. If you use the scripts/run_model.py entry point to training models with this library, your experiments should be reasonably reproducible. If you are using this from your own project, you will want to call this function before importing Pytorch. Complete determinism is very difficult to achieve with libraries doing optimized linear algebra due to massively parallel execution, which is exacerbated by using GPUs. Parameters ---------- params: Params object or dict, required. A ``Params`` object or dict holding the json parameters.
[ "Sets", "random", "seeds", "for", "reproducible", "experiments", ".", "This", "may", "not", "work", "as", "expected", "if", "you", "use", "this", "from", "within", "a", "python", "project", "in", "which", "you", "have", "already", "imported", "Pytorch", ".", "If", "you", "use", "the", "scripts", "/", "run_model", ".", "py", "entry", "point", "to", "training", "models", "with", "this", "library", "your", "experiments", "should", "be", "reasonably", "reproducible", ".", "If", "you", "are", "using", "this", "from", "your", "own", "project", "you", "will", "want", "to", "call", "this", "function", "before", "importing", "Pytorch", ".", "Complete", "determinism", "is", "very", "difficult", "to", "achieve", "with", "libraries", "doing", "optimized", "linear", "algebra", "due", "to", "massively", "parallel", "execution", "which", "is", "exacerbated", "by", "using", "GPUs", "." ]
python
train
43.266667
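A usage sketch; Params accepts a plain dict, and omitted keys fall back to the defaults shown above (13370 / 1337 / 133):

from allennlp.common.params import Params
from allennlp.common.util import prepare_environment

# Seed python, numpy and torch (CPU and all GPUs) in one call.
prepare_environment(Params({
    "random_seed": 42,
    "numpy_seed": 42,
    "pytorch_seed": 42,
}))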
BernardFW/bernard
src/bernard/platforms/facebook/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L193-L203
async def get_timezone(self) -> Optional[tzinfo]:
    """
    We can't know the user's time zone exactly from what Facebook
    provides, but we can still compute an offset that will work until
    the next DST change.
    """

    u = await self._get_user()
    diff = float(u.get('timezone', 0)) * 3600.0
    return tz.tzoffset('ITC', diff)
[ "async", "def", "get_timezone", "(", "self", ")", "->", "Optional", "[", "tzinfo", "]", ":", "u", "=", "await", "self", ".", "_get_user", "(", ")", "diff", "=", "float", "(", "u", ".", "get", "(", "'timezone'", ",", "0", ")", ")", "*", "3600.0", "return", "tz", ".", "tzoffset", "(", "'ITC'", ",", "diff", ")" ]
We can't know the user's time zone exactly from what Facebook provides, but we can still compute an offset that will work until the next DST change.
[ "We", "can", "t", "exactly", "know", "the", "time", "zone", "of", "the", "user", "from", "what", "Facebook", "gives", "(", "fucking", "morons", ")", "but", "we", "can", "still", "give", "something", "that", "ll", "work", "until", "next", "DST", "." ]
python
train
33.363636
wavefrontHQ/python-client
wavefront_api_client/api/settings_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/settings_api.py#L210-L230
def get_default_user_groups(self, **kwargs):  # noqa: E501
    """Get default user groups customer preferences  # noqa: E501

    # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_default_user_groups(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param User body:
    :return: ResponseContainerListUserGroup
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_default_user_groups_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.get_default_user_groups_with_http_info(**kwargs)  # noqa: E501
        return data
[ "def", "get_default_user_groups", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "get_default_user_groups_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "get_default_user_groups_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
Get default user groups customer preferences # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_default_user_groups(async_req=True) >>> result = thread.get() :param async_req bool :param User body: :return: ResponseContainerListUserGroup If the method is called asynchronously, returns the request thread.
[ "Get", "default", "user", "groups", "customer", "preferences", "#", "noqa", ":", "E501" ]
python
train
42.190476
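A hedged usage sketch following the usual swagger-generated client pattern for this package (Configuration / ApiClient / SettingsApi); the cluster URL and token are placeholders, and the exact auth attribute names are assumptions from that pattern rather than from this record:

import wavefront_api_client as wave

config = wave.Configuration()
config.host = 'https://example.wavefront.com'    # placeholder cluster URL
config.api_key['X-AUTH-TOKEN'] = 'TOKEN'         # placeholder API token

api = wave.SettingsApi(wave.ApiClient(config))
groups = api.get_default_user_groups()           # ResponseContainerListUserGroup
print(groups.response)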
jeroyang/txttk
txttk/nlptools.py
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L70-L89
def word_tokenize(sentence):
    """
    A generator which yields tokens based on the given sentence without deleting anything.

    >>> context = "I love you. Please don't leave."
    >>> list(word_tokenize(context))
    ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
    """
    date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
    number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
    arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
    word_pattern = r'[\w]+'
    non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
    space_pattern = r'\s'
    anything_pattern = r'.'
    patterns = [date_pattern, number_pattern, arr_pattern, word_pattern,
                non_space_pattern, space_pattern, anything_pattern]
    big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
    for match in re.finditer(big_pattern, sentence):
        yield match.group(0)
[ "def", "word_tokenize", "(", "sentence", ")", ":", "date_pattern", "=", "r'\\d\\d(\\d\\d)?[\\\\-]\\d\\d[\\\\-]\\d\\d(\\d\\d)?'", "number_pattern", "=", "r'[\\+-]?(\\d+\\.\\d+|\\d{1,3},(\\d{3},)*\\d{3}|\\d+)'", "arr_pattern", "=", "r'(?: \\w\\.){2,3}|(?:\\A|\\s)(?:\\w\\.){2,3}|[A-Z]\\. [a-z]'", "word_pattern", "=", "r'[\\w]+'", "non_space_pattern", "=", "r'[{}]|\\w'", ".", "format", "(", "re", ".", "escape", "(", "'!\"#$%&()*,./:;<=>?@[\\]^_-`{|}~'", ")", ")", "space_pattern", "=", "r'\\s'", "anything_pattern", "=", "r'.'", "patterns", "=", "[", "date_pattern", ",", "number_pattern", ",", "arr_pattern", ",", "word_pattern", ",", "non_space_pattern", ",", "space_pattern", ",", "anything_pattern", "]", "big_pattern", "=", "r'|'", ".", "join", "(", "[", "(", "'('", "+", "pattern", "+", "')'", ")", "for", "pattern", "in", "patterns", "]", ")", "for", "match", "in", "re", ".", "finditer", "(", "big_pattern", ",", "sentence", ")", ":", "yield", "match", ".", "group", "(", "0", ")" ]
A generator which yields tokens based on the given sentence without deleting anything. >>> context = "I love you. Please don't leave." >>> list(word_tokenize(context)) ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
[ "A", "generator", "which", "yields", "tokens", "based", "on", "the", "given", "sentence", "without", "deleting", "anything", "." ]
python
train
47.8
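Because the tokenizer deletes nothing, concatenating the tokens must reproduce the input exactly; a quick check of that invariant:

from txttk.nlptools import word_tokenize

context = "I love you. Please don't leave."
tokens = list(word_tokenize(context))
assert ''.join(tokens) == context  # lossless: every character survives tokenization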
python-wink/python-wink
src/pywink/devices/button.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/button.py#L18-L23
def update_state(self):
    """
    Update state with latest info from Wink API.
    """
    response = self.api_interface.get_device_state(self, type_override="button")
    return self._update_state_from_response(response)
[ "def", "update_state", "(", "self", ")", ":", "response", "=", "self", ".", "api_interface", ".", "get_device_state", "(", "self", ",", "type_override", "=", "\"button\"", ")", "return", "self", ".", "_update_state_from_response", "(", "response", ")" ]
Update state with latest info from Wink API.
[ "Update", "state", "with", "latest", "info", "from", "Wink", "API", "." ]
python
train
39.666667
hanzhichao2000/pysentiment
pysentiment/base.py
https://github.com/hanzhichao2000/pysentiment/blob/ea2ac15f38ee2f68f0ef2bbb48b89acdd9c7f766/pysentiment/base.py#L101-L121
def get_score(self, terms):
    """Get score for a list of terms.

    :type terms: list
    :param terms: A list of terms to be analyzed.

    :returns: dict
    """
    assert isinstance(terms, list) or isinstance(terms, tuple)
    score_li = np.asarray([self._get_score(t) for t in terms])

    s_pos = np.sum(score_li[score_li > 0])
    s_neg = -np.sum(score_li[score_li < 0])

    s_pol = (s_pos-s_neg) * 1.0 / ((s_pos+s_neg)+self.EPSILON)
    s_sub = (s_pos+s_neg) * 1.0 / (len(score_li)+self.EPSILON)

    return {self.TAG_POS: s_pos,
            self.TAG_NEG: s_neg,
            self.TAG_POL: s_pol,
            self.TAG_SUB: s_sub}
[ "def", "get_score", "(", "self", ",", "terms", ")", ":", "assert", "isinstance", "(", "terms", ",", "list", ")", "or", "isinstance", "(", "terms", ",", "tuple", ")", "score_li", "=", "np", ".", "asarray", "(", "[", "self", ".", "_get_score", "(", "t", ")", "for", "t", "in", "terms", "]", ")", "s_pos", "=", "np", ".", "sum", "(", "score_li", "[", "score_li", ">", "0", "]", ")", "s_neg", "=", "-", "np", ".", "sum", "(", "score_li", "[", "score_li", "<", "0", "]", ")", "s_pol", "=", "(", "s_pos", "-", "s_neg", ")", "*", "1.0", "/", "(", "(", "s_pos", "+", "s_neg", ")", "+", "self", ".", "EPSILON", ")", "s_sub", "=", "(", "s_pos", "+", "s_neg", ")", "*", "1.0", "/", "(", "len", "(", "score_li", ")", "+", "self", ".", "EPSILON", ")", "return", "{", "self", ".", "TAG_POS", ":", "s_pos", ",", "self", ".", "TAG_NEG", ":", "s_neg", ",", "self", ".", "TAG_POL", ":", "s_pol", ",", "self", ".", "TAG_SUB", ":", "s_sub", "}" ]
Get score for a list of terms. :type terms: list :param terms: A list of terms to be analyzed. :returns: dict
[ "Get", "score", "for", "a", "list", "of", "terms", ".", ":", "type", "terms", ":", "list", ":", "param", "terms", ":", "A", "list", "of", "terms", "to", "be", "analyzed", ".", ":", "returns", ":", "dict" ]
python
train
34.285714
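A usage sketch with the package's Harvard IV-4 dictionary; the HIV4 entry point and the output keys ('Positive', 'Negative', 'Polarity', 'Subjectivity') follow the library's documented usage, with polarity and subjectivity computed exactly as above:

import pysentiment as ps

hiv4 = ps.HIV4()
tokens = hiv4.tokenize("The market is performing well, though risks remain.")
score = hiv4.get_score(tokens)
print(score['Polarity'], score['Subjectivity'])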
reingart/gui2py
gui/menu.py
https://github.com/reingart/gui2py/blob/aca0a05f6fcde55c94ad7cc058671a06608b01a4/gui/menu.py#L229-L237
def find(self, item_id=None):
    "Recursively find a menu item by its id (useful for event handlers)"
    for it in self:
        if it.id == item_id:
            return it
        elif isinstance(it, Menu):
            found = it.find(item_id)
            if found:
                return found
[ "def", "find", "(", "self", ",", "item_id", "=", "None", ")", ":", "for", "it", "in", "self", ":", "if", "it", ".", "id", "==", "item_id", ":", "return", "it", "elif", "isinstance", "(", "it", ",", "Menu", ")", ":", "found", "=", "it", ".", "find", "(", "item_id", ")", "if", "found", ":", "return", "found" ]
Recursively find a menu item by its id (useful for event handlers)
[ "Recursively", "find", "a", "menu", "item", "by", "its", "id", "(", "useful", "for", "event", "handlers", ")" ]
python
test
36.444444
theosysbio/means
src/means/core/problems.py
https://github.com/theosysbio/means/blob/fe164916a1d84ab2a4fa039871d38ccdf638b1db/src/means/core/problems.py#L185-L206
def _repr_latex_(self):
    """
    This is used in the IPython notebook. It allows us to render the
    ODEProblem object in LaTeX. How cool is this?
    """
    # TODO: we're mixing HTML with latex here. That is not necessarily a good idea, but works
    # with IPython 1.2.0. Once IPython 2.0 is released, this needs to be changed to _ipython_display_
    lines = []
    lines.append(r"<h1>{0}</h1>".format(self.__class__.__name__))
    lines.append("<p>Method: <code>{0!r}</code></p>".format(self.method))
    lines.append("<p>Parameters: <code>{0!r}</code></p>".format(self.parameters))
    lines.append("<p>Terms:</p>")
    lines.append("<ul>")
    lines.extend(['<li><code>{0!r}</code></li>'.format(lhs) for lhs in self.left_hand_side_descriptors])
    lines.append("</ul>")
    lines.append('<hr />')
    lines.append(r"\begin{align*}")
    for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
        lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol), sympy.latex(rhs)))
    lines.append(r"\end{align*}")
    return "\n".join(lines)
[ "def", "_repr_latex_", "(", "self", ")", ":", "# TODO: we're mixing HTML with latex here. That is not necessarily a good idea, but works", "# with IPython 1.2.0. Once IPython 2.0 is released, this needs to be changed to _ipython_display_", "lines", "=", "[", "]", "lines", ".", "append", "(", "r\"<h1>{0}</h1>\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "lines", ".", "append", "(", "\"<p>Method: <code>{0!r}</code></p>\"", ".", "format", "(", "self", ".", "method", ")", ")", "lines", ".", "append", "(", "\"<p>Parameters: <code>{0!r}</code></p>\"", ".", "format", "(", "self", ".", "parameters", ")", ")", "lines", ".", "append", "(", "\"<p>Terms:</p>\"", ")", "lines", ".", "append", "(", "\"<ul>\"", ")", "lines", ".", "extend", "(", "[", "'<li><code>{0!r}</code></li>'", ".", "format", "(", "lhs", ")", "for", "lhs", "in", "self", ".", "left_hand_side_descriptors", "]", ")", "lines", ".", "append", "(", "\"</ul>\"", ")", "lines", ".", "append", "(", "'<hr />'", ")", "lines", ".", "append", "(", "r\"\\begin{align*}\"", ")", "for", "lhs", ",", "rhs", "in", "zip", "(", "self", ".", "left_hand_side_descriptors", ",", "self", ".", "right_hand_side", ")", ":", "lines", ".", "append", "(", "r\"\\dot{{{0}}} &= {1} \\\\\"", ".", "format", "(", "sympy", ".", "latex", "(", "lhs", ".", "symbol", ")", ",", "sympy", ".", "latex", "(", "rhs", ")", ")", ")", "lines", ".", "append", "(", "r\"\\end{align*}\"", ")", "return", "\"\\n\"", ".", "join", "(", "lines", ")" ]
This is used in the IPython notebook. It allows us to render the ODEProblem object in LaTeX. How cool is this?
[ "This", "is", "used", "in", "IPython", "notebook", "it", "allows", "us", "to", "render", "the", "ODEProblem", "object", "in", "LaTeX", ".", "How", "Cool", "is", "this?" ]
python
train
51.727273
meraki-analytics/datapipelines-python
datapipelines/sources.py
https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/sources.py#L49-L59
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
    """Gets a query from the data source, which contains a request for multiple objects.

    Args:
        query: The query being requested (contains a request for multiple objects).
        context: The context for the extraction (mutable).

    Returns:
        The requested objects.
    """
    pass
[ "def", "get_many", "(", "self", ",", "type", ":", "Type", "[", "T", "]", ",", "query", ":", "Mapping", "[", "str", ",", "Any", "]", ",", "context", ":", "PipelineContext", "=", "None", ")", "->", "Iterable", "[", "T", "]", ":", "pass" ]
Gets a query from the data source, which contains a request for multiple objects. Args: query: The query being requested (contains a request for multiple objects). context: The context for the extraction (mutable). Returns: The requested objects.
[ "Gets", "a", "query", "from", "the", "data", "source", "which", "contains", "a", "request", "for", "multiple", "objects", "." ]
python
train
39.545455
mwgielen/jackal
jackal/scripts/nessus.py
https://github.com/mwgielen/jackal/blob/7fe62732eb5194b7246215d5277fb37c398097bf/jackal/scripts/nessus.py#L24-L32
def get_template_uuid(self):
    """
    Retrieves the uuid of the given template name.
    """
    response = requests.get(self.url + 'editor/scan/templates', headers=self.headers, verify=False)
    templates = json.loads(response.text)
    for template in templates['templates']:
        if template['name'] == self.template_name:
            return template['uuid']
[ "def", "get_template_uuid", "(", "self", ")", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "url", "+", "'editor/scan/templates'", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "False", ")", "templates", "=", "json", ".", "loads", "(", "response", ".", "text", ")", "for", "template", "in", "templates", "[", "'templates'", "]", ":", "if", "template", "[", "'name'", "]", "==", "self", ".", "template_name", ":", "return", "template", "[", "'uuid'", "]" ]
Retrieves the uuid of the given template name.
[ "Retrieves", "the", "uuid", "of", "the", "given", "template", "name", "." ]
python
valid
44
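A standalone sketch of the same Nessus template lookup with plain requests; the scanner URL, API keys and template name are placeholders (the record only fixes the endpoint path and the response shape):

import json
import requests

url = 'https://nessus.example.local:8834/'             # placeholder scanner URL
headers = {'X-ApiKeys': 'accessKey=AK; secretKey=SK'}  # placeholder credentials
template_name = 'basic'

response = requests.get(url + 'editor/scan/templates',
                        headers=headers, verify=False)
templates = json.loads(response.text)
# First template whose name matches, or None if absent.
uuid = next((t['uuid'] for t in templates['templates']
             if t['name'] == template_name), None)
print(uuid)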
Arello-Mobile/swagger2rst
swg2rst/utils/rst.py
https://github.com/Arello-Mobile/swagger2rst/blob/e519f70701477dcc9f0bb237ee5b8e08e848701b/swg2rst/utils/rst.py#L74-L95
def get_type_description(self, _type, suffix='', *args, **kwargs):
    """
    Get description of type

    :param suffix:
    :param str _type:
    :rtype: str
    """
    if not SchemaObjects.contains(_type):
        return _type
    schema = SchemaObjects.get(_type)
    if schema.all_of:
        models = ','.join(
            (self.get_type_description(_type, *args, **kwargs) for _type in schema.all_of)
        )
        result = '{}'.format(models.split(',')[0])
        for r in models.split(',')[1:]:
            result += ' extended {}'.format(r)
    elif schema.is_array:
        result = 'array of {}'.format(
            self.get_type_description(schema.item['type'], *args, **kwargs))
    else:
        result = ':ref:`{} <{}{}>`'.format(schema.name, schema.schema_id, suffix)
    return result
[ "def", "get_type_description", "(", "self", ",", "_type", ",", "suffix", "=", "''", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "SchemaObjects", ".", "contains", "(", "_type", ")", ":", "return", "_type", "schema", "=", "SchemaObjects", ".", "get", "(", "_type", ")", "if", "schema", ".", "all_of", ":", "models", "=", "','", ".", "join", "(", "(", "self", ".", "get_type_description", "(", "_type", ",", "*", "args", ",", "*", "*", "kwargs", ")", "for", "_type", "in", "schema", ".", "all_of", ")", ")", "result", "=", "'{}'", ".", "format", "(", "models", ".", "split", "(", "','", ")", "[", "0", "]", ")", "for", "r", "in", "models", ".", "split", "(", "','", ")", "[", "1", ":", "]", ":", "result", "+=", "' extended {}'", ".", "format", "(", "r", ")", "elif", "schema", ".", "is_array", ":", "result", "=", "'array of {}'", ".", "format", "(", "self", ".", "get_type_description", "(", "schema", ".", "item", "[", "'type'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")", "else", ":", "result", "=", "':ref:`{} <{}{}>`'", ".", "format", "(", "schema", ".", "name", ",", "schema", ".", "schema_id", ",", "suffix", ")", "return", "result" ]
Get description of type :param suffix: :param str _type: :rtype: str
[ "Get", "description", "of", "type", ":", "param", "suffix", ":", ":", "param", "str", "_type", ":", ":", "rtype", ":", "str" ]
python
train
39.409091