Dataset columns:
    repo: string (7 to 54 chars)
    path: string (4 to 192 chars)
    url: string (87 to 284 chars)
    code: string (78 to 104k chars)
    code_tokens: list
    docstring: string (1 to 46.9k chars)
    docstring_tokens: list
    language: string (1 distinct value)
    partition: string (3 distinct values)
elifesciences/elife-tools
elifetools/parseJATS.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L37-L45
def title_prefix(soup):
    "titlePrefix for article JSON is only articles with certain display_channel values"
    prefix = None
    display_channel_match_list = ['feature article', 'insight', 'editorial']
    for d_channel in display_channel(soup):
        if d_channel.lower() in display_channel_match_list:
            if raw_parser.sub_display_channel(soup):
                prefix = node_text(first(raw_parser.sub_display_channel(soup)))
    return prefix
[ "def", "title_prefix", "(", "soup", ")", ":", "prefix", "=", "None", "display_channel_match_list", "=", "[", "'feature article'", ",", "'insight'", ",", "'editorial'", "]", "for", "d_channel", "in", "display_channel", "(", "soup", ")", ":", "if", "d_channel", ".", "lower", "(", ")", "in", "display_channel_match_list", ":", "if", "raw_parser", ".", "sub_display_channel", "(", "soup", ")", ":", "prefix", "=", "node_text", "(", "first", "(", "raw_parser", ".", "sub_display_channel", "(", "soup", ")", ")", ")", "return", "prefix" ]
titlePrefix for article JSON is only articles with certain display_channel values
[ "titlePrefix", "for", "article", "JSON", "is", "only", "articles", "with", "certain", "display_channel", "values" ]
python
train
tsroten/pynlpir
pynlpir/pos_map.py
https://github.com/tsroten/pynlpir/blob/8d5e994796a2b5d513f7db8d76d7d24a85d531b1/pynlpir/pos_map.py#L173-L191
def get_pos_name(code, name='parent', english=True, pos_tags=POS_MAP):
    """Gets the part of speech name for *code*.

    :param str code: The part of speech code to lookup, e.g. ``'nsf'``.
    :param str name: Which part of speech name to include in the output. Must
        be one of ``'parent'``, ``'child'``, or ``'all'``. Defaults to
        ``'parent'``. ``'parent'`` indicates that only the most generic name
        should be used, e.g. ``'noun'`` for ``'nsf'``. ``'child'`` indicates
        that the most specific name should be used, e.g. ``'transcribed
        toponym'`` for ``'nsf'``. ``'all'`` indicates that all names should be
        used, e.g. ``('noun', 'toponym', 'transcribed toponym')`` for ``'nsf'``.
    :param bool english: Whether to return an English or Chinese name.
    :param dict pos_tags: Custom part of speech tags to use.
    :returns: ``str`` (``unicode`` for Python 2) if *name* is ``'parent'`` or
        ``'child'``. ``tuple`` if *name* is ``'all'``.
    """
    return _get_pos_name(code, name, english, pos_tags)
[ "def", "get_pos_name", "(", "code", ",", "name", "=", "'parent'", ",", "english", "=", "True", ",", "pos_tags", "=", "POS_MAP", ")", ":", "return", "_get_pos_name", "(", "code", ",", "name", ",", "english", ",", "pos_tags", ")" ]
Gets the part of speech name for *code*. :param str code: The part of speech code to lookup, e.g. ``'nsf'``. :param str name: Which part of speech name to include in the output. Must be one of ``'parent'``, ``'child'``, or ``'all'``. Defaults to ``'parent'``. ``'parent'`` indicates that only the most generic name should be used, e.g. ``'noun'`` for ``'nsf'``. ``'child'`` indicates that the most specific name should be used, e.g. ``'transcribed toponym'`` for ``'nsf'``. ``'all'`` indicates that all names should be used, e.g. ``('noun', 'toponym', 'transcribed toponym')`` for ``'nsf'``. :param bool english: Whether to return an English or Chinese name. :param dict pos_tags: Custom part of speech tags to use. :returns: ``str`` (``unicode`` for Python 2) if *name* is ``'parent'`` or ``'child'``. ``tuple`` if *name* is ``'all'``.
[ "Gets", "the", "part", "of", "speech", "name", "for", "*", "code", "*", "." ]
python
train
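A minimal usage sketch for get_pos_name above; the ``'nsf'`` code and the expected return values are taken from the docstring's own examples, and the import path follows the record's file path.

from pynlpir.pos_map import get_pos_name

print(get_pos_name('nsf'))                 # 'noun' (most generic name)
print(get_pos_name('nsf', name='child'))   # 'transcribed toponym' (most specific name)
print(get_pos_name('nsf', name='all'))     # ('noun', 'toponym', 'transcribed toponym')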
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/symbol/symbol_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/symbol/symbol_client.py#L72-L93
def create_requests_request_id_debug_entries(self, batch, request_id, collection):
    """CreateRequestsRequestIdDebugEntries.
    [Preview API] Create debug entries for a symbol request as specified by its identifier.
    :param :class:`<DebugEntryCreateBatch> <azure.devops.v5_0.symbol.models.DebugEntryCreateBatch>` batch: A batch that contains debug entries to create.
    :param str request_id: The symbol request identifier.
    :param str collection: A valid debug entry collection name. Must be "debugentries".
    :rtype: [DebugEntry]
    """
    route_values = {}
    if request_id is not None:
        route_values['requestId'] = self._serialize.url('request_id', request_id, 'str')
    query_parameters = {}
    if collection is not None:
        query_parameters['collection'] = self._serialize.query('collection', collection, 'str')
    content = self._serialize.body(batch, 'DebugEntryCreateBatch')
    response = self._send(http_method='POST',
                          location_id='ebc09fe3-1b20-4667-abc5-f2b60fe8de52',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('[DebugEntry]', self._unwrap_collection(response))
[ "def", "create_requests_request_id_debug_entries", "(", "self", ",", "batch", ",", "request_id", ",", "collection", ")", ":", "route_values", "=", "{", "}", "if", "request_id", "is", "not", "None", ":", "route_values", "[", "'requestId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'request_id'", ",", "request_id", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "collection", "is", "not", "None", ":", "query_parameters", "[", "'collection'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'collection'", ",", "collection", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "batch", ",", "'DebugEntryCreateBatch'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'ebc09fe3-1b20-4667-abc5-f2b60fe8de52'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'[DebugEntry]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
CreateRequestsRequestIdDebugEntries. [Preview API] Create debug entries for a symbol request as specified by its identifier. :param :class:`<DebugEntryCreateBatch> <azure.devops.v5_0.symbol.models.DebugEntryCreateBatch>` batch: A batch that contains debug entries to create. :param str request_id: The symbol request identifier. :param str collection: A valid debug entry collection name. Must be "debugentries". :rtype: [DebugEntry]
[ "CreateRequestsRequestIdDebugEntries", ".", "[", "Preview", "API", "]", "Create", "debug", "entries", "for", "a", "symbol", "request", "as", "specified", "by", "its", "identifier", ".", ":", "param", ":", "class", ":", "<DebugEntryCreateBatch", ">", "<azure", ".", "devops", ".", "v5_0", ".", "symbol", ".", "models", ".", "DebugEntryCreateBatch", ">", "batch", ":", "A", "batch", "that", "contains", "debug", "entries", "to", "create", ".", ":", "param", "str", "request_id", ":", "The", "symbol", "request", "identifier", ".", ":", "param", "str", "collection", ":", "A", "valid", "debug", "entry", "collection", "name", ".", "Must", "be", "debugentries", ".", ":", "rtype", ":", "[", "DebugEntry", "]" ]
python
train
nikcub/floyd
floyd/util/dateformat.py
https://github.com/nikcub/floyd/blob/5772d0047efb11c9ce5f7d234a9da4576ce24edc/floyd/util/dateformat.py#L377-L388
def Z(self):
    """
    Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
    timezones west of UTC is always negative, and for those east of UTC is
    always positive.
    """
    if not self.timezone:
        return 0
    offset = self.timezone.utcoffset(self.data)
    # Only days can be negative, so negative offsets have days=-1 and
    # seconds positive. Positive offsets have days=0
    return offset.days * 86400 + offset.seconds
[ "def", "Z", "(", "self", ")", ":", "if", "not", "self", ".", "timezone", ":", "return", "0", "offset", "=", "self", ".", "timezone", ".", "utcoffset", "(", "self", ".", "data", ")", "# Only days can be negative, so negative offsets have days=-1 and", "# seconds positive. Positive offsets have days=0", "return", "offset", ".", "days", "*", "86400", "+", "offset", ".", "seconds" ]
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive.
[ "Time", "zone", "offset", "in", "seconds", "(", "i", ".", "e", ".", "-", "43200", "to", "43200", ")", ".", "The", "offset", "for", "timezones", "west", "of", "UTC", "is", "always", "negative", "and", "for", "those", "east", "of", "UTC", "is", "always", "positive", "." ]
python
train
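A short worked example of the days/seconds arithmetic used by Z() above, with fixed-offset zones from the standard library standing in for self.timezone; it shows why a negative offset still comes out right even though timedelta stores it as days=-1 plus positive seconds.

from datetime import datetime, timedelta, timezone

now = datetime.now()

# East of UTC (+05:30): days=0, seconds=19800 -> 0 * 86400 + 19800 = 19800
east = timezone(timedelta(hours=5, minutes=30)).utcoffset(now)
print(east.days * 86400 + east.seconds)    # 19800

# West of UTC (-12:00): stored as days=-1, seconds=43200
# -1 * 86400 + 43200 = -43200, matching the '-43200' bound in the docstring
west = timezone(timedelta(hours=-12)).utcoffset(now)
print(west.days * 86400 + west.seconds)    # -43200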
kislyuk/aegea
aegea/packages/github3/github.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/github.py#L1052-L1064
def repository(self, owner, repository):
    """Returns a Repository object for the specified combination of
    owner and repository

    :param str owner: (required)
    :param str repository: (required)
    :returns: :class:`Repository <github3.repos.Repository>`
    """
    json = None
    if owner and repository:
        url = self._build_url('repos', owner, repository)
        json = self._json(self._get(url), 200)
    return Repository(json, self) if json else None
[ "def", "repository", "(", "self", ",", "owner", ",", "repository", ")", ":", "json", "=", "None", "if", "owner", "and", "repository", ":", "url", "=", "self", ".", "_build_url", "(", "'repos'", ",", "owner", ",", "repository", ")", "json", "=", "self", ".", "_json", "(", "self", ".", "_get", "(", "url", ")", ",", "200", ")", "return", "Repository", "(", "json", ",", "self", ")", "if", "json", "else", "None" ]
Returns a Repository object for the specified combination of owner and repository :param str owner: (required) :param str repository: (required) :returns: :class:`Repository <github3.repos.Repository>`
[ "Returns", "a", "Repository", "object", "for", "the", "specified", "combination", "of", "owner", "and", "repository" ]
python
train
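A hedged usage sketch for repository() above, assuming the vendored github3 client keeps upstream github3.py's top-level exports; the owner and repository names are simply this record's own repo.

from aegea.packages.github3 import GitHub  # vendored copy of github3.py

gh = GitHub()                            # anonymous client; pass credentials for private repos
repo = gh.repository('kislyuk', 'aegea')
if repo is not None:                     # None is returned when the lookup fails
    print(repo.full_name)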
hyperledger/sawtooth-core
rest_api/sawtooth_rest_api/route_handlers.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/rest_api/sawtooth_rest_api/route_handlers.py#L124-L185
async def submit_batches(self, request):
    """Accepts a binary encoded BatchList and submits it to the validator.

    Request:
        body: octet-stream BatchList of one or more Batches

    Response:
        status:
             - 202: Batches submitted and pending
        link: /batches or /batch_statuses link for submitted batches
    """
    timer_ctx = self._post_batches_total_time.time()
    self._post_batches_count.inc()

    # Parse request
    if request.headers['Content-Type'] != 'application/octet-stream':
        LOGGER.debug(
            'Submission headers had wrong Content-Type: %s',
            request.headers['Content-Type'])
        self._post_batches_error.inc()
        raise errors.SubmissionWrongContentType()

    body = await request.read()
    if not body:
        LOGGER.debug('Submission contained an empty body')
        self._post_batches_error.inc()
        raise errors.NoBatchesSubmitted()

    try:
        batch_list = BatchList()
        batch_list.ParseFromString(body)
    except DecodeError:
        LOGGER.debug('Submission body could not be decoded: %s', body)
        self._post_batches_error.inc()
        raise errors.BadProtobufSubmitted()

    # Query validator
    error_traps = [error_handlers.BatchInvalidTrap,
                   error_handlers.BatchQueueFullTrap]
    validator_query = client_batch_submit_pb2.ClientBatchSubmitRequest(
        batches=batch_list.batches)

    with self._post_batches_validator_time.time():
        await self._query_validator(
            Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_batch_submit_pb2.ClientBatchSubmitResponse,
            validator_query,
            error_traps)

    # Build response envelope
    id_string = ','.join(b.header_signature for b in batch_list.batches)

    status = 202
    link = self._build_url(request, path='/batch_statuses', id=id_string)

    retval = self._wrap_response(
        request,
        metadata={'link': link},
        status=status)

    timer_ctx.stop()
    return retval
[ "async", "def", "submit_batches", "(", "self", ",", "request", ")", ":", "timer_ctx", "=", "self", ".", "_post_batches_total_time", ".", "time", "(", ")", "self", ".", "_post_batches_count", ".", "inc", "(", ")", "# Parse request", "if", "request", ".", "headers", "[", "'Content-Type'", "]", "!=", "'application/octet-stream'", ":", "LOGGER", ".", "debug", "(", "'Submission headers had wrong Content-Type: %s'", ",", "request", ".", "headers", "[", "'Content-Type'", "]", ")", "self", ".", "_post_batches_error", ".", "inc", "(", ")", "raise", "errors", ".", "SubmissionWrongContentType", "(", ")", "body", "=", "await", "request", ".", "read", "(", ")", "if", "not", "body", ":", "LOGGER", ".", "debug", "(", "'Submission contained an empty body'", ")", "self", ".", "_post_batches_error", ".", "inc", "(", ")", "raise", "errors", ".", "NoBatchesSubmitted", "(", ")", "try", ":", "batch_list", "=", "BatchList", "(", ")", "batch_list", ".", "ParseFromString", "(", "body", ")", "except", "DecodeError", ":", "LOGGER", ".", "debug", "(", "'Submission body could not be decoded: %s'", ",", "body", ")", "self", ".", "_post_batches_error", ".", "inc", "(", ")", "raise", "errors", ".", "BadProtobufSubmitted", "(", ")", "# Query validator", "error_traps", "=", "[", "error_handlers", ".", "BatchInvalidTrap", ",", "error_handlers", ".", "BatchQueueFullTrap", "]", "validator_query", "=", "client_batch_submit_pb2", ".", "ClientBatchSubmitRequest", "(", "batches", "=", "batch_list", ".", "batches", ")", "with", "self", ".", "_post_batches_validator_time", ".", "time", "(", ")", ":", "await", "self", ".", "_query_validator", "(", "Message", ".", "CLIENT_BATCH_SUBMIT_REQUEST", ",", "client_batch_submit_pb2", ".", "ClientBatchSubmitResponse", ",", "validator_query", ",", "error_traps", ")", "# Build response envelope", "id_string", "=", "','", ".", "join", "(", "b", ".", "header_signature", "for", "b", "in", "batch_list", ".", "batches", ")", "status", "=", "202", "link", "=", "self", ".", "_build_url", "(", "request", ",", "path", "=", "'/batch_statuses'", ",", "id", "=", "id_string", ")", "retval", "=", "self", ".", "_wrap_response", "(", "request", ",", "metadata", "=", "{", "'link'", ":", "link", "}", ",", "status", "=", "status", ")", "timer_ctx", ".", "stop", "(", ")", "return", "retval" ]
Accepts a binary encoded BatchList and submits it to the validator.

Request:
    body: octet-stream BatchList of one or more Batches

Response:
    status:
         - 202: Batches submitted and pending
    link: /batches or /batch_statuses link for submitted batches
[ "Accepts", "a", "binary", "encoded", "BatchList", "and", "submits", "it", "to", "the", "validator", "." ]
python
train
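From the client side, the handler above expects a serialized BatchList posted as an octet stream; a minimal sketch with requests, where the REST API address and the pre-built batch_list_bytes are assumptions (the bytes would normally come from a Sawtooth SDK batch builder).

import requests

batch_list_bytes = b'...'  # placeholder: a BatchList protobuf serialized with SerializeToString()

resp = requests.post(
    'http://localhost:8008/batches',                  # assumed REST API address
    data=batch_list_bytes,
    headers={'Content-Type': 'application/octet-stream'},
)
print(resp.status_code)        # 202: batches submitted and pending
print(resp.json()['link'])     # /batch_statuses link for the submitted batches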
hectane/python-hectane
pyhectane/django.py
https://github.com/hectane/python-hectane/blob/e0fe1df576f776566e813f71782f8adf60146383/pyhectane/django.py#L55-L78
def send_messages(self, emails):
    """
    Attempt to send the specified emails.
    """
    num_sent = 0
    for e in emails:
        html = None
        if isinstance(e, EmailMultiAlternatives):
            for a in e.alternatives:
                if a[1] == 'text/html':
                    html = a[0]
        r = self._connection.send(
            from_=e.from_email,
            to=e.to,
            subject=e.subject,
            text=e.body,
            html=html,
            cc=e.cc,
            bcc=e.bcc,
            attachments=list(self._process_attachments(e)),
        )
        if 'error' not in r:
            num_sent += 1
    return num_sent
[ "def", "send_messages", "(", "self", ",", "emails", ")", ":", "num_sent", "=", "0", "for", "e", "in", "emails", ":", "html", "=", "None", "if", "isinstance", "(", "e", ",", "EmailMultiAlternatives", ")", ":", "for", "a", "in", "e", ".", "alternatives", ":", "if", "a", "[", "1", "]", "==", "'text/html'", ":", "html", "=", "a", "[", "0", "]", "r", "=", "self", ".", "_connection", ".", "send", "(", "from_", "=", "e", ".", "from_email", ",", "to", "=", "e", ".", "to", ",", "subject", "=", "e", ".", "subject", ",", "text", "=", "e", ".", "body", ",", "html", "=", "html", ",", "cc", "=", "e", ".", "cc", ",", "bcc", "=", "e", ".", "bcc", ",", "attachments", "=", "list", "(", "self", ".", "_process_attachments", "(", "e", ")", ")", ",", ")", "if", "'error'", "not", "in", "r", ":", "num_sent", "+=", "1", "return", "num_sent" ]
Attempt to send the specified emails.
[ "Attempt", "to", "send", "the", "specified", "emails", "." ]
python
train
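A backend like the one above is normally exercised through Django's own mail API; this sketch shows how a message with an HTML alternative reaches send_messages(), assuming the Hectane backend is already configured via the EMAIL_BACKEND setting (all addresses are placeholders).

from django.core.mail import EmailMultiAlternatives

msg = EmailMultiAlternatives(
    subject='Weekly report',
    body='Plain-text body',
    from_email='reports@example.com',
    to=['team@example.com'],
)
msg.attach_alternative('<h1>Weekly report</h1>', 'text/html')  # picked up as the html part
msg.send()  # routed through the configured backend's send_messages()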
gusutabopb/aioinflux
aioinflux/serialization/usertype.py
https://github.com/gusutabopb/aioinflux/blob/2e4b7b3e13604e7618c686d89a0673f0bc70b24e/aioinflux/serialization/usertype.py#L67-L122
def _make_serializer(meas, schema, rm_none, extra_tags, placeholder):  # noqa: C901
    """Factory of line protocol parsers"""
    _validate_schema(schema, placeholder)
    tags = []
    fields = []
    ts = None
    meas = meas
    for k, t in schema.items():
        if t is MEASUREMENT:
            meas = f"{{i.{k}}}"
        elif t is TIMEINT:
            ts = f"{{i.{k}}}"
        elif t is TIMESTR:
            if pd:
                ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
            else:
                ts = f"{{dt_to_int(str_to_dt(i.{k}))}}"
        elif t is TIMEDT:
            if pd:
                ts = f"{{pd.Timestamp(i.{k} or 0).value}}"
            else:
                ts = f"{{dt_to_int(i.{k})}}"
        elif t is TAG:
            tags.append(f"{k}={{str(i.{k}).translate(tag_escape)}}")
        elif t is TAGENUM:
            tags.append(f"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}")
        elif t in (FLOAT, BOOL):
            fields.append(f"{k}={{i.{k}}}")
        elif t is INT:
            fields.append(f"{k}={{i.{k}}}i")
        elif t is STR:
            fields.append(f"{k}=\\\"{{str(i.{k}).translate(str_escape)}}\\\"")
        elif t is ENUM:
            fields.append(f"{k}=\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\"")
        else:
            raise SchemaError(f"Invalid attribute type {k!r}: {t!r}")
    extra_tags = extra_tags or {}
    for k, v in extra_tags.items():
        tags.append(f"{k}={v}")
    if placeholder:
        fields.insert(0, f"_=true")

    sep = ',' if tags else ''
    ts = f' {ts}' if ts else ''
    fmt = f"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}"
    if rm_none:
        # Has substantial runtime impact. Best avoided if performance is critical.
        # First field can't be removed.
        pat = r',\w+="?None"?i?'
        f = eval('lambda i: re.sub(r\'{}\', "", f"{}").encode()'.format(pat, fmt))
    else:
        f = eval('lambda i: f"{}".encode()'.format(fmt))
    f.__doc__ = "Returns InfluxDB line protocol representation of user-defined class"
    f._args = dict(meas=meas, schema=schema, rm_none=rm_none,
                   extra_tags=extra_tags, placeholder=placeholder)
    return f
[ "def", "_make_serializer", "(", "meas", ",", "schema", ",", "rm_none", ",", "extra_tags", ",", "placeholder", ")", ":", "# noqa: C901", "_validate_schema", "(", "schema", ",", "placeholder", ")", "tags", "=", "[", "]", "fields", "=", "[", "]", "ts", "=", "None", "meas", "=", "meas", "for", "k", ",", "t", "in", "schema", ".", "items", "(", ")", ":", "if", "t", "is", "MEASUREMENT", ":", "meas", "=", "f\"{{i.{k}}}\"", "elif", "t", "is", "TIMEINT", ":", "ts", "=", "f\"{{i.{k}}}\"", "elif", "t", "is", "TIMESTR", ":", "if", "pd", ":", "ts", "=", "f\"{{pd.Timestamp(i.{k} or 0).value}}\"", "else", ":", "ts", "=", "f\"{{dt_to_int(str_to_dt(i.{k}))}}\"", "elif", "t", "is", "TIMEDT", ":", "if", "pd", ":", "ts", "=", "f\"{{pd.Timestamp(i.{k} or 0).value}}\"", "else", ":", "ts", "=", "f\"{{dt_to_int(i.{k})}}\"", "elif", "t", "is", "TAG", ":", "tags", ".", "append", "(", "f\"{k}={{str(i.{k}).translate(tag_escape)}}\"", ")", "elif", "t", "is", "TAGENUM", ":", "tags", ".", "append", "(", "f\"{k}={{getattr(i.{k}, 'name', i.{k} or None)}}\"", ")", "elif", "t", "in", "(", "FLOAT", ",", "BOOL", ")", ":", "fields", ".", "append", "(", "f\"{k}={{i.{k}}}\"", ")", "elif", "t", "is", "INT", ":", "fields", ".", "append", "(", "f\"{k}={{i.{k}}}i\"", ")", "elif", "t", "is", "STR", ":", "fields", ".", "append", "(", "f\"{k}=\\\\\\\"{{str(i.{k}).translate(str_escape)}}\\\\\\\"\"", ")", "elif", "t", "is", "ENUM", ":", "fields", ".", "append", "(", "f\"{k}=\\\\\\\"{{getattr(i.{k}, 'name', i.{k} or None)}}\\\\\\\"\"", ")", "else", ":", "raise", "SchemaError", "(", "f\"Invalid attribute type {k!r}: {t!r}\"", ")", "extra_tags", "=", "extra_tags", "or", "{", "}", "for", "k", ",", "v", "in", "extra_tags", ".", "items", "(", ")", ":", "tags", ".", "append", "(", "f\"{k}={v}\"", ")", "if", "placeholder", ":", "fields", ".", "insert", "(", "0", ",", "f\"_=true\"", ")", "sep", "=", "','", "if", "tags", "else", "''", "ts", "=", "f' {ts}'", "if", "ts", "else", "''", "fmt", "=", "f\"{meas}{sep}{','.join(tags)} {','.join(fields)}{ts}\"", "if", "rm_none", ":", "# Has substantial runtime impact. Best avoided if performance is critical.", "# First field can't be removed.", "pat", "=", "r',\\w+=\"?None\"?i?'", "f", "=", "eval", "(", "'lambda i: re.sub(r\\'{}\\', \"\", f\"{}\").encode()'", ".", "format", "(", "pat", ",", "fmt", ")", ")", "else", ":", "f", "=", "eval", "(", "'lambda i: f\"{}\".encode()'", ".", "format", "(", "fmt", ")", ")", "f", ".", "__doc__", "=", "\"Returns InfluxDB line protocol representation of user-defined class\"", "f", ".", "_args", "=", "dict", "(", "meas", "=", "meas", ",", "schema", "=", "schema", ",", "rm_none", "=", "rm_none", ",", "extra_tags", "=", "extra_tags", ",", "placeholder", "=", "placeholder", ")", "return", "f" ]
Factory of line protocol parsers
[ "Factory", "of", "line", "protocol", "parsers" ]
python
train
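The factory above builds a function that renders one InfluxDB line-protocol record per object. A hedged sketch of the public entry point that uses it, based on aioinflux's documented @lineprotocol decorator; the class, field names, and the to_lineprotocol() call follow the project's README-style usage and should be treated as assumptions.

from typing import NamedTuple
from aioinflux import lineprotocol, TAG, FLOAT, TIMEINT  # assumed top-level exports

@lineprotocol
class CpuLoad(NamedTuple):
    host: TAG           # rendered as a tag
    value: FLOAT        # rendered as a field
    timestamp: TIMEINT  # nanosecond epoch timestamp

point = CpuLoad(host='server01', value=0.72, timestamp=1547510400000000000)
print(point.to_lineprotocol())
# b'CpuLoad,host=server01 value=0.72 1547510400000000000'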
jfear/sramongo
sramongo/services/entrez.py
https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L86-L115
def epost(database, ids: List[str], webenv=False, api_key=False, email=False, **kwargs) -> Optional[EpostResult]:
    """Post IDs using the Entrez ESearch API.

    Parameters
    ----------
    database : str
        Entez database to search.
    ids : list
        List of IDs to submit to the server.
    webenv : str
        An Entrez WebEnv to post ids to.
    api_key : str
        A users API key which allows more requests per second
    email : str
        A users email which is required if not using API.

    Returns
    -------
    requests.Response
    """
    url = BASE_URL + f'epost.fcgi'
    id = ','.join(ids)
    url_params = f'db={database}&id={id}'
    url_params = check_webenv(webenv, url_params)
    url_params = check_api_key(api_key, url_params)
    url_params = check_email(email, url_params)

    resp = entrez_try_put_multiple_times(url, url_params, num_tries=3)
    time.sleep(.5)
    return parse_epost(resp.text)
[ "def", "epost", "(", "database", ",", "ids", ":", "List", "[", "str", "]", ",", "webenv", "=", "False", ",", "api_key", "=", "False", ",", "email", "=", "False", ",", "*", "*", "kwargs", ")", "->", "Optional", "[", "EpostResult", "]", ":", "url", "=", "BASE_URL", "+", "f'epost.fcgi'", "id", "=", "','", ".", "join", "(", "ids", ")", "url_params", "=", "f'db={database}&id={id}'", "url_params", "=", "check_webenv", "(", "webenv", ",", "url_params", ")", "url_params", "=", "check_api_key", "(", "api_key", ",", "url_params", ")", "url_params", "=", "check_email", "(", "email", ",", "url_params", ")", "resp", "=", "entrez_try_put_multiple_times", "(", "url", ",", "url_params", ",", "num_tries", "=", "3", ")", "time", ".", "sleep", "(", ".5", ")", "return", "parse_epost", "(", "resp", ".", "text", ")" ]
Post IDs using the Entrez ESearch API.

Parameters
----------
database : str
    Entez database to search.
ids : list
    List of IDs to submit to the server.
webenv : str
    An Entrez WebEnv to post ids to.
api_key : str
    A users API key which allows more requests per second
email : str
    A users email which is required if not using API.

Returns
-------
requests.Response
[ "Post", "IDs", "using", "the", "Entrez", "ESearch", "API", "." ]
python
train
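A brief usage sketch for epost() above; the database name and IDs are placeholders, and the attributes read from the returned EpostResult are assumptions.

from sramongo.services.entrez import epost

result = epost('sra', ids=['SRX123456', 'SRX123457'], email='you@example.com')
if result is not None:
    # attribute names are assumptions; epost returns a parsed EpostResult
    print(result.webenv, result.query_key)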
libtcod/python-tcod
tcod/random.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/random.py#L58-L62
def _new_from_cdata(cls, cdata: Any) -> "Random":
    """Return a new instance encapsulating this cdata."""
    self = object.__new__(cls)  # type: "Random"
    self.random_c = cdata
    return self
[ "def", "_new_from_cdata", "(", "cls", ",", "cdata", ":", "Any", ")", "->", "\"Random\"", ":", "self", "=", "object", ".", "__new__", "(", "cls", ")", "# type: \"Random\"", "self", ".", "random_c", "=", "cdata", "return", "self" ]
Return a new instance encapsulating this cdata.
[ "Return", "a", "new", "instance", "encapsulating", "this", "cdata", "." ]
python
train
biolink/ontobio
ontobio/sparql/rdflib_bridge.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/sparql/rdflib_bridge.py#L15-L44
def rdfgraph_to_ontol(rg):
    """
    Return an Ontology object from an rdflib graph object

    Status: Incomplete
    """
    digraph = networkx.MultiDiGraph()
    from rdflib.namespace import RDF
    label_map = {}
    for c in rg.subjects(RDF.type, OWL.Class):
        cid = contract_uri_wrap(c)
        logging.info("C={}".format(cid))
        for lit in rg.objects(c, RDFS.label):
            label_map[cid] = lit.value
            digraph.add_node(cid, label=lit.value)
        for s in rg.objects(c, RDFS.subClassOf):
            # todo - blank nodes
            sid = contract_uri_wrap(s)
            digraph.add_edge(sid, cid, pred='subClassOf')

    logging.info("G={}".format(digraph))
    payload = {
        'graph': digraph,
        #'xref_graph': xref_graph,
        #'graphdoc': obographdoc,
        #'logical_definitions': logical_definitions
    }
    ont = Ontology(handle='wd', payload=payload)
    return ont
[ "def", "rdfgraph_to_ontol", "(", "rg", ")", ":", "digraph", "=", "networkx", ".", "MultiDiGraph", "(", ")", "from", "rdflib", ".", "namespace", "import", "RDF", "label_map", "=", "{", "}", "for", "c", "in", "rg", ".", "subjects", "(", "RDF", ".", "type", ",", "OWL", ".", "Class", ")", ":", "cid", "=", "contract_uri_wrap", "(", "c", ")", "logging", ".", "info", "(", "\"C={}\"", ".", "format", "(", "cid", ")", ")", "for", "lit", "in", "rg", ".", "objects", "(", "c", ",", "RDFS", ".", "label", ")", ":", "label_map", "[", "cid", "]", "=", "lit", ".", "value", "digraph", ".", "add_node", "(", "cid", ",", "label", "=", "lit", ".", "value", ")", "for", "s", "in", "rg", ".", "objects", "(", "c", ",", "RDFS", ".", "subClassOf", ")", ":", "# todo - blank nodes", "sid", "=", "contract_uri_wrap", "(", "s", ")", "digraph", ".", "add_edge", "(", "sid", ",", "cid", ",", "pred", "=", "'subClassOf'", ")", "logging", ".", "info", "(", "\"G={}\"", ".", "format", "(", "digraph", ")", ")", "payload", "=", "{", "'graph'", ":", "digraph", ",", "#'xref_graph': xref_graph,", "#'graphdoc': obographdoc,", "#'logical_definitions': logical_definitions", "}", "ont", "=", "Ontology", "(", "handle", "=", "'wd'", ",", "payload", "=", "payload", ")", "return", "ont" ]
Return an Ontology object from an rdflib graph object Status: Incomplete
[ "Return", "an", "Ontology", "object", "from", "an", "rdflib", "graph", "object" ]
python
train
aiortc/aiortc
aiortc/rtcpeerconnection.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcpeerconnection.py#L319-L343
def addTrack(self, track):
    """
    Add a :class:`MediaStreamTrack` to the set of media tracks which
    will be transmitted to the remote peer.
    """
    # check state is valid
    self.__assertNotClosed()

    if track.kind not in ['audio', 'video']:
        raise InternalError('Invalid track kind "%s"' % track.kind)

    # don't add track twice
    self.__assertTrackHasNoSender(track)

    for transceiver in self.__transceivers:
        if transceiver.kind == track.kind:
            if transceiver.sender.track is None:
                transceiver.sender.replaceTrack(track)
                transceiver.direction = or_direction(
                    transceiver.direction, 'sendonly')
                return transceiver.sender

    transceiver = self.__createTransceiver(
        direction='sendrecv', kind=track.kind, sender_track=track)
    return transceiver.sender
[ "def", "addTrack", "(", "self", ",", "track", ")", ":", "# check state is valid", "self", ".", "__assertNotClosed", "(", ")", "if", "track", ".", "kind", "not", "in", "[", "'audio'", ",", "'video'", "]", ":", "raise", "InternalError", "(", "'Invalid track kind \"%s\"'", "%", "track", ".", "kind", ")", "# don't add track twice", "self", ".", "__assertTrackHasNoSender", "(", "track", ")", "for", "transceiver", "in", "self", ".", "__transceivers", ":", "if", "transceiver", ".", "kind", "==", "track", ".", "kind", ":", "if", "transceiver", ".", "sender", ".", "track", "is", "None", ":", "transceiver", ".", "sender", ".", "replaceTrack", "(", "track", ")", "transceiver", ".", "direction", "=", "or_direction", "(", "transceiver", ".", "direction", ",", "'sendonly'", ")", "return", "transceiver", ".", "sender", "transceiver", "=", "self", ".", "__createTransceiver", "(", "direction", "=", "'sendrecv'", ",", "kind", "=", "track", ".", "kind", ",", "sender_track", "=", "track", ")", "return", "transceiver", ".", "sender" ]
Add a :class:`MediaStreamTrack` to the set of media tracks which will be transmitted to the remote peer.
[ "Add", "a", ":", "class", ":", "MediaStreamTrack", "to", "the", "set", "of", "media", "tracks", "which", "will", "be", "transmitted", "to", "the", "remote", "peer", "." ]
python
train
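A brief sketch of how addTrack() above is typically called from application code, assuming aiortc's bundled MediaPlayer as the track source; the file name is a placeholder.

from aiortc import RTCPeerConnection
from aiortc.contrib.media import MediaPlayer

pc = RTCPeerConnection()
player = MediaPlayer('demo.mp4')      # placeholder media file
sender = pc.addTrack(player.video)    # reuses an idle transceiver or creates one, as shown above
# the returned sender can later be pointed at a different source via replaceTrack()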
molmod/molmod
molmod/io/cube.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/io/cube.py#L260-L267
def copy(self, newdata=None):
    '''Return a copy of the cube with optionally new data.'''
    if newdata is None:
        newdata = self.data.copy()
    return self.__class__(
        self.molecule, self.origin.copy(), self.axes.copy(), self.nrep.copy(),
        newdata, self.subtitle, self.nuclear_charges
    )
[ "def", "copy", "(", "self", ",", "newdata", "=", "None", ")", ":", "if", "newdata", "is", "None", ":", "newdata", "=", "self", ".", "data", ".", "copy", "(", ")", "return", "self", ".", "__class__", "(", "self", ".", "molecule", ",", "self", ".", "origin", ".", "copy", "(", ")", ",", "self", ".", "axes", ".", "copy", "(", ")", ",", "self", ".", "nrep", ".", "copy", "(", ")", ",", "newdata", ",", "self", ".", "subtitle", ",", "self", ".", "nuclear_charges", ")" ]
Return a copy of the cube with optionally new data.
[ "Return", "a", "copy", "of", "the", "cube", "with", "optionally", "new", "data", "." ]
python
train
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/publisher/plos.py
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L1192-L1216
def convert_ref_list_elements(self):
    """
    List of references (citations) for an article, which is often called
    “References”, “Bibliography”, or “Additional Reading”.

    No distinction is made between lists of cited references and lists of
    suggested references.

    This method should not be confused with the method(s) employed for the
    formatting of a proper bibliography, though they are related.
    Similarly, this is an area of major openness in development, I lack
    access to PLOS' algorithm for proper citation formatting.
    """
    #TODO: Handle nested ref-lists
    for ref_list in self.main.getroot().findall('.//ref-list'):
        remove_all_attributes(ref_list)
        ref_list.tag = 'div'
        ref_list.attrib['class'] = 'ref-list'
        label = ref_list.find('label')
        if label is not None:
            label.tag = 'h3'
        for ref in ref_list.findall('ref'):
            ref_p = etree.Element('p')
            ref_p.text = str(etree.tostring(ref, method='text', encoding='utf-8'), encoding='utf-8')
            replace(ref, ref_p)
[ "def", "convert_ref_list_elements", "(", "self", ")", ":", "#TODO: Handle nested ref-lists", "for", "ref_list", "in", "self", ".", "main", ".", "getroot", "(", ")", ".", "findall", "(", "'.//ref-list'", ")", ":", "remove_all_attributes", "(", "ref_list", ")", "ref_list", ".", "tag", "=", "'div'", "ref_list", ".", "attrib", "[", "'class'", "]", "=", "'ref-list'", "label", "=", "ref_list", ".", "find", "(", "'label'", ")", "if", "label", "is", "not", "None", ":", "label", ".", "tag", "=", "'h3'", "for", "ref", "in", "ref_list", ".", "findall", "(", "'ref'", ")", ":", "ref_p", "=", "etree", ".", "Element", "(", "'p'", ")", "ref_p", ".", "text", "=", "str", "(", "etree", ".", "tostring", "(", "ref", ",", "method", "=", "'text'", ",", "encoding", "=", "'utf-8'", ")", ",", "encoding", "=", "'utf-8'", ")", "replace", "(", "ref", ",", "ref_p", ")" ]
List of references (citations) for an article, which is often called “References”, “Bibliography”, or “Additional Reading”. No distinction is made between lists of cited references and lists of suggested references. This method should not be confused with the method(s) employed for the formatting of a proper bibliography, though they are related. Similarly, this is an area of major openness in development, I lack access to PLOS' algorithm for proper citation formatting.
[ "List", "of", "references", "(", "citations", ")", "for", "an", "article", "which", "is", "often", "called", "“References”", "“Bibliography”", "or", "“Additional", "Reading”", "." ]
python
train
night-crawler/django-docker-helpers
django_docker_helpers/utils.py
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/utils.py#L143-L156
def dotkey(obj: dict, path: str, default=None, separator='.'):
    """
    Provides an interface to traverse nested dict values by dot-separated paths.
    Wrapper for ``dpath.util.get``.

    :param obj: dict like ``{'some': {'value': 3}}``
    :param path: ``'some.value'``
    :param separator: ``'.'`` or ``'/'`` or whatever
    :param default: default for KeyError
    :return: dict value or default value
    """
    try:
        return get(obj, path, separator=separator)
    except KeyError:
        return default
[ "def", "dotkey", "(", "obj", ":", "dict", ",", "path", ":", "str", ",", "default", "=", "None", ",", "separator", "=", "'.'", ")", ":", "try", ":", "return", "get", "(", "obj", ",", "path", ",", "separator", "=", "separator", ")", "except", "KeyError", ":", "return", "default" ]
Provides an interface to traverse nested dict values by dot-separated paths. Wrapper for ``dpath.util.get``. :param obj: dict like ``{'some': {'value': 3}}`` :param path: ``'some.value'`` :param separator: ``'.'`` or ``'/'`` or whatever :param default: default for KeyError :return: dict value or default value
[ "Provides", "an", "interface", "to", "traverse", "nested", "dict", "values", "by", "dot", "-", "separated", "paths", ".", "Wrapper", "for", "dpath", ".", "util", ".", "get", "." ]
python
train
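A quick sketch of the call pattern described in the dotkey docstring above; the sample dict follows the docstring's own example.

from django_docker_helpers.utils import dotkey

config = {'some': {'value': 3}, 'db': {'host': 'localhost'}}

dotkey(config, 'some.value')              # 3
dotkey(config, 'db.port', default=5432)   # 5432 - a missing key falls back to the default
dotkey(config, 'db/host', separator='/')  # 'localhost' - custom separator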
saltstack/salt
salt/loader.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/loader.py#L913-L926
def call(fun, **kwargs):
    '''
    Directly call a function inside a loader directory
    '''
    args = kwargs.get('args', [])
    dirs = kwargs.get('dirs', [])
    funcs = LazyLoader(
        [os.path.join(SALT_BASE_PATH, 'modules')] + dirs,
        None,
        tag='modules',
        virtual_enable=False,
    )
    return funcs[fun](*args)
[ "def", "call", "(", "fun", ",", "*", "*", "kwargs", ")", ":", "args", "=", "kwargs", ".", "get", "(", "'args'", ",", "[", "]", ")", "dirs", "=", "kwargs", ".", "get", "(", "'dirs'", ",", "[", "]", ")", "funcs", "=", "LazyLoader", "(", "[", "os", ".", "path", ".", "join", "(", "SALT_BASE_PATH", ",", "'modules'", ")", "]", "+", "dirs", ",", "None", ",", "tag", "=", "'modules'", ",", "virtual_enable", "=", "False", ",", ")", "return", "funcs", "[", "fun", "]", "(", "*", "args", ")" ]
Directly call a function inside a loader directory
[ "Directly", "call", "a", "function", "inside", "a", "loader", "directory" ]
python
train
materialsproject/pymatgen
pymatgen/electronic_structure/bandstructure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/bandstructure.py#L922-L959
def from_old_dict(cls, d):
    """
    Args:
        d (dict): A dict with all data for a band structure symm line object.

    Returns:
        A BandStructureSymmLine object
    """
    # Strip the label to recover initial string (see trick used in as_dict to handle $ chars)
    labels_dict = {k.strip(): v for k, v in d['labels_dict'].items()}
    projections = {}
    structure = None
    if 'projections' in d and len(d['projections']) != 0:
        structure = Structure.from_dict(d['structure'])
        projections = {}
        for spin in d['projections']:
            dd = []
            for i in range(len(d['projections'][spin])):
                ddd = []
                for j in range(len(d['projections'][spin][i])):
                    dddd = []
                    for k in range(len(d['projections'][spin][i][j])):
                        ddddd = []
                        orb = Orbital(k).name
                        for l in range(len(d['projections'][spin][i][j][orb])):
                            ddddd.append(d['projections'][spin][i][j][orb][l])
                        dddd.append(np.array(ddddd))
                    ddd.append(np.array(dddd))
                dd.append(np.array(ddd))
            projections[Spin(int(spin))] = np.array(dd)

    return BandStructureSymmLine(
        d['kpoints'], {Spin(int(k)): d['bands'][k] for k in d['bands']},
        Lattice(d['lattice_rec']['matrix']), d['efermi'],
        labels_dict, structure=structure, projections=projections)
[ "def", "from_old_dict", "(", "cls", ",", "d", ")", ":", "# Strip the label to recover initial string (see trick used in as_dict to handle $ chars)", "labels_dict", "=", "{", "k", ".", "strip", "(", ")", ":", "v", "for", "k", ",", "v", "in", "d", "[", "'labels_dict'", "]", ".", "items", "(", ")", "}", "projections", "=", "{", "}", "structure", "=", "None", "if", "'projections'", "in", "d", "and", "len", "(", "d", "[", "'projections'", "]", ")", "!=", "0", ":", "structure", "=", "Structure", ".", "from_dict", "(", "d", "[", "'structure'", "]", ")", "projections", "=", "{", "}", "for", "spin", "in", "d", "[", "'projections'", "]", ":", "dd", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "d", "[", "'projections'", "]", "[", "spin", "]", ")", ")", ":", "ddd", "=", "[", "]", "for", "j", "in", "range", "(", "len", "(", "d", "[", "'projections'", "]", "[", "spin", "]", "[", "i", "]", ")", ")", ":", "dddd", "=", "[", "]", "for", "k", "in", "range", "(", "len", "(", "d", "[", "'projections'", "]", "[", "spin", "]", "[", "i", "]", "[", "j", "]", ")", ")", ":", "ddddd", "=", "[", "]", "orb", "=", "Orbital", "(", "k", ")", ".", "name", "for", "l", "in", "range", "(", "len", "(", "d", "[", "'projections'", "]", "[", "spin", "]", "[", "i", "]", "[", "j", "]", "[", "orb", "]", ")", ")", ":", "ddddd", ".", "append", "(", "d", "[", "'projections'", "]", "[", "spin", "]", "[", "i", "]", "[", "j", "]", "[", "orb", "]", "[", "l", "]", ")", "dddd", ".", "append", "(", "np", ".", "array", "(", "ddddd", ")", ")", "ddd", ".", "append", "(", "np", ".", "array", "(", "dddd", ")", ")", "dd", ".", "append", "(", "np", ".", "array", "(", "ddd", ")", ")", "projections", "[", "Spin", "(", "int", "(", "spin", ")", ")", "]", "=", "np", ".", "array", "(", "dd", ")", "return", "BandStructureSymmLine", "(", "d", "[", "'kpoints'", "]", ",", "{", "Spin", "(", "int", "(", "k", ")", ")", ":", "d", "[", "'bands'", "]", "[", "k", "]", "for", "k", "in", "d", "[", "'bands'", "]", "}", ",", "Lattice", "(", "d", "[", "'lattice_rec'", "]", "[", "'matrix'", "]", ")", ",", "d", "[", "'efermi'", "]", ",", "labels_dict", ",", "structure", "=", "structure", ",", "projections", "=", "projections", ")" ]
Args: d (dict): A dict with all data for a band structure symm line object. Returns: A BandStructureSymmLine object
[ "Args", ":", "d", "(", "dict", ")", ":", "A", "dict", "with", "all", "data", "for", "a", "band", "structure", "symm", "line", "object", ".", "Returns", ":", "A", "BandStructureSymmLine", "object" ]
python
train
pudo/jsongraph
jsongraph/context.py
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/context.py#L51-L59
def delete(self):
    """ Delete all statements matching the current context identifier
    from the main store. """
    if self.parent.buffered:
        query = 'CLEAR SILENT GRAPH %s ;' % self.identifier.n3()
        self.parent.graph.update(query)
        self.flush()
    else:
        self.graph.remove((None, None, None))
[ "def", "delete", "(", "self", ")", ":", "if", "self", ".", "parent", ".", "buffered", ":", "query", "=", "'CLEAR SILENT GRAPH %s ;'", "%", "self", ".", "identifier", ".", "n3", "(", ")", "self", ".", "parent", ".", "graph", ".", "update", "(", "query", ")", "self", ".", "flush", "(", ")", "else", ":", "self", ".", "graph", ".", "remove", "(", "(", "None", ",", "None", ",", "None", ")", ")" ]
Delete all statements matching the current context identifier from the main store.
[ "Delete", "all", "statements", "matching", "the", "current", "context", "identifier", "from", "the", "main", "store", "." ]
python
train
CitrineInformatics/pif-dft
dfttopif/parsers/base.py
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/base.py#L196-L205
def is_converged(self):
    '''Whether the calculation has converged

    Returns:
        Property where "scalar" is a boolean indicating
    '''
    # Check for cached result
    if self._converged is None:
        self._converged = self._is_converged()
    return Property(scalars=[Scalar(value=self._converged)])
[ "def", "is_converged", "(", "self", ")", ":", "# Check for cached result", "if", "self", ".", "_converged", "is", "None", ":", "self", ".", "_converged", "=", "self", ".", "_is_converged", "(", ")", "return", "Property", "(", "scalars", "=", "[", "Scalar", "(", "value", "=", "self", ".", "_converged", ")", "]", ")" ]
Whether the calculation has converged Returns: Property where "scalar" is a boolean indicating
[ "Whether", "the", "calculation", "has", "converged", "Returns", ":", "Property", "where", "scalar", "is", "a", "boolean", "indicating" ]
python
train
tango-controls/pytango
tango/utils.py
https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L1170-L1180
def findentry(self, item):
    """A caseless way of checking if an item is in the list or not.
    It returns None or the entry."""
    if not isinstance(item, str):
        raise TypeError(
            'Members of this object must be strings. '
            'You supplied \"%s\"' % type(item))
    for entry in self:
        if item.lower() == entry.lower():
            return entry
    return None
[ "def", "findentry", "(", "self", ",", "item", ")", ":", "if", "not", "isinstance", "(", "item", ",", "str", ")", ":", "raise", "TypeError", "(", "'Members of this object must be strings. '", "'You supplied \\\"%s\\\"'", "%", "type", "(", "item", ")", ")", "for", "entry", "in", "self", ":", "if", "item", ".", "lower", "(", ")", "==", "entry", ".", "lower", "(", ")", ":", "return", "entry", "return", "None" ]
A caseless way of checking if an item is in the list or not. It returns None or the entry.
[ "A", "caseless", "way", "of", "checking", "if", "an", "item", "is", "in", "the", "list", "or", "not", ".", "It", "returns", "None", "or", "the", "entry", "." ]
python
train
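A minimal sketch of the caseless lookup above; the CaselessList container name is an assumption about which tango.utils class carries this method, and the member names are made up.

attrs = CaselessList(['Position', 'Velocity'])  # assumed caseless list helper from tango.utils

attrs.findentry('position')   # 'Position' - matching is case-insensitive, original casing returned
attrs.findentry('missing')    # None
attrs.findentry(42)           # raises TypeError: members must be strings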
Ceasar/easywatch
easywatch/easywatch.py
https://github.com/Ceasar/easywatch/blob/1dd464d2acca5932473759b187dec4eb63dab2d9/easywatch/easywatch.py#L13-L36
def watch(path, handler):
    """Watch a directory for events.
    - path should be the directory to watch
    - handler should a function which takes an event_type and src_path
    and does something interesting. event_type will be one of 'created',
    'deleted', 'modified', or 'moved'. src_path will be the absolute path
    to the file that triggered the event.
    """
    # let the user just deal with events
    @functools.wraps(handler)
    def wrapper(self, event):
        if not event.is_directory:
            return handler(event.event_type, event.src_path)
    attrs = {'on_any_event': wrapper}
    EventHandler = type("EventHandler", (FileSystemEventHandler,), attrs)
    observer = Observer()
    observer.schedule(EventHandler(), path=path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
[ "def", "watch", "(", "path", ",", "handler", ")", ":", "# let the user just deal with events", "@", "functools", ".", "wraps", "(", "handler", ")", "def", "wrapper", "(", "self", ",", "event", ")", ":", "if", "not", "event", ".", "is_directory", ":", "return", "handler", "(", "event", ".", "event_type", ",", "event", ".", "src_path", ")", "attrs", "=", "{", "'on_any_event'", ":", "wrapper", "}", "EventHandler", "=", "type", "(", "\"EventHandler\"", ",", "(", "FileSystemEventHandler", ",", ")", ",", "attrs", ")", "observer", "=", "Observer", "(", ")", "observer", ".", "schedule", "(", "EventHandler", "(", ")", ",", "path", "=", "path", ",", "recursive", "=", "True", ")", "observer", ".", "start", "(", ")", "try", ":", "while", "True", ":", "time", ".", "sleep", "(", "1", ")", "except", "KeyboardInterrupt", ":", "observer", ".", "stop", "(", ")", "observer", ".", "join", "(", ")" ]
Watch a directory for events. - path should be the directory to watch - handler should a function which takes an event_type and src_path and does something interesting. event_type will be one of 'created', 'deleted', 'modified', or 'moved'. src_path will be the absolute path to the file that triggered the event.
[ "Watch", "a", "directory", "for", "events", ".", "-", "path", "should", "be", "the", "directory", "to", "watch", "-", "handler", "should", "a", "function", "which", "takes", "an", "event_type", "and", "src_path", "and", "does", "something", "interesting", ".", "event_type", "will", "be", "one", "of", "created", "deleted", "modified", "or", "moved", ".", "src_path", "will", "be", "the", "absolute", "path", "to", "the", "file", "that", "triggered", "the", "event", "." ]
python
train
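Usage is a single blocking call, as the docstring above describes; a small sketch assuming watch() is re-exported at package level, with a placeholder path.

import easywatch

def handler(event_type, src_path):
    # event_type is one of 'created', 'deleted', 'modified', 'moved'
    print(event_type, src_path)

easywatch.watch('/tmp/incoming', handler)   # blocks, polling once a second, until Ctrl-C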
PyGithub/PyGithub
github/AuthenticatedUser.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/AuthenticatedUser.py#L657-L667
def get_authorizations(self):
    """
    :calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
    """
    return github.PaginatedList.PaginatedList(
        github.Authorization.Authorization,
        self._requester,
        "/authorizations",
        None
    )
[ "def", "get_authorizations", "(", "self", ")", ":", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "Authorization", ".", "Authorization", ",", "self", ".", "_requester", ",", "\"/authorizations\"", ",", "None", ")" ]
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
[ ":", "calls", ":", "GET", "/", "authorizations", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "oauth", ">", "_", ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "Authorization", ".", "Authorization" ]
python
train
soravux/scoop
scoop/backports/dictconfig.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/scoop/backports/dictconfig.py#L153-L174
def resolve(self, s):
    """
    Resolve strings to objects using standard import and attribute
    syntax.
    """
    name = s.split('.')
    used = name.pop(0)
    try:
        found = self.importer(used)
        for frag in name:
            used += '.' + frag
            try:
                found = getattr(found, frag)
            except AttributeError:
                self.importer(used)
                found = getattr(found, frag)
        return found
    except ImportError:
        e, tb = sys.exc_info()[1:]
        v = ValueError('Cannot resolve %r: %s' % (s, e))
        v.__cause__, v.__traceback__ = e, tb
        raise v
[ "def", "resolve", "(", "self", ",", "s", ")", ":", "name", "=", "s", ".", "split", "(", "'.'", ")", "used", "=", "name", ".", "pop", "(", "0", ")", "try", ":", "found", "=", "self", ".", "importer", "(", "used", ")", "for", "frag", "in", "name", ":", "used", "+=", "'.'", "+", "frag", "try", ":", "found", "=", "getattr", "(", "found", ",", "frag", ")", "except", "AttributeError", ":", "self", ".", "importer", "(", "used", ")", "found", "=", "getattr", "(", "found", ",", "frag", ")", "return", "found", "except", "ImportError", ":", "e", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "[", "1", ":", "]", "v", "=", "ValueError", "(", "'Cannot resolve %r: %s'", "%", "(", "s", ",", "e", ")", ")", "v", ".", "__cause__", ",", "v", ".", "__traceback__", "=", "e", ",", "tb", "raise", "v" ]
Resolve strings to objects using standard import and attribute syntax.
[ "Resolve", "strings", "to", "objects", "using", "standard", "import", "and", "attribute", "syntax", "." ]
python
train
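The same dotted-path idea as resolve() above, sketched with only the standard library to show what gets computed for a string such as 'logging.handlers.RotatingFileHandler'; this is an illustration, not the dictconfig class itself.

import importlib

def resolve_path(dotted):
    """Import the leading module, then walk the remaining fragments as attributes."""
    parts = dotted.split('.')
    obj = importlib.import_module(parts[0])
    for frag in parts[1:]:
        try:
            obj = getattr(obj, frag)
        except AttributeError:
            # the fragment is a submodule that has not been imported yet
            importlib.import_module(obj.__name__ + '.' + frag)
            obj = getattr(obj, frag)
    return obj

print(resolve_path('logging.handlers.RotatingFileHandler'))
# <class 'logging.handlers.RotatingFileHandler'>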
googledatalab/pydatalab
google/datalab/_context.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/_context.py#L102-L124
def default():
    """Retrieves a default Context object, creating it if necessary.

    The default Context is a global shared instance used every time the default
    context is retrieved.

    Attempting to use a Context with no project_id will raise an exception, so
    on first use set_project_id must be called.

    Returns:
        An initialized and shared instance of a Context object.
    """
    credentials = du.get_credentials()
    project = du.get_default_project_id()
    if Context._global_context is None:
        config = Context._get_default_config()
        Context._global_context = Context(project, credentials, config)
    else:
        # Always update everything in case the access token is revoked or expired, config changed,
        # or project changed.
        Context._global_context.set_credentials(credentials)
        Context._global_context.set_project_id(project)
    return Context._global_context
[ "def", "default", "(", ")", ":", "credentials", "=", "du", ".", "get_credentials", "(", ")", "project", "=", "du", ".", "get_default_project_id", "(", ")", "if", "Context", ".", "_global_context", "is", "None", ":", "config", "=", "Context", ".", "_get_default_config", "(", ")", "Context", ".", "_global_context", "=", "Context", "(", "project", ",", "credentials", ",", "config", ")", "else", ":", "# Always update everything in case the access token is revoked or expired, config changed,", "# or project changed.", "Context", ".", "_global_context", ".", "set_credentials", "(", "credentials", ")", "Context", ".", "_global_context", ".", "set_project_id", "(", "project", ")", "return", "Context", ".", "_global_context" ]
Retrieves a default Context object, creating it if necessary. The default Context is a global shared instance used every time the default context is retrieved. Attempting to use a Context with no project_id will raise an exception, so on first use set_project_id must be called. Returns: An initialized and shared instance of a Context object.
[ "Retrieves", "a", "default", "Context", "object", "creating", "it", "if", "necessary", "." ]
python
train
josuebrunel/myql
myql/contrib/finance/stockscraper/stockretriever.py
https://github.com/josuebrunel/myql/blob/891bad29cc83a81b3f5ebc4d0401d6f2c22f119e/myql/contrib/finance/stockscraper/stockretriever.py#L49-L54
def get_historical_info(self, symbol, items=None, startDate=None, endDate=None, limit=None):
    """get_historical_info() uses the csv datatable to retrieve all available
    historical data on a typical historical prices page
    """
    startDate, endDate = self.__get_time_range(startDate, endDate)
    response = self.select('yahoo.finance.historicaldata', items, limit).where(
        ['symbol', '=', symbol],
        ['startDate', '=', startDate],
        ['endDate', '=', endDate])
    return response
[ "def", "get_historical_info", "(", "self", ",", "symbol", ",", "items", "=", "None", ",", "startDate", "=", "None", ",", "endDate", "=", "None", ",", "limit", "=", "None", ")", ":", "startDate", ",", "endDate", "=", "self", ".", "__get_time_range", "(", "startDate", ",", "endDate", ")", "response", "=", "self", ".", "select", "(", "'yahoo.finance.historicaldata'", ",", "items", ",", "limit", ")", ".", "where", "(", "[", "'symbol'", ",", "'='", ",", "symbol", "]", ",", "[", "'startDate'", ",", "'='", ",", "startDate", "]", ",", "[", "'endDate'", ",", "'='", ",", "endDate", "]", ")", "return", "response" ]
get_historical_info() uses the csv datatable to retrieve all available historical data on a typical historical prices page
[ "get_historical_info", "()", "uses", "the", "csv", "datatable", "to", "retrieve", "all", "available", "historical", "data", "on", "a", "typical", "historical", "prices", "page" ]
python
train
hovren/crisp
crisp/imu.py
https://github.com/hovren/crisp/blob/65cae19e7cfae5a397859096c9ef666e0f4e7f1b/crisp/imu.py#L66-L77
def rate(self):
    """Get the sample rate in Hz.

    Returns
    ---------
    rate : float
        The sample rate, in Hz, calculated from the timestamps
    """
    N = len(self.timestamps)
    t = self.timestamps[-1] - self.timestamps[0]
    rate = 1.0 * N / t
    return rate
[ "def", "rate", "(", "self", ")", ":", "N", "=", "len", "(", "self", ".", "timestamps", ")", "t", "=", "self", ".", "timestamps", "[", "-", "1", "]", "-", "self", ".", "timestamps", "[", "0", "]", "rate", "=", "1.0", "*", "N", "/", "t", "return", "rate" ]
Get the sample rate in Hz.

Returns
---------
rate : float
    The sample rate, in Hz, calculated from the timestamps
[ "Get", "the", "sample", "rate", "in", "Hz", ".", "Returns", "---------", "rate", ":", "float", "The", "sample", "rate", "in", "Hz", "calculated", "from", "the", "timestamps" ]
python
train
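A small worked example of the calculation in rate() above: 5 timestamps spanning 0.04 s give 5 / 0.04 = 125 Hz. Strictly speaking N samples span N - 1 intervals, so for short sequences this estimate runs slightly high (the nominally 100 Hz stream below reads as 125 Hz).

timestamps = [0.00, 0.01, 0.02, 0.03, 0.04]   # seconds, nominally a 100 Hz stream
N = len(timestamps)                           # 5
t = timestamps[-1] - timestamps[0]            # 0.04 s
print(1.0 * N / t)                            # 125.0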
rochacbruno/python-pagseguro
pagseguro/__init__.py
https://github.com/rochacbruno/python-pagseguro/blob/18a9ca3301783cb323e838574b59f9ddffa9a593/pagseguro/__init__.py#L209-L211
def get(self, url):
    """ do a get transaction """
    return requests.get(url, params=self.data, headers=self.config.HEADERS)
[ "def", "get", "(", "self", ",", "url", ")", ":", "return", "requests", ".", "get", "(", "url", ",", "params", "=", "self", ".", "data", ",", "headers", "=", "self", ".", "config", ".", "HEADERS", ")" ]
do a get transaction
[ "do", "a", "get", "transaction" ]
python
train
shmir/PyIxNetwork
ixnetwork/ixn_interface.py
https://github.com/shmir/PyIxNetwork/blob/e7d7a89c08a5d3a1382b4dcfd915bbfc7eedd33f/ixnetwork/ixn_interface.py#L55-L68
def _create(self, **attributes):
    """ Create new interface on IxNetwork.

    Set enabled and description (==name).

    :return: interface object reference.
    """
    attributes['enabled'] = True
    if 'name' in self._data:
        attributes['description'] = self._data['name']
    obj_ref = self.api.add(self.obj_parent(), self.obj_type(), **attributes)
    self.api.commit()
    return self.api.remapIds(obj_ref)
[ "def", "_create", "(", "self", ",", "*", "*", "attributes", ")", ":", "attributes", "[", "'enabled'", "]", "=", "True", "if", "'name'", "in", "self", ".", "_data", ":", "attributes", "[", "'description'", "]", "=", "self", ".", "_data", "[", "'name'", "]", "obj_ref", "=", "self", ".", "api", ".", "add", "(", "self", ".", "obj_parent", "(", ")", ",", "self", ".", "obj_type", "(", ")", ",", "*", "*", "attributes", ")", "self", ".", "api", ".", "commit", "(", ")", "return", "self", ".", "api", ".", "remapIds", "(", "obj_ref", ")" ]
Create new interface on IxNetwork. Set enabled and description (==name). :return: interface object reference.
[ "Create", "new", "interface", "on", "IxNetwork", "." ]
python
train
hazelcast/hazelcast-python-client
hazelcast/proxy/transactional_map.py
https://github.com/hazelcast/hazelcast-python-client/blob/3f6639443c23d6d036aa343f8e094f052250d2c1/hazelcast/proxy/transactional_map.py#L25-L33
def get(self, key):
    """
    Transactional implementation of :func:`Map.get(key) <hazelcast.proxy.map.Map.get>`

    :param key: (object), the specified key.
    :return: (object), the value for the specified key.
    """
    check_not_none(key, "key can't be none")
    return self._encode_invoke(transactional_map_get_codec, key=self._to_data(key))
[ "def", "get", "(", "self", ",", "key", ")", ":", "check_not_none", "(", "key", ",", "\"key can't be none\"", ")", "return", "self", ".", "_encode_invoke", "(", "transactional_map_get_codec", ",", "key", "=", "self", ".", "_to_data", "(", "key", ")", ")" ]
Transactional implementation of :func:`Map.get(key) <hazelcast.proxy.map.Map.get>` :param key: (object), the specified key. :return: (object), the value for the specified key.
[ "Transactional", "implementation", "of", ":", "func", ":", "Map", ".", "get", "(", "key", ")", "<hazelcast", ".", "proxy", ".", "map", ".", "Map", ".", "get", ">" ]
python
train
summa-tx/riemann
riemann/tx/tx_builder.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx_builder.py#L263-L319
def make_tx(version, tx_ins, tx_outs, lock_time,
            expiry=None, value_balance=0,
            tx_shielded_spends=None, tx_shielded_outputs=None,
            tx_witnesses=None, tx_joinsplits=None,
            joinsplit_pubkey=None, joinsplit_sig=None, binding_sig=None):
    '''
    int, list(TxIn), list(TxOut), int, list(InputWitness) -> Tx
    '''
    n = riemann.get_current_network_name()
    if 'decred' in n:
        return tx.DecredTx(
            version=utils.i2le_padded(version, 4),
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry=utils.i2le_padded(expiry, 4),
            tx_witnesses=[tx_witnesses])
    if 'sprout' in n and tx_joinsplits is not None:
        return tx.SproutTx(
            version=version,
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)
    if 'overwinter' in n:
        return tx.OverwinterTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry_height=utils.i2le_padded(expiry, 4),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)
    if 'sapling' in n:
        return tx.SaplingTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=utils.i2le_padded(lock_time, 4),
            expiry_height=utils.i2le_padded(expiry, 4),
            value_balance=utils.i2le_padded(value_balance, 8),
            tx_shielded_spends=(tx_shielded_spends if tx_shielded_spends is not None else []),
            tx_shielded_outputs=(tx_shielded_outputs if tx_shielded_outputs is not None else []),
            tx_joinsplits=tx_joinsplits if tx_joinsplits is not None else [],
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig,
            binding_sig=binding_sig)
    flag = riemann.network.SEGWIT_TX_FLAG \
        if tx_witnesses is not None else None
    return tx.Tx(version=utils.i2le_padded(version, 4),
                 flag=flag,
                 tx_ins=tx_ins,
                 tx_outs=tx_outs,
                 tx_witnesses=tx_witnesses,
                 lock_time=utils.i2le_padded(lock_time, 4))
[ "def", "make_tx", "(", "version", ",", "tx_ins", ",", "tx_outs", ",", "lock_time", ",", "expiry", "=", "None", ",", "value_balance", "=", "0", ",", "tx_shielded_spends", "=", "None", ",", "tx_shielded_outputs", "=", "None", ",", "tx_witnesses", "=", "None", ",", "tx_joinsplits", "=", "None", ",", "joinsplit_pubkey", "=", "None", ",", "joinsplit_sig", "=", "None", ",", "binding_sig", "=", "None", ")", ":", "n", "=", "riemann", ".", "get_current_network_name", "(", ")", "if", "'decred'", "in", "n", ":", "return", "tx", ".", "DecredTx", "(", "version", "=", "utils", ".", "i2le_padded", "(", "version", ",", "4", ")", ",", "tx_ins", "=", "tx_ins", ",", "tx_outs", "=", "tx_outs", ",", "lock_time", "=", "utils", ".", "i2le_padded", "(", "lock_time", ",", "4", ")", ",", "expiry", "=", "utils", ".", "i2le_padded", "(", "expiry", ",", "4", ")", ",", "tx_witnesses", "=", "[", "tx_witnesses", "]", ")", "if", "'sprout'", "in", "n", "and", "tx_joinsplits", "is", "not", "None", ":", "return", "tx", ".", "SproutTx", "(", "version", "=", "version", ",", "tx_ins", "=", "tx_ins", ",", "tx_outs", "=", "tx_outs", ",", "lock_time", "=", "utils", ".", "i2le_padded", "(", "lock_time", ",", "4", ")", ",", "tx_joinsplits", "=", "tx_joinsplits", "if", "tx_joinsplits", "is", "not", "None", "else", "[", "]", ",", "joinsplit_pubkey", "=", "joinsplit_pubkey", ",", "joinsplit_sig", "=", "joinsplit_sig", ")", "if", "'overwinter'", "in", "n", ":", "return", "tx", ".", "OverwinterTx", "(", "tx_ins", "=", "tx_ins", ",", "tx_outs", "=", "tx_outs", ",", "lock_time", "=", "utils", ".", "i2le_padded", "(", "lock_time", ",", "4", ")", ",", "expiry_height", "=", "utils", ".", "i2le_padded", "(", "expiry", ",", "4", ")", ",", "tx_joinsplits", "=", "tx_joinsplits", "if", "tx_joinsplits", "is", "not", "None", "else", "[", "]", ",", "joinsplit_pubkey", "=", "joinsplit_pubkey", ",", "joinsplit_sig", "=", "joinsplit_sig", ")", "if", "'sapling'", "in", "n", ":", "return", "tx", ".", "SaplingTx", "(", "tx_ins", "=", "tx_ins", ",", "tx_outs", "=", "tx_outs", ",", "lock_time", "=", "utils", ".", "i2le_padded", "(", "lock_time", ",", "4", ")", ",", "expiry_height", "=", "utils", ".", "i2le_padded", "(", "expiry", ",", "4", ")", ",", "value_balance", "=", "utils", ".", "i2le_padded", "(", "value_balance", ",", "8", ")", ",", "tx_shielded_spends", "=", "(", "tx_shielded_spends", "if", "tx_shielded_spends", "is", "not", "None", "else", "[", "]", ")", ",", "tx_shielded_outputs", "=", "(", "tx_shielded_outputs", "if", "tx_shielded_outputs", "is", "not", "None", "else", "[", "]", ")", ",", "tx_joinsplits", "=", "tx_joinsplits", "if", "tx_joinsplits", "is", "not", "None", "else", "[", "]", ",", "joinsplit_pubkey", "=", "joinsplit_pubkey", ",", "joinsplit_sig", "=", "joinsplit_sig", ",", "binding_sig", "=", "binding_sig", ")", "flag", "=", "riemann", ".", "network", ".", "SEGWIT_TX_FLAG", "if", "tx_witnesses", "is", "not", "None", "else", "None", "return", "tx", ".", "Tx", "(", "version", "=", "utils", ".", "i2le_padded", "(", "version", ",", "4", ")", ",", "flag", "=", "flag", ",", "tx_ins", "=", "tx_ins", ",", "tx_outs", "=", "tx_outs", ",", "tx_witnesses", "=", "tx_witnesses", ",", "lock_time", "=", "utils", ".", "i2le_padded", "(", "lock_time", ",", "4", ")", ")" ]
int, list(TxIn), list(TxOut), int, list(InputWitness) -> Tx
[ "int", "list", "(", "TxIn", ")", "list", "(", "TxOut", ")", "int", "list", "(", "InputWitness", ")", "-", ">", "Tx" ]
python
train
Opentrons/opentrons
api/src/opentrons/protocol_api/labware.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/labware.py#L124-L157
def _from_center_cartesian( self, x: float, y: float, z: float) -> Point: """ Specifies an arbitrary point relative to the center of the well based on percentages of the radius in each axis. For example, to specify the back-right corner of a well at 1/4 of the well depth from the bottom, the call would be `_from_center_cartesian(1, 1, -0.5)`. No checks are performed to ensure that the resulting position will be inside of the well. :param x: a float in the range [-1.0, 1.0] for a percentage of half of the radius/length in the X axis :param y: a float in the range [-1.0, 1.0] for a percentage of half of the radius/width in the Y axis :param z: a float in the range [-1.0, 1.0] for a percentage of half of the height above/below the center :return: a Point representing the specified location in absolute deck coordinates """ center = self.center() if self._shape is WellShape.RECTANGULAR: x_size = self._length y_size = self._width else: x_size = self._diameter y_size = self._diameter z_size = self._depth return Point( x=center.point.x + (x * (x_size / 2.0)), y=center.point.y + (y * (y_size / 2.0)), z=center.point.z + (z * (z_size / 2.0)))
[ "def", "_from_center_cartesian", "(", "self", ",", "x", ":", "float", ",", "y", ":", "float", ",", "z", ":", "float", ")", "->", "Point", ":", "center", "=", "self", ".", "center", "(", ")", "if", "self", ".", "_shape", "is", "WellShape", ".", "RECTANGULAR", ":", "x_size", "=", "self", ".", "_length", "y_size", "=", "self", ".", "_width", "else", ":", "x_size", "=", "self", ".", "_diameter", "y_size", "=", "self", ".", "_diameter", "z_size", "=", "self", ".", "_depth", "return", "Point", "(", "x", "=", "center", ".", "point", ".", "x", "+", "(", "x", "*", "(", "x_size", "/", "2.0", ")", ")", ",", "y", "=", "center", ".", "point", ".", "y", "+", "(", "y", "*", "(", "y_size", "/", "2.0", ")", ")", ",", "z", "=", "center", ".", "point", ".", "z", "+", "(", "z", "*", "(", "z_size", "/", "2.0", ")", ")", ")" ]
Specifies an arbitrary point relative to the center of the well based on percentages of the radius in each axis. For example, to specify the back-right corner of a well at 1/4 of the well depth from the bottom, the call would be `_from_center_cartesian(1, 1, -0.5)`. No checks are performed to ensure that the resulting position will be inside of the well. :param x: a float in the range [-1.0, 1.0] for a percentage of half of the radius/length in the X axis :param y: a float in the range [-1.0, 1.0] for a percentage of half of the radius/width in the Y axis :param z: a float in the range [-1.0, 1.0] for a percentage of half of the height above/below the center :return: a Point representing the specified location in absolute deck coordinates
[ "Specifies", "an", "arbitrary", "point", "relative", "to", "the", "center", "of", "the", "well", "based", "on", "percentages", "of", "the", "radius", "in", "each", "axis", ".", "For", "example", "to", "specify", "the", "back", "-", "right", "corner", "of", "a", "well", "at", "1", "/", "4", "of", "the", "well", "depth", "from", "the", "bottom", "the", "call", "would", "be", "_from_center_cartesian", "(", "1", "1", "-", "0", ".", "5", ")", "." ]
python
train
pennlabs/penn-sdk-python
penn/dining.py
https://github.com/pennlabs/penn-sdk-python/blob/31ff12c20d69438d63bc7a796f83ce4f4c828396/penn/dining.py#L157-L173
def venues(self): """Get a list of all venue objects. >>> venues = din.venues() """ response = self._request(V2_ENDPOINTS['VENUES']) # Normalize `dateHours` to array for venue in response["result_data"]["document"]["venue"]: if venue.get("id") in VENUE_NAMES: venue["name"] = VENUE_NAMES[venue.get("id")] if isinstance(venue.get("dateHours"), dict): venue["dateHours"] = [venue["dateHours"]] if "dateHours" in venue: for dh in venue["dateHours"]: if isinstance(dh.get("meal"), dict): dh["meal"] = [dh["meal"]] return response
[ "def", "venues", "(", "self", ")", ":", "response", "=", "self", ".", "_request", "(", "V2_ENDPOINTS", "[", "'VENUES'", "]", ")", "# Normalize `dateHours` to array", "for", "venue", "in", "response", "[", "\"result_data\"", "]", "[", "\"document\"", "]", "[", "\"venue\"", "]", ":", "if", "venue", ".", "get", "(", "\"id\"", ")", "in", "VENUE_NAMES", ":", "venue", "[", "\"name\"", "]", "=", "VENUE_NAMES", "[", "venue", ".", "get", "(", "\"id\"", ")", "]", "if", "isinstance", "(", "venue", ".", "get", "(", "\"dateHours\"", ")", ",", "dict", ")", ":", "venue", "[", "\"dateHours\"", "]", "=", "[", "venue", "[", "\"dateHours\"", "]", "]", "if", "\"dateHours\"", "in", "venue", ":", "for", "dh", "in", "venue", "[", "\"dateHours\"", "]", ":", "if", "isinstance", "(", "dh", ".", "get", "(", "\"meal\"", ")", ",", "dict", ")", ":", "dh", "[", "\"meal\"", "]", "=", "[", "dh", "[", "\"meal\"", "]", "]", "return", "response" ]
Get a list of all venue objects. >>> venues = din.venues()
[ "Get", "a", "list", "of", "all", "venue", "objects", "." ]
python
train
houtianze/bypy
bypy/bypy.py
https://github.com/houtianze/bypy/blob/c59b6183e2fca45f11138bbcdec6247449b2eaad/bypy/bypy.py#L2912-L2920
def dumpcache(self): ''' Usage: dumpcache - display file hash cache''' if cached.cacheloaded: #pprint.pprint(cached.cache) MyPrettyPrinter().pprint(cached.cache) return const.ENoError else: perr("Cache not loaded.") return const.ECacheNotLoaded
[ "def", "dumpcache", "(", "self", ")", ":", "if", "cached", ".", "cacheloaded", ":", "#pprint.pprint(cached.cache)", "MyPrettyPrinter", "(", ")", ".", "pprint", "(", "cached", ".", "cache", ")", "return", "const", ".", "ENoError", "else", ":", "perr", "(", "\"Cache not loaded.\"", ")", "return", "const", ".", "ECacheNotLoaded" ]
Usage: dumpcache - display file hash cache
[ "Usage", ":", "dumpcache", "-", "display", "file", "hash", "cache" ]
python
train
kristianfoerster/melodist
melodist/humidity.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/humidity.py#L33-L109
def disaggregate_humidity(data_daily, method='equal', temp=None, a0=None, a1=None, kr=None, month_hour_precip_mean=None, preserve_daily_mean=False): """general function for humidity disaggregation Args: daily_data: daily values method: keyword specifying the disaggregation method to be used temp: hourly temperature time series (necessary for some methods) kr: parameter for linear_dewpoint_variation method (6 or 12) month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values preserve_daily_mean: if True, correct the daily mean values of the disaggregated data with the observed daily means. Returns: Disaggregated hourly values of relative humidity. """ assert method in ('equal', 'minimal', 'dewpoint_regression', 'min_max', 'linear_dewpoint_variation', 'month_hour_precip_mean'), 'Invalid option' if method == 'equal': hum_disagg = melodist.distribute_equally(data_daily.hum) elif method in ('minimal', 'dewpoint_regression', 'linear_dewpoint_variation'): if method == 'minimal': a0 = 0 a1 = 1 assert a0 is not None and a1 is not None, 'a0 and a1 must be specified' tdew_daily = a0 + a1 * data_daily.tmin tdew = melodist.distribute_equally(tdew_daily) if method == 'linear_dewpoint_variation': assert kr is not None, 'kr must be specified' assert kr in (6, 12), 'kr must be 6 or 12' tdew_delta = 0.5 * np.sin((temp.index.hour + 1) * np.pi / kr - 3. * np.pi / 4.) # eq. (21) from Debele et al. (2007) tdew_nextday = tdew.shift(-24) tdew_nextday.iloc[-24:] = tdew.iloc[-24:] # copy the last day # eq. (20) from Debele et al. (2007): # (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. (2010) - it should # be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around) tdew += temp.index.hour / 24. * (tdew_nextday - tdew) + tdew_delta sat_vap_press_tdew = util.vapor_pressure(tdew, 100) sat_vap_press_t = util.vapor_pressure(temp, 100) hum_disagg = pd.Series(index=temp.index, data=100 * sat_vap_press_tdew / sat_vap_press_t) elif method == 'min_max': assert 'hum_min' in data_daily.columns and 'hum_max' in data_daily.columns, \ 'Minimum and maximum humidity must be present in data frame' hmin = melodist.distribute_equally(data_daily.hum_min) hmax = melodist.distribute_equally(data_daily.hum_max) tmin = melodist.distribute_equally(data_daily.tmin) tmax = melodist.distribute_equally(data_daily.tmax) hum_disagg = hmax + (temp - tmin) / (tmax - tmin) * (hmin - hmax) elif method == 'month_hour_precip_mean': assert month_hour_precip_mean is not None precip_equal = melodist.distribute_equally(data_daily.precip) # daily precipitation equally distributed to hourly values hum_disagg = pd.Series(index=precip_equal.index) locs = list(zip(hum_disagg.index.month, hum_disagg.index.hour, precip_equal > 0)) hum_disagg[:] = month_hour_precip_mean.loc[locs].values if preserve_daily_mean: daily_mean_df = pd.DataFrame(data=dict(obs=data_daily.hum, disagg=hum_disagg.resample('D').mean())) bias = melodist.util.distribute_equally(daily_mean_df.disagg - daily_mean_df.obs) bias = bias.fillna(0) hum_disagg -= bias return hum_disagg.clip(0, 100)
[ "def", "disaggregate_humidity", "(", "data_daily", ",", "method", "=", "'equal'", ",", "temp", "=", "None", ",", "a0", "=", "None", ",", "a1", "=", "None", ",", "kr", "=", "None", ",", "month_hour_precip_mean", "=", "None", ",", "preserve_daily_mean", "=", "False", ")", ":", "assert", "method", "in", "(", "'equal'", ",", "'minimal'", ",", "'dewpoint_regression'", ",", "'min_max'", ",", "'linear_dewpoint_variation'", ",", "'month_hour_precip_mean'", ")", ",", "'Invalid option'", "if", "method", "==", "'equal'", ":", "hum_disagg", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "hum", ")", "elif", "method", "in", "(", "'minimal'", ",", "'dewpoint_regression'", ",", "'linear_dewpoint_variation'", ")", ":", "if", "method", "==", "'minimal'", ":", "a0", "=", "0", "a1", "=", "1", "assert", "a0", "is", "not", "None", "and", "a1", "is", "not", "None", ",", "'a0 and a1 must be specified'", "tdew_daily", "=", "a0", "+", "a1", "*", "data_daily", ".", "tmin", "tdew", "=", "melodist", ".", "distribute_equally", "(", "tdew_daily", ")", "if", "method", "==", "'linear_dewpoint_variation'", ":", "assert", "kr", "is", "not", "None", ",", "'kr must be specified'", "assert", "kr", "in", "(", "6", ",", "12", ")", ",", "'kr must be 6 or 12'", "tdew_delta", "=", "0.5", "*", "np", ".", "sin", "(", "(", "temp", ".", "index", ".", "hour", "+", "1", ")", "*", "np", ".", "pi", "/", "kr", "-", "3.", "*", "np", ".", "pi", "/", "4.", ")", "# eq. (21) from Debele et al. (2007)", "tdew_nextday", "=", "tdew", ".", "shift", "(", "-", "24", ")", "tdew_nextday", ".", "iloc", "[", "-", "24", ":", "]", "=", "tdew", ".", "iloc", "[", "-", "24", ":", "]", "# copy the last day", "# eq. (20) from Debele et al. (2007):", "# (corrected - the equation is wrong both in Debele et al. (2007) and Bregaglio et al. 
(2010) - it should", "# be (T_dp,day)_(d+1) - (T_dp,day)_d instead of the other way around)", "tdew", "+=", "temp", ".", "index", ".", "hour", "/", "24.", "*", "(", "tdew_nextday", "-", "tdew", ")", "+", "tdew_delta", "sat_vap_press_tdew", "=", "util", ".", "vapor_pressure", "(", "tdew", ",", "100", ")", "sat_vap_press_t", "=", "util", ".", "vapor_pressure", "(", "temp", ",", "100", ")", "hum_disagg", "=", "pd", ".", "Series", "(", "index", "=", "temp", ".", "index", ",", "data", "=", "100", "*", "sat_vap_press_tdew", "/", "sat_vap_press_t", ")", "elif", "method", "==", "'min_max'", ":", "assert", "'hum_min'", "in", "data_daily", ".", "columns", "and", "'hum_max'", "in", "data_daily", ".", "columns", ",", "'Minimum and maximum humidity must be present in data frame'", "hmin", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "hum_min", ")", "hmax", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "hum_max", ")", "tmin", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "tmin", ")", "tmax", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "tmax", ")", "hum_disagg", "=", "hmax", "+", "(", "temp", "-", "tmin", ")", "/", "(", "tmax", "-", "tmin", ")", "*", "(", "hmin", "-", "hmax", ")", "elif", "method", "==", "'month_hour_precip_mean'", ":", "assert", "month_hour_precip_mean", "is", "not", "None", "precip_equal", "=", "melodist", ".", "distribute_equally", "(", "data_daily", ".", "precip", ")", "# daily precipitation equally distributed to hourly values", "hum_disagg", "=", "pd", ".", "Series", "(", "index", "=", "precip_equal", ".", "index", ")", "locs", "=", "list", "(", "zip", "(", "hum_disagg", ".", "index", ".", "month", ",", "hum_disagg", ".", "index", ".", "hour", ",", "precip_equal", ">", "0", ")", ")", "hum_disagg", "[", ":", "]", "=", "month_hour_precip_mean", ".", "loc", "[", "locs", "]", ".", "values", "if", "preserve_daily_mean", ":", "daily_mean_df", "=", "pd", ".", "DataFrame", "(", "data", "=", "dict", "(", "obs", "=", "data_daily", ".", "hum", ",", "disagg", "=", "hum_disagg", ".", "resample", "(", "'D'", ")", ".", "mean", "(", ")", ")", ")", "bias", "=", "melodist", ".", "util", ".", "distribute_equally", "(", "daily_mean_df", ".", "disagg", "-", "daily_mean_df", ".", "obs", ")", "bias", "=", "bias", ".", "fillna", "(", "0", ")", "hum_disagg", "-=", "bias", "return", "hum_disagg", ".", "clip", "(", "0", ",", "100", ")" ]
general function for humidity disaggregation Args: daily_data: daily values method: keyword specifying the disaggregation method to be used temp: hourly temperature time series (necessary for some methods) kr: parameter for linear_dewpoint_variation method (6 or 12) month_hour_precip_mean: [month, hour, precip(y/n)] categorical mean values preserve_daily_mean: if True, correct the daily mean values of the disaggregated data with the observed daily means. Returns: Disaggregated hourly values of relative humidity.
[ "general", "function", "for", "humidity", "disaggregation" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/external_config.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/external_config.py#L697-L718
def from_api_repr(cls, resource): """Factory: construct an :class:`~.external_config.ExternalConfig` instance given its API representation. Args: resource (Dict[str, Any]): Definition of an :class:`~.external_config.ExternalConfig` instance in the same representation as is returned from the API. Returns: :class:`~.external_config.ExternalConfig`: Configuration parsed from ``resource``. """ config = cls(resource["sourceFormat"]) for optcls in _OPTION_CLASSES: opts = resource.get(optcls._RESOURCE_NAME) if opts is not None: config._options = optcls.from_api_repr(opts) break config._properties = copy.deepcopy(resource) return config
[ "def", "from_api_repr", "(", "cls", ",", "resource", ")", ":", "config", "=", "cls", "(", "resource", "[", "\"sourceFormat\"", "]", ")", "for", "optcls", "in", "_OPTION_CLASSES", ":", "opts", "=", "resource", ".", "get", "(", "optcls", ".", "_RESOURCE_NAME", ")", "if", "opts", "is", "not", "None", ":", "config", ".", "_options", "=", "optcls", ".", "from_api_repr", "(", "opts", ")", "break", "config", ".", "_properties", "=", "copy", ".", "deepcopy", "(", "resource", ")", "return", "config" ]
Factory: construct an :class:`~.external_config.ExternalConfig` instance given its API representation. Args: resource (Dict[str, Any]): Definition of an :class:`~.external_config.ExternalConfig` instance in the same representation as is returned from the API. Returns: :class:`~.external_config.ExternalConfig`: Configuration parsed from ``resource``.
[ "Factory", ":", "construct", "an", ":", "class", ":", "~", ".", "external_config", ".", "ExternalConfig", "instance", "given", "its", "API", "representation", "." ]
python
train
GetmeUK/MongoFrames
mongoframes/frames.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/frames.py#L771-L775
def get_db(cls): """Return the database for the collection""" if cls._db: return getattr(cls._client, cls._db) return cls._client.get_default_database()
[ "def", "get_db", "(", "cls", ")", ":", "if", "cls", ".", "_db", ":", "return", "getattr", "(", "cls", ".", "_client", ",", "cls", ".", "_db", ")", "return", "cls", ".", "_client", ".", "get_default_database", "(", ")" ]
Return the database for the collection
[ "Return", "the", "database", "for", "the", "collection" ]
python
train
mdeous/fatbotslim
fatbotslim/handlers.py
https://github.com/mdeous/fatbotslim/blob/341595d24454a79caee23750eac271f9d0626c88/fatbotslim/handlers.py#L284-L306
def handle_rights(self, msg): """ Catch-all command that is called whenever a restricted command is triggered. :param msg: message that triggered the command. :type msg: :class:`fatbotslim.irc.Message` """ command = msg.args[0][1:] if command in self.commands_rights: if msg.src.name.lower() in self.commands_rights[command]: if msg.event not in self.commands_rights[command][msg.src.name.lower()]: msg.propagate = False elif '*' in self.commands_rights[command]: if msg.event not in self.commands_rights[command]['*']: msg.propagate = False if (not msg.propagate) and self.notify: message = "You're not allowed to use the '%s' command" % command if msg.event == EVT_PUBLIC: self.irc.msg(msg.dst, message) elif msg.event == EVT_PRIVATE: self.irc.msg(msg.src.name, message) elif msg.event == EVT_NOTICE: self.irc.notice(msg.src.name, message)
[ "def", "handle_rights", "(", "self", ",", "msg", ")", ":", "command", "=", "msg", ".", "args", "[", "0", "]", "[", "1", ":", "]", "if", "command", "in", "self", ".", "commands_rights", ":", "if", "msg", ".", "src", ".", "name", ".", "lower", "(", ")", "in", "self", ".", "commands_rights", "[", "command", "]", ":", "if", "msg", ".", "event", "not", "in", "self", ".", "commands_rights", "[", "command", "]", "[", "msg", ".", "src", ".", "name", ".", "lower", "(", ")", "]", ":", "msg", ".", "propagate", "=", "False", "elif", "'*'", "in", "self", ".", "commands_rights", "[", "command", "]", ":", "if", "msg", ".", "event", "not", "in", "self", ".", "commands_rights", "[", "command", "]", "[", "'*'", "]", ":", "msg", ".", "propagate", "=", "False", "if", "(", "not", "msg", ".", "propagate", ")", "and", "self", ".", "notify", ":", "message", "=", "\"You're not allowed to use the '%s' command\"", "%", "command", "if", "msg", ".", "event", "==", "EVT_PUBLIC", ":", "self", ".", "irc", ".", "msg", "(", "msg", ".", "dst", ",", "message", ")", "elif", "msg", ".", "event", "==", "EVT_PRIVATE", ":", "self", ".", "irc", ".", "msg", "(", "msg", ".", "src", ".", "name", ",", "message", ")", "elif", "msg", ".", "event", "==", "EVT_NOTICE", ":", "self", ".", "irc", ".", "notice", "(", "msg", ".", "src", ".", "name", ",", "message", ")" ]
Catch-all command that is called whenever a restricted command is triggered. :param msg: message that triggered the command. :type msg: :class:`fatbotslim.irc.Message`
[ "Catch", "-", "all", "command", "that", "is", "called", "whenever", "a", "restricted", "command", "is", "triggered", "." ]
python
train
edx/edx-enterprise
enterprise/messages.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/messages.py#L77-L97
def add_unenrollable_item_message(request, item): """ Add a message to the Django message store indicating that the item (i.e. course run, program) is unenrollable. :param request: The current request. :param item: The item that is unenrollable (i.e. a course run). """ messages.info( request, _( '{strong_start}Something happened.{strong_end} ' '{span_start}This {item} is not currently open to new learners. Please start over and select a different ' '{item}.{span_end}' ).format( item=item, strong_start='<strong>', strong_end='</strong>', span_start='<span>', span_end='</span>', ) )
[ "def", "add_unenrollable_item_message", "(", "request", ",", "item", ")", ":", "messages", ".", "info", "(", "request", ",", "_", "(", "'{strong_start}Something happened.{strong_end} '", "'{span_start}This {item} is not currently open to new learners. Please start over and select a different '", "'{item}.{span_end}'", ")", ".", "format", "(", "item", "=", "item", ",", "strong_start", "=", "'<strong>'", ",", "strong_end", "=", "'</strong>'", ",", "span_start", "=", "'<span>'", ",", "span_end", "=", "'</span>'", ",", ")", ")" ]
Add a message to the Django message store indicating that the item (i.e. course run, program) is unenrollable. :param request: The current request. :param item: The item that is unenrollable (i.e. a course run).
[ "Add", "a", "message", "to", "the", "Django", "message", "store", "indicating", "that", "the", "item", "(", "i", ".", "e", ".", "course", "run", "program", ")", "is", "unenrollable", "." ]
python
valid
OpenGov/python_data_wrap
datawrap/listwrap.py
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/listwrap.py#L393-L431
def _get_single_index_request(self, index, set_to_value=False, value=None): ''' Helper function which implements single index requests for __getitem__. ''' adjusted_index = index # Multiply by step length if self.range.step: step = self.range.step adjusted_index *= self.range.step else: step = 1 # Adjust for negative indicies adjusted_index = get_non_negative_index(adjusted_index, self._length) # Push forward by start length if self.range.start != None: adjusted_index += self.range.start # Push forward by start length stop_index = self.range.stop if self.range.stop != None else self._length*step if adjusted_index > stop_index: raise IndexError(index) elem = self._data[adjusted_index] # Check if we have further dimension requirements # NOTE: if we're given dimensions for non-dimensional data, # this will blow up with an IndexError -- user should # not define this dimension with any restrictions if len(self._dim_ranges) > 1: if not has_len(elem): # We throw an IndexError instead of an AttributeError # because if can be caused by either sublist requests # or a bad constructor, and it's usually the former. raise IndexError("Element restricted by dimension_ranges "+ str(self._dim_ranges[1:])+" is not subscriptable: "+ "Dimension cannot be applied to elements with no len()") return self.builder(elem, *self._dim_ranges[1:]) else: if set_to_value: self._data[adjusted_index] = value return elem
[ "def", "_get_single_index_request", "(", "self", ",", "index", ",", "set_to_value", "=", "False", ",", "value", "=", "None", ")", ":", "adjusted_index", "=", "index", "# Multiply by step length", "if", "self", ".", "range", ".", "step", ":", "step", "=", "self", ".", "range", ".", "step", "adjusted_index", "*=", "self", ".", "range", ".", "step", "else", ":", "step", "=", "1", "# Adjust for negative indicies", "adjusted_index", "=", "get_non_negative_index", "(", "adjusted_index", ",", "self", ".", "_length", ")", "# Push forward by start length", "if", "self", ".", "range", ".", "start", "!=", "None", ":", "adjusted_index", "+=", "self", ".", "range", ".", "start", "# Push forward by start length", "stop_index", "=", "self", ".", "range", ".", "stop", "if", "self", ".", "range", ".", "stop", "!=", "None", "else", "self", ".", "_length", "*", "step", "if", "adjusted_index", ">", "stop_index", ":", "raise", "IndexError", "(", "index", ")", "elem", "=", "self", ".", "_data", "[", "adjusted_index", "]", "# Check if we have further dimension requirements", "# NOTE: if we're given dimensions for non-dimensional data,", "# this will blow up with an IndexError -- user should", "# not define this dimension with any restrictions", "if", "len", "(", "self", ".", "_dim_ranges", ")", ">", "1", ":", "if", "not", "has_len", "(", "elem", ")", ":", "# We throw an IndexError instead of an AttributeError", "# because if can be caused by either sublist requests", "# or a bad constructor, and it's usually the former.", "raise", "IndexError", "(", "\"Element restricted by dimension_ranges \"", "+", "str", "(", "self", ".", "_dim_ranges", "[", "1", ":", "]", ")", "+", "\" is not subscriptable: \"", "+", "\"Dimension cannot be applied to elements with no len()\"", ")", "return", "self", ".", "builder", "(", "elem", ",", "*", "self", ".", "_dim_ranges", "[", "1", ":", "]", ")", "else", ":", "if", "set_to_value", ":", "self", ".", "_data", "[", "adjusted_index", "]", "=", "value", "return", "elem" ]
Helper function which implements single index requests for __getitem__.
[ "Helper", "function", "which", "implements", "single", "index", "requests", "for", "__getitem__", "." ]
python
train
googleapis/google-cloud-python
irm/google/cloud/irm_v1alpha2/gapic/incident_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/irm/google/cloud/irm_v1alpha2/gapic/incident_service_client.py#L82-L88
def incident_path(cls, project, incident): """Return a fully-qualified incident string.""" return google.api_core.path_template.expand( "projects/{project}/incidents/{incident}", project=project, incident=incident, )
[ "def", "incident_path", "(", "cls", ",", "project", ",", "incident", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/incidents/{incident}\"", ",", "project", "=", "project", ",", "incident", "=", "incident", ",", ")" ]
Return a fully-qualified incident string.
[ "Return", "a", "fully", "-", "qualified", "incident", "string", "." ]
python
train
ARMmbed/icetea
icetea_lib/tools/GenericProcess.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/tools/GenericProcess.py#L247-L260
def readline(self): """ Readline implementation. :return: popped line from descriptor queue. None if nothing found :raises: RuntimeError if errors happened while reading PIPE """ try: return self._descriptor.read_queue.pop() except IndexError: # No lines in queue if self.has_error(): raise RuntimeError("Errors reading PIPE") return None
[ "def", "readline", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_descriptor", ".", "read_queue", ".", "pop", "(", ")", "except", "IndexError", ":", "# No lines in queue", "if", "self", ".", "has_error", "(", ")", ":", "raise", "RuntimeError", "(", "\"Errors reading PIPE\"", ")", "return", "None" ]
Readline implementation. :return: popped line from descriptor queue. None if nothing found :raises: RuntimeError if errors happened while reading PIPE
[ "Readline", "implementation", "." ]
python
train
carlosp420/dataset-creator
dataset_creator/utils.py
https://github.com/carlosp420/dataset-creator/blob/ea27340b145cb566a36c1836ff42263f1b2003a0/dataset_creator/utils.py#L89-L126
def make_dataset_header(data, file_format, aminoacids): """Creates the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``. Parameters: data (namedtuple): with necessary info for dataset creation. file_format (str): TNT, PHYLIP, NEXUS, FASTA aminoacids (boolean): If ``aminoacids is True`` the header will show ``DATATYPE=PROTEIN`` otherwise it will be ``DNA``. """ if aminoacids: datatype = 'PROTEIN' else: datatype = 'DNA' if file_format in ['NEXUS', 'PHYLIP', 'FASTA']: header = """ #NEXUS BEGIN DATA; DIMENSIONS NTAX={0} NCHAR={1}; FORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-; MATRIX """.format(data.number_taxa, data.number_chars, datatype) elif file_format == 'MEGA': return "#MEGA\n!TITLE title;" else: # file_format: TNT if aminoacids: molecule_type = "prot" else: molecule_type = "dna" header = """ nstates {0}; xread {1} {2}""".format(molecule_type, data.number_chars, data.number_taxa) return header.strip()
[ "def", "make_dataset_header", "(", "data", ",", "file_format", ",", "aminoacids", ")", ":", "if", "aminoacids", ":", "datatype", "=", "'PROTEIN'", "else", ":", "datatype", "=", "'DNA'", "if", "file_format", "in", "[", "'NEXUS'", ",", "'PHYLIP'", ",", "'FASTA'", "]", ":", "header", "=", "\"\"\"\n#NEXUS\n\nBEGIN DATA;\nDIMENSIONS NTAX={0} NCHAR={1};\nFORMAT INTERLEAVE DATATYPE={2} MISSING=? GAP=-;\nMATRIX\n\"\"\"", ".", "format", "(", "data", ".", "number_taxa", ",", "data", ".", "number_chars", ",", "datatype", ")", "elif", "file_format", "==", "'MEGA'", ":", "return", "\"#MEGA\\n!TITLE title;\"", "else", ":", "# file_format: TNT", "if", "aminoacids", ":", "molecule_type", "=", "\"prot\"", "else", ":", "molecule_type", "=", "\"dna\"", "header", "=", "\"\"\"\nnstates {0};\nxread\n{1} {2}\"\"\"", ".", "format", "(", "molecule_type", ",", "data", ".", "number_chars", ",", "data", ".", "number_taxa", ")", "return", "header", ".", "strip", "(", ")" ]
Creates the dataset header for NEXUS files from ``#NEXUS`` to ``MATRIX``. Parameters: data (namedtuple): with necessary info for dataset creation. file_format (str): TNT, PHYLIP, NEXUS, FASTA aminoacids (boolean): If ``aminoacids is True`` the header will show ``DATATYPE=PROTEIN`` otherwise it will be ``DNA``.
[ "Creates", "the", "dataset", "header", "for", "NEXUS", "files", "from", "#NEXUS", "to", "MATRIX", "." ]
python
train
bitshares/python-bitshares
bitshares/bitshares.py
https://github.com/bitshares/python-bitshares/blob/8a3b5954a6abcaaff7c6a5c41d910e58eea3142f/bitshares/bitshares.py#L895-L922
def set_proxy(self, proxy_account, account=None, **kwargs): """ Set a specific proxy for account :param bitshares.account.Account proxy_account: Account to be proxied :param str account: (optional) the account to allow access to (defaults to ``default_account``) """ if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") account = Account(account, blockchain_instance=self) proxy = Account(proxy_account, blockchain_instance=self) options = account["options"] options["voting_account"] = proxy["id"] op = operations.Account_update( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "account": account["id"], "new_options": options, "extensions": {}, "prefix": self.prefix, } ) return self.finalizeOp(op, account["name"], "active", **kwargs)
[ "def", "set_proxy", "(", "self", ",", "proxy_account", ",", "account", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ".", "config", "[", "\"default_account\"", "]", "if", "not", "account", ":", "raise", "ValueError", "(", "\"You need to provide an account\"", ")", "account", "=", "Account", "(", "account", ",", "blockchain_instance", "=", "self", ")", "proxy", "=", "Account", "(", "proxy_account", ",", "blockchain_instance", "=", "self", ")", "options", "=", "account", "[", "\"options\"", "]", "options", "[", "\"voting_account\"", "]", "=", "proxy", "[", "\"id\"", "]", "op", "=", "operations", ".", "Account_update", "(", "*", "*", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"account\"", ":", "account", "[", "\"id\"", "]", ",", "\"new_options\"", ":", "options", ",", "\"extensions\"", ":", "{", "}", ",", "\"prefix\"", ":", "self", ".", "prefix", ",", "}", ")", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", "[", "\"name\"", "]", ",", "\"active\"", ",", "*", "*", "kwargs", ")" ]
Set a specific proxy for account :param bitshares.account.Account proxy_account: Account to be proxied :param str account: (optional) the account to allow access to (defaults to ``default_account``)
[ "Set", "a", "specific", "proxy", "for", "account" ]
python
train
openstax/rhaptos.cnxmlutils
rhaptos/cnxmlutils/utils.py
https://github.com/openstax/rhaptos.cnxmlutils/blob/c32b1a7428dc652e8cd745f3fdf4019a20543649/rhaptos/cnxmlutils/utils.py#L233-L238
def aloha_to_etree(html_source): """ Converts HTML5 from Aloha editor output to a lxml etree. """ xml = _tidy2xhtml5(html_source) for i, transform in enumerate(ALOHA2HTML_TRANSFORM_PIPELINE): xml = transform(xml) return xml
[ "def", "aloha_to_etree", "(", "html_source", ")", ":", "xml", "=", "_tidy2xhtml5", "(", "html_source", ")", "for", "i", ",", "transform", "in", "enumerate", "(", "ALOHA2HTML_TRANSFORM_PIPELINE", ")", ":", "xml", "=", "transform", "(", "xml", ")", "return", "xml" ]
Converts HTML5 from Aloha editor output to a lxml etree.
[ "Converts", "HTML5", "from", "Aloha", "editor", "output", "to", "a", "lxml", "etree", "." ]
python
train
nschloe/colorio
experiments/new-cs.py
https://github.com/nschloe/colorio/blob/357d6001b3cf30f752e23726bf429dc1d1c60b3a/experiments/new-cs.py#L309-L346
def build_grad_matrices(V, points): """Build the sparse m-by-n matrices that map a coefficient set for a function in V to the values of dx and dy at a number m of points. """ # See <https://www.allanswered.com/post/lkbkm/#zxqgk> mesh = V.mesh() bbt = BoundingBoxTree() bbt.build(mesh) dofmap = V.dofmap() el = V.element() rows = [] cols = [] datax = [] datay = [] for i, xy in enumerate(points): cell_id = bbt.compute_first_entity_collision(Point(*xy)) cell = Cell(mesh, cell_id) coordinate_dofs = cell.get_vertex_coordinates() rows.append([i, i, i]) cols.append(dofmap.cell_dofs(cell_id)) v = el.evaluate_basis_derivatives_all(1, xy, coordinate_dofs, cell_id) v = v.reshape(3, 2) datax.append(v[:, 0]) datay.append(v[:, 1]) rows = numpy.concatenate(rows) cols = numpy.concatenate(cols) datax = numpy.concatenate(datax) datay = numpy.concatenate(datay) m = len(points) n = V.dim() dx_matrix = sparse.csr_matrix((datax, (rows, cols)), shape=(m, n)) dy_matrix = sparse.csr_matrix((datay, (rows, cols)), shape=(m, n)) return dx_matrix, dy_matrix
[ "def", "build_grad_matrices", "(", "V", ",", "points", ")", ":", "# See <https://www.allanswered.com/post/lkbkm/#zxqgk>", "mesh", "=", "V", ".", "mesh", "(", ")", "bbt", "=", "BoundingBoxTree", "(", ")", "bbt", ".", "build", "(", "mesh", ")", "dofmap", "=", "V", ".", "dofmap", "(", ")", "el", "=", "V", ".", "element", "(", ")", "rows", "=", "[", "]", "cols", "=", "[", "]", "datax", "=", "[", "]", "datay", "=", "[", "]", "for", "i", ",", "xy", "in", "enumerate", "(", "points", ")", ":", "cell_id", "=", "bbt", ".", "compute_first_entity_collision", "(", "Point", "(", "*", "xy", ")", ")", "cell", "=", "Cell", "(", "mesh", ",", "cell_id", ")", "coordinate_dofs", "=", "cell", ".", "get_vertex_coordinates", "(", ")", "rows", ".", "append", "(", "[", "i", ",", "i", ",", "i", "]", ")", "cols", ".", "append", "(", "dofmap", ".", "cell_dofs", "(", "cell_id", ")", ")", "v", "=", "el", ".", "evaluate_basis_derivatives_all", "(", "1", ",", "xy", ",", "coordinate_dofs", ",", "cell_id", ")", "v", "=", "v", ".", "reshape", "(", "3", ",", "2", ")", "datax", ".", "append", "(", "v", "[", ":", ",", "0", "]", ")", "datay", ".", "append", "(", "v", "[", ":", ",", "1", "]", ")", "rows", "=", "numpy", ".", "concatenate", "(", "rows", ")", "cols", "=", "numpy", ".", "concatenate", "(", "cols", ")", "datax", "=", "numpy", ".", "concatenate", "(", "datax", ")", "datay", "=", "numpy", ".", "concatenate", "(", "datay", ")", "m", "=", "len", "(", "points", ")", "n", "=", "V", ".", "dim", "(", ")", "dx_matrix", "=", "sparse", ".", "csr_matrix", "(", "(", "datax", ",", "(", "rows", ",", "cols", ")", ")", ",", "shape", "=", "(", "m", ",", "n", ")", ")", "dy_matrix", "=", "sparse", ".", "csr_matrix", "(", "(", "datay", ",", "(", "rows", ",", "cols", ")", ")", ",", "shape", "=", "(", "m", ",", "n", ")", ")", "return", "dx_matrix", ",", "dy_matrix" ]
Build the sparse m-by-n matrices that map a coefficient set for a function in V to the values of dx and dy at a number m of points.
[ "Build", "the", "sparse", "m", "-", "by", "-", "n", "matrices", "that", "map", "a", "coefficient", "set", "for", "a", "function", "in", "V", "to", "the", "values", "of", "dx", "and", "dy", "at", "a", "number", "m", "of", "points", "." ]
python
train
konstantint/PassportEye
passporteye/mrz/image.py
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L254-L267
def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3): """Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns the old mrz.""" if roi.shape[1] <= 700: scale_by = int(1050.0 / roi.shape[1] + 0.5) roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant', multichannel=False, anti_aliasing=True) new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params) new_mrz = MRZ.from_ocr(new_text) new_mrz.aux['method'] = 'rescaled(%d)' % filter_order if new_mrz.valid_score > cur_mrz.valid_score: cur_mrz = new_mrz cur_text = new_text return cur_text, cur_mrz
[ "def", "_try_larger_image", "(", "self", ",", "roi", ",", "cur_text", ",", "cur_mrz", ",", "filter_order", "=", "3", ")", ":", "if", "roi", ".", "shape", "[", "1", "]", "<=", "700", ":", "scale_by", "=", "int", "(", "1050.0", "/", "roi", ".", "shape", "[", "1", "]", "+", "0.5", ")", "roi_lg", "=", "transform", ".", "rescale", "(", "roi", ",", "scale_by", ",", "order", "=", "filter_order", ",", "mode", "=", "'constant'", ",", "multichannel", "=", "False", ",", "anti_aliasing", "=", "True", ")", "new_text", "=", "ocr", "(", "roi_lg", ",", "extra_cmdline_params", "=", "self", ".", "extra_cmdline_params", ")", "new_mrz", "=", "MRZ", ".", "from_ocr", "(", "new_text", ")", "new_mrz", ".", "aux", "[", "'method'", "]", "=", "'rescaled(%d)'", "%", "filter_order", "if", "new_mrz", ".", "valid_score", ">", "cur_mrz", ".", "valid_score", ":", "cur_mrz", "=", "new_mrz", "cur_text", "=", "new_text", "return", "cur_text", ",", "cur_mrz" ]
Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns the old mrz.
[ "Attempts", "to", "improve", "the", "OCR", "result", "by", "scaling", "the", "image", ".", "If", "the", "new", "mrz", "is", "better", "returns", "it", "otherwise", "returns", "the", "old", "mrz", "." ]
python
train
hamperbot/hamper
hamper/commander.py
https://github.com/hamperbot/hamper/blob/6f841ec4dcc319fdd7bb3ca1f990e3b7a458771b/hamper/commander.py#L348-L368
def runPlugins(self, category, func, protocol, *args): """ Run the specified set of plugins against a given protocol. """ # Plugins are already sorted by priority for plugin in self.plugins: # If a plugin throws an exception, we should catch it gracefully. try: event_listener = getattr(plugin, func) except AttributeError: # If the plugin doesn't implement the event, do nothing pass else: try: stop = event_listener(protocol, *args) if stop: break except Exception: # A plugin should not be able to crash the bot. # Catch and log all errors. traceback.print_exc()
[ "def", "runPlugins", "(", "self", ",", "category", ",", "func", ",", "protocol", ",", "*", "args", ")", ":", "# Plugins are already sorted by priority", "for", "plugin", "in", "self", ".", "plugins", ":", "# If a plugin throws an exception, we should catch it gracefully.", "try", ":", "event_listener", "=", "getattr", "(", "plugin", ",", "func", ")", "except", "AttributeError", ":", "# If the plugin doesn't implement the event, do nothing", "pass", "else", ":", "try", ":", "stop", "=", "event_listener", "(", "protocol", ",", "*", "args", ")", "if", "stop", ":", "break", "except", "Exception", ":", "# A plugin should not be able to crash the bot.", "# Catch and log all errors.", "traceback", ".", "print_exc", "(", ")" ]
Run the specified set of plugins against a given protocol.
[ "Run", "the", "specified", "set", "of", "plugins", "against", "a", "given", "protocol", "." ]
python
train
twilio/twilio-python
twilio/rest/sync/v1/service/sync_map/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/sync/v1/service/sync_map/__init__.py#L372-L386
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncMapContext for this SyncMapInstance :rtype: twilio.rest.sync.v1.service.sync_map.SyncMapContext """ if self._context is None: self._context = SyncMapContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "SyncMapContext", "(", "self", ".", "_version", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncMapContext for this SyncMapInstance :rtype: twilio.rest.sync.v1.service.sync_map.SyncMapContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
ranaroussi/ezibpy
ezibpy/ezibpy.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/ezibpy.py#L1892-L1929
def requestHistoricalData(self, contracts=None, resolution="1 min", lookback="1 D", data="TRADES", end_datetime=None, rth=False, csv_path=None, format_date=2, utc=False): """ Download to historical data https://www.interactivebrokers.com/en/software/api/apiguide/java/reqhistoricaldata.htm """ self.csv_path = csv_path self.utc_history = utc if end_datetime == None: end_datetime = time.strftime(dataTypes["DATE_TIME_FORMAT_HISTORY"]) if contracts == None: contracts = list(self.contracts.values()) if not isinstance(contracts, list): contracts = [contracts] for contract in contracts: show = str(data).upper() if contract.m_secType in ['CASH', 'CFD'] and data == 'TRADES': show = 'MIDPOINT' # tickerId = self.tickerId(contract.m_symbol) tickerId = self.tickerId(self.contractString(contract)) self.ibConn.reqHistoricalData( tickerId = tickerId, contract = contract, endDateTime = end_datetime, durationStr = lookback, barSizeSetting = resolution, whatToShow = show, useRTH = int(rth), formatDate = int(format_date) )
[ "def", "requestHistoricalData", "(", "self", ",", "contracts", "=", "None", ",", "resolution", "=", "\"1 min\"", ",", "lookback", "=", "\"1 D\"", ",", "data", "=", "\"TRADES\"", ",", "end_datetime", "=", "None", ",", "rth", "=", "False", ",", "csv_path", "=", "None", ",", "format_date", "=", "2", ",", "utc", "=", "False", ")", ":", "self", ".", "csv_path", "=", "csv_path", "self", ".", "utc_history", "=", "utc", "if", "end_datetime", "==", "None", ":", "end_datetime", "=", "time", ".", "strftime", "(", "dataTypes", "[", "\"DATE_TIME_FORMAT_HISTORY\"", "]", ")", "if", "contracts", "==", "None", ":", "contracts", "=", "list", "(", "self", ".", "contracts", ".", "values", "(", ")", ")", "if", "not", "isinstance", "(", "contracts", ",", "list", ")", ":", "contracts", "=", "[", "contracts", "]", "for", "contract", "in", "contracts", ":", "show", "=", "str", "(", "data", ")", ".", "upper", "(", ")", "if", "contract", ".", "m_secType", "in", "[", "'CASH'", ",", "'CFD'", "]", "and", "data", "==", "'TRADES'", ":", "show", "=", "'MIDPOINT'", "# tickerId = self.tickerId(contract.m_symbol)", "tickerId", "=", "self", ".", "tickerId", "(", "self", ".", "contractString", "(", "contract", ")", ")", "self", ".", "ibConn", ".", "reqHistoricalData", "(", "tickerId", "=", "tickerId", ",", "contract", "=", "contract", ",", "endDateTime", "=", "end_datetime", ",", "durationStr", "=", "lookback", ",", "barSizeSetting", "=", "resolution", ",", "whatToShow", "=", "show", ",", "useRTH", "=", "int", "(", "rth", ")", ",", "formatDate", "=", "int", "(", "format_date", ")", ")" ]
Download to historical data https://www.interactivebrokers.com/en/software/api/apiguide/java/reqhistoricaldata.htm
[ "Download", "to", "historical", "data", "https", ":", "//", "www", ".", "interactivebrokers", ".", "com", "/", "en", "/", "software", "/", "api", "/", "apiguide", "/", "java", "/", "reqhistoricaldata", ".", "htm" ]
python
train
nccgroup/Scout2
AWSScout2/services/cloudwatch.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/cloudwatch.py#L21-L35
def parse_alarm(self, global_params, region, alarm): """ Parse a single CloudWatch trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param alarm: Alarm """ alarm['arn'] = alarm.pop('AlarmArn') alarm['name'] = alarm.pop('AlarmName') # Drop some data for k in ['AlarmConfigurationUpdatedTimestamp', 'StateReason', 'StateReasonData', 'StateUpdatedTimestamp']: foo = alarm.pop(k) if k in alarm else None alarm_id = self.get_non_aws_id(alarm['arn']) self.alarms[alarm_id] = alarm
[ "def", "parse_alarm", "(", "self", ",", "global_params", ",", "region", ",", "alarm", ")", ":", "alarm", "[", "'arn'", "]", "=", "alarm", ".", "pop", "(", "'AlarmArn'", ")", "alarm", "[", "'name'", "]", "=", "alarm", ".", "pop", "(", "'AlarmName'", ")", "# Drop some data", "for", "k", "in", "[", "'AlarmConfigurationUpdatedTimestamp'", ",", "'StateReason'", ",", "'StateReasonData'", ",", "'StateUpdatedTimestamp'", "]", ":", "foo", "=", "alarm", ".", "pop", "(", "k", ")", "if", "k", "in", "alarm", "else", "None", "alarm_id", "=", "self", ".", "get_non_aws_id", "(", "alarm", "[", "'arn'", "]", ")", "self", ".", "alarms", "[", "alarm_id", "]", "=", "alarm" ]
Parse a single CloudWatch trail :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param alarm: Alarm
[ "Parse", "a", "single", "CloudWatch", "trail" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_azure/c7n_azure/utils.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_azure/c7n_azure/utils.py#L259-L270
def _get_port_range(range_str): """ Given a string with a port or port range: '80', '80-120' Returns tuple with range start and end ports: (80, 80), (80, 120) """ if range_str == '*': return PortsRangeHelper.PortsRange(start=0, end=65535) s = range_str.split('-') if len(s) == 2: return PortsRangeHelper.PortsRange(start=int(s[0]), end=int(s[1])) return PortsRangeHelper.PortsRange(start=int(s[0]), end=int(s[0]))
[ "def", "_get_port_range", "(", "range_str", ")", ":", "if", "range_str", "==", "'*'", ":", "return", "PortsRangeHelper", ".", "PortsRange", "(", "start", "=", "0", ",", "end", "=", "65535", ")", "s", "=", "range_str", ".", "split", "(", "'-'", ")", "if", "len", "(", "s", ")", "==", "2", ":", "return", "PortsRangeHelper", ".", "PortsRange", "(", "start", "=", "int", "(", "s", "[", "0", "]", ")", ",", "end", "=", "int", "(", "s", "[", "1", "]", ")", ")", "return", "PortsRangeHelper", ".", "PortsRange", "(", "start", "=", "int", "(", "s", "[", "0", "]", ")", ",", "end", "=", "int", "(", "s", "[", "0", "]", ")", ")" ]
Given a string with a port or port range: '80', '80-120' Returns tuple with range start and end ports: (80, 80), (80, 120)
[ "Given", "a", "string", "with", "a", "port", "or", "port", "range", ":", "80", "80", "-", "120", "Returns", "tuple", "with", "range", "start", "and", "end", "ports", ":", "(", "80", "80", ")", "(", "80", "120", ")" ]
python
train
GaryLee/cmdlet
cmdlet/cmds.py
https://github.com/GaryLee/cmdlet/blob/5852a63fc2c7dd723a3d7abe18455f8dacb49433/cmdlet/cmds.py#L691-L695
def substitute(prev, *args, **kw): '''alias of string.Template.substitute''' template_obj = string.Template(*args, **kw) for data in prev: yield template_obj.substitute(data)
[ "def", "substitute", "(", "prev", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "template_obj", "=", "string", ".", "Template", "(", "*", "args", ",", "*", "*", "kw", ")", "for", "data", "in", "prev", ":", "yield", "template_obj", ".", "substitute", "(", "data", ")" ]
alias of string.Template.substitute
[ "alias", "of", "string", ".", "Template", ".", "substitute" ]
python
valid
nccgroup/Scout2
AWSScout2/services/redshift.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/redshift.py#L60-L70
def parse_security_group(self, global_params, region, security_group): """ Parse a single Redsfhit security group :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param security)_group: Security group """ name = security_group.pop('ClusterSecurityGroupName') security_group['name'] = name self.security_groups['name'] = security_group
[ "def", "parse_security_group", "(", "self", ",", "global_params", ",", "region", ",", "security_group", ")", ":", "name", "=", "security_group", ".", "pop", "(", "'ClusterSecurityGroupName'", ")", "security_group", "[", "'name'", "]", "=", "name", "self", ".", "security_groups", "[", "'name'", "]", "=", "security_group" ]
Parse a single Redsfhit security group :param global_params: Parameters shared for all regions :param region: Name of the AWS region :param security)_group: Security group
[ "Parse", "a", "single", "Redsfhit", "security", "group" ]
python
train
Projectplace/basepage
basepage/base_page.py
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L722-L738
def wait_for_non_empty_text(self, locator, params=None, timeout=5): """ Wait and get elements when they're populated with any text. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) maximum waiting time (in seconds) (default: 5) :return: list of WebElements """ def _do_wait(): elements = self.get_present_elements(locator, params, timeout=0) for element in elements: if not self.get_text(element): return False return elements return ActionWait(timeout).until(_do_wait, "Element text was never populated!")
[ "def", "wait_for_non_empty_text", "(", "self", ",", "locator", ",", "params", "=", "None", ",", "timeout", "=", "5", ")", ":", "def", "_do_wait", "(", ")", ":", "elements", "=", "self", ".", "get_present_elements", "(", "locator", ",", "params", ",", "timeout", "=", "0", ")", "for", "element", "in", "elements", ":", "if", "not", "self", ".", "get_text", "(", "element", ")", ":", "return", "False", "return", "elements", "return", "ActionWait", "(", "timeout", ")", ".", "until", "(", "_do_wait", ",", "\"Element text was never populated!\"", ")" ]
Wait and get elements when they're populated with any text. :param locator: locator tuple :param params: (optional) locator params :param timeout: (optional) maximum waiting time (in seconds) (default: 5) :return: list of WebElements
[ "Wait", "and", "get", "elements", "when", "they", "re", "populated", "with", "any", "text", "." ]
python
train
fitnr/twitter_bot_utils
twitter_bot_utils/helpers.py
https://github.com/fitnr/twitter_bot_utils/blob/21f35afa5048cd3efa54db8cb87d405f69a78a62/twitter_bot_utils/helpers.py#L98-L113
def remove_entities(status, entitylist): '''Remove entities for a list of items.''' try: entities = status.entities text = status.text except AttributeError: entities = status.get('entities', dict()) text = status['text'] indices = [ent['indices'] for etype, entval in list(entities.items()) for ent in entval if etype in entitylist] indices.sort(key=lambda x: x[0], reverse=True) for start, end in indices: text = text[:start] + text[end:] return text
[ "def", "remove_entities", "(", "status", ",", "entitylist", ")", ":", "try", ":", "entities", "=", "status", ".", "entities", "text", "=", "status", ".", "text", "except", "AttributeError", ":", "entities", "=", "status", ".", "get", "(", "'entities'", ",", "dict", "(", ")", ")", "text", "=", "status", "[", "'text'", "]", "indices", "=", "[", "ent", "[", "'indices'", "]", "for", "etype", ",", "entval", "in", "list", "(", "entities", ".", "items", "(", ")", ")", "for", "ent", "in", "entval", "if", "etype", "in", "entitylist", "]", "indices", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "0", "]", ",", "reverse", "=", "True", ")", "for", "start", ",", "end", "in", "indices", ":", "text", "=", "text", "[", ":", "start", "]", "+", "text", "[", "end", ":", "]", "return", "text" ]
Remove entities for a list of items.
[ "Remove", "entities", "for", "a", "list", "of", "items", "." ]
python
train
fermiPy/fermipy
fermipy/jobs/chain.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/chain.py#L345-L365
def update_args(self, override_args): """Update the argument used to invoke the application Note that this will also update the dictionary of input and output files. Parameters ----------- override_args : dict dictionary passed to the links """ self.args = extract_arguments(override_args, self.args) self._map_arguments(self.args) scratch_dir = self.args.get('scratch', None) if is_not_null(scratch_dir): self._file_stage = FileStageManager(scratch_dir, '.') for link in self._links.values(): link._set_file_stage(self._file_stage) self._latch_file_info()
[ "def", "update_args", "(", "self", ",", "override_args", ")", ":", "self", ".", "args", "=", "extract_arguments", "(", "override_args", ",", "self", ".", "args", ")", "self", ".", "_map_arguments", "(", "self", ".", "args", ")", "scratch_dir", "=", "self", ".", "args", ".", "get", "(", "'scratch'", ",", "None", ")", "if", "is_not_null", "(", "scratch_dir", ")", ":", "self", ".", "_file_stage", "=", "FileStageManager", "(", "scratch_dir", ",", "'.'", ")", "for", "link", "in", "self", ".", "_links", ".", "values", "(", ")", ":", "link", ".", "_set_file_stage", "(", "self", ".", "_file_stage", ")", "self", ".", "_latch_file_info", "(", ")" ]
Update the argument used to invoke the application Note that this will also update the dictionary of input and output files. Parameters ----------- override_args : dict dictionary passed to the links
[ "Update", "the", "argument", "used", "to", "invoke", "the", "application" ]
python
train
Ouranosinc/xclim
xclim/checks.py
https://github.com/Ouranosinc/xclim/blob/2080d139188bd8de2aeca097a025c2d89d6e0e09/xclim/checks.py#L108-L118
def valid_daily_max_min_temperature(comp, units='K'): r"""Decorator to check that a computation runs on valid min and max temperature datasets.""" @wraps(comp) def func(tasmax, tasmin, **kwds): valid_daily_max_temperature(tasmax, units) valid_daily_min_temperature(tasmin, units) return comp(tasmax, tasmin, **kwds) return func
[ "def", "valid_daily_max_min_temperature", "(", "comp", ",", "units", "=", "'K'", ")", ":", "@", "wraps", "(", "comp", ")", "def", "func", "(", "tasmax", ",", "tasmin", ",", "*", "*", "kwds", ")", ":", "valid_daily_max_temperature", "(", "tasmax", ",", "units", ")", "valid_daily_min_temperature", "(", "tasmin", ",", "units", ")", "return", "comp", "(", "tasmax", ",", "tasmin", ",", "*", "*", "kwds", ")", "return", "func" ]
r"""Decorator to check that a computation runs on valid min and max temperature datasets.
[ "r", "Decorator", "to", "check", "that", "a", "computation", "runs", "on", "valid", "min", "and", "max", "temperature", "datasets", "." ]
python
train
shoebot/shoebot
lib/web/BeautifulSoup.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/web/BeautifulSoup.py#L1398-L1416
def parse_declaration(self, i): """Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.""" j = None if self.rawdata[i:i+9] == '<![CDATA[': k = self.rawdata.find(']]>', i) if k == -1: k = len(self.rawdata) data = self.rawdata[i+9:k] j = k+3 self._toStringSubclass(data, CData) else: try: j = SGMLParser.parse_declaration(self, i) except SGMLParseError: toHandle = self.rawdata[i:] self.handle_data(toHandle) j = i + len(toHandle) return j
[ "def", "parse_declaration", "(", "self", ",", "i", ")", ":", "j", "=", "None", "if", "self", ".", "rawdata", "[", "i", ":", "i", "+", "9", "]", "==", "'<![CDATA['", ":", "k", "=", "self", ".", "rawdata", ".", "find", "(", "']]>'", ",", "i", ")", "if", "k", "==", "-", "1", ":", "k", "=", "len", "(", "self", ".", "rawdata", ")", "data", "=", "self", ".", "rawdata", "[", "i", "+", "9", ":", "k", "]", "j", "=", "k", "+", "3", "self", ".", "_toStringSubclass", "(", "data", ",", "CData", ")", "else", ":", "try", ":", "j", "=", "SGMLParser", ".", "parse_declaration", "(", "self", ",", "i", ")", "except", "SGMLParseError", ":", "toHandle", "=", "self", ".", "rawdata", "[", "i", ":", "]", "self", ".", "handle_data", "(", "toHandle", ")", "j", "=", "i", "+", "len", "(", "toHandle", ")", "return", "j" ]
Treat a bogus SGML declaration as raw data. Treat a CDATA declaration as a CData object.
[ "Treat", "a", "bogus", "SGML", "declaration", "as", "raw", "data", ".", "Treat", "a", "CDATA", "declaration", "as", "a", "CData", "object", "." ]
python
valid
KelSolaar/Foundations
foundations/parsers.py
https://github.com/KelSolaar/Foundations/blob/5c141330faf09dad70a12bc321f4c564917d0a91/foundations/parsers.py#L238-L253
def namespace_splitter(self, value): """ Setter for **self.__namespace_splitter** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "namespace_splitter", value) assert len(value) == 1, "'{0}' attribute: '{1}' has multiples characters!".format("namespace_splitter", value) assert not re.search(r"\w", value), "'{0}' attribute: '{1}' is an alphanumeric character!".format( "namespace_splitter", value) self.__namespace_splitter = value
[ "def", "namespace_splitter", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"namespace_splitter\"", ",", "value", ")", "assert", "len", "(", "value", ")", "==", "1", ",", "\"'{0}' attribute: '{1}' has multiples characters!\"", ".", "format", "(", "\"namespace_splitter\"", ",", "value", ")", "assert", "not", "re", ".", "search", "(", "r\"\\w\"", ",", "value", ")", ",", "\"'{0}' attribute: '{1}' is an alphanumeric character!\"", ".", "format", "(", "\"namespace_splitter\"", ",", "value", ")", "self", ".", "__namespace_splitter", "=", "value" ]
Setter for **self.__namespace_splitter** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__namespace_splitter", "**", "attribute", "." ]
python
train
devassistant/devassistant
devassistant/dapi/dapicli.py
https://github.com/devassistant/devassistant/blob/2dbfeaa666a64127263664d18969c55d19ecc83e/devassistant/dapi/dapicli.py#L302-L319
def format_installed_dap_list(simple=False): '''Formats all installed DAPs in a human readable form to list of lines''' lines = [] if simple: for pkg in sorted(get_installed_daps()): lines.append(pkg) else: for pkg, instances in sorted(get_installed_daps_detailed().items()): versions = [] for instance in instances: location = utils.unexpanduser(instance['location']) version = instance['version'] if not versions: # if this is the first version = utils.bold(version) versions.append('{v}:{p}'.format(v=version, p=location)) pkg = utils.bold(pkg) lines.append('{pkg} ({versions})'.format(pkg=pkg, versions=' '.join(versions))) return lines
[ "def", "format_installed_dap_list", "(", "simple", "=", "False", ")", ":", "lines", "=", "[", "]", "if", "simple", ":", "for", "pkg", "in", "sorted", "(", "get_installed_daps", "(", ")", ")", ":", "lines", ".", "append", "(", "pkg", ")", "else", ":", "for", "pkg", ",", "instances", "in", "sorted", "(", "get_installed_daps_detailed", "(", ")", ".", "items", "(", ")", ")", ":", "versions", "=", "[", "]", "for", "instance", "in", "instances", ":", "location", "=", "utils", ".", "unexpanduser", "(", "instance", "[", "'location'", "]", ")", "version", "=", "instance", "[", "'version'", "]", "if", "not", "versions", ":", "# if this is the first", "version", "=", "utils", ".", "bold", "(", "version", ")", "versions", ".", "append", "(", "'{v}:{p}'", ".", "format", "(", "v", "=", "version", ",", "p", "=", "location", ")", ")", "pkg", "=", "utils", ".", "bold", "(", "pkg", ")", "lines", ".", "append", "(", "'{pkg} ({versions})'", ".", "format", "(", "pkg", "=", "pkg", ",", "versions", "=", "' '", ".", "join", "(", "versions", ")", ")", ")", "return", "lines" ]
Formats all installed DAPs in a human readable form to list of lines
[ "Formats", "all", "installed", "DAPs", "in", "a", "human", "readable", "form", "to", "list", "of", "lines" ]
python
train
mozilla/treeherder
treeherder/seta/models.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/seta/models.py#L21-L37
def adjust_jobs_priority(self, high_value_jobs, priority=1): """For every job priority determine if we need to increase or decrease the job priority Currently, high value jobs have a priority of 1 and a timeout of 0. """ # Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100 # for jobs update via load_preseed) are updated for jp in JobPriority.objects.filter(expiration_date__isnull=True): if jp.unique_identifier() not in high_value_jobs: if jp.priority != SETA_LOW_VALUE_PRIORITY: logger.warning('Decreasing priority of %s', jp.unique_identifier()) jp.priority = SETA_LOW_VALUE_PRIORITY jp.save(update_fields=['priority']) elif jp.priority != priority: logger.warning('Increasing priority of %s', jp.unique_identifier()) jp.priority = priority jp.save(update_fields=['priority'])
[ "def", "adjust_jobs_priority", "(", "self", ",", "high_value_jobs", ",", "priority", "=", "1", ")", ":", "# Only job priorities that don't have an expiration date (2 weeks for new jobs or year 2100", "# for jobs update via load_preseed) are updated", "for", "jp", "in", "JobPriority", ".", "objects", ".", "filter", "(", "expiration_date__isnull", "=", "True", ")", ":", "if", "jp", ".", "unique_identifier", "(", ")", "not", "in", "high_value_jobs", ":", "if", "jp", ".", "priority", "!=", "SETA_LOW_VALUE_PRIORITY", ":", "logger", ".", "warning", "(", "'Decreasing priority of %s'", ",", "jp", ".", "unique_identifier", "(", ")", ")", "jp", ".", "priority", "=", "SETA_LOW_VALUE_PRIORITY", "jp", ".", "save", "(", "update_fields", "=", "[", "'priority'", "]", ")", "elif", "jp", ".", "priority", "!=", "priority", ":", "logger", ".", "warning", "(", "'Increasing priority of %s'", ",", "jp", ".", "unique_identifier", "(", ")", ")", "jp", ".", "priority", "=", "priority", "jp", ".", "save", "(", "update_fields", "=", "[", "'priority'", "]", ")" ]
For every job priority determine if we need to increase or decrease the job priority Currently, high value jobs have a priority of 1 and a timeout of 0.
[ "For", "every", "job", "priority", "determine", "if", "we", "need", "to", "increase", "or", "decrease", "the", "job", "priority" ]
python
train
funilrys/PyFunceble
PyFunceble/database.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/database.py#L270-L279
def _backup(self): """ Save the current database into the inactive-db.json file. """ if PyFunceble.CONFIGURATION["inactive_database"]: # The database subsystem is activated. # We save the current database state into the database file. Dict(PyFunceble.INTERN["inactive_db"]).to_json(self.inactive_db_path)
[ "def", "_backup", "(", "self", ")", ":", "if", "PyFunceble", ".", "CONFIGURATION", "[", "\"inactive_database\"", "]", ":", "# The database subsystem is activated.", "# We save the current database state into the database file.", "Dict", "(", "PyFunceble", ".", "INTERN", "[", "\"inactive_db\"", "]", ")", ".", "to_json", "(", "self", ".", "inactive_db_path", ")" ]
Save the current database into the inactive-db.json file.
[ "Save", "the", "current", "database", "into", "the", "inactive", "-", "db", ".", "json", "file", "." ]
python
test
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor.py#L90-L101
def system_monitor_power_alert_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") system_monitor = ET.SubElement(config, "system-monitor", xmlns="urn:brocade.com:mgmt:brocade-system-monitor") power = ET.SubElement(system_monitor, "power") alert = ET.SubElement(power, "alert") state = ET.SubElement(alert, "state") state.text = kwargs.pop('state') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "system_monitor_power_alert_state", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "system_monitor", "=", "ET", ".", "SubElement", "(", "config", ",", "\"system-monitor\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-system-monitor\"", ")", "power", "=", "ET", ".", "SubElement", "(", "system_monitor", ",", "\"power\"", ")", "alert", "=", "ET", ".", "SubElement", "(", "power", ",", "\"alert\"", ")", "state", "=", "ET", ".", "SubElement", "(", "alert", ",", "\"state\"", ")", "state", ".", "text", "=", "kwargs", ".", "pop", "(", "'state'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
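The docstring above says only "Auto Generated Code"; as a rough standalone sketch, the XML payload this method assembles looks like the following (stdlib ElementTree only; the state value is a placeholder):

import xml.etree.ElementTree as ET

config = ET.Element("config")
system_monitor = ET.SubElement(config, "system-monitor",
                               xmlns="urn:brocade.com:mgmt:brocade-system-monitor")
power = ET.SubElement(system_monitor, "power")
alert = ET.SubElement(power, "alert")
state = ET.SubElement(alert, "state")
state.text = "enable"  # placeholder; the real method pops this value from kwargs

# Prints the nested config/system-monitor/power/alert/state document on one line.
print(ET.tostring(config).decode())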
hozn/stravalib
stravalib/attributes.py
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/attributes.py#L81-L95
def unmarshal(self, v): """ Convert the value from parsed JSON structure to native python representation. By default this will leave the value as-is since the JSON parsing routines typically convert to native types. The exception may be date strings or other more complex types, where subclasses will override this behavior. """ if self.units: # Note that we don't want to cast to type in this case! if not isinstance(v, Quantity): v = self.units(v) elif not isinstance(v, self.type): v = self.type(v) return v
[ "def", "unmarshal", "(", "self", ",", "v", ")", ":", "if", "self", ".", "units", ":", "# Note that we don't want to cast to type in this case!", "if", "not", "isinstance", "(", "v", ",", "Quantity", ")", ":", "v", "=", "self", ".", "units", "(", "v", ")", "elif", "not", "isinstance", "(", "v", ",", "self", ".", "type", ")", ":", "v", "=", "self", ".", "type", "(", "v", ")", "return", "v" ]
Convert the value from parsed JSON structure to native python representation. By default this will leave the value as-is since the JSON parsing routines typically convert to native types. The exception may be date strings or other more complex types, where subclasses will override this behavior.
[ "Convert", "the", "value", "from", "parsed", "JSON", "structure", "to", "native", "python", "representation", "." ]
python
train
ManiacalLabs/PixelWeb
pixelweb/bottle.py
https://github.com/ManiacalLabs/PixelWeb/blob/9eacbfd40a1d35011c2dcea15c303da9636c6b9e/pixelweb/bottle.py#L553-L558
def get_config(self, key, default=None): ''' Lookup a config field and return its value, first checking the route.config, then route.app.config.''' for conf in (self.config, self.app.config): if key in conf: return conf[key] return default
[ "def", "get_config", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "for", "conf", "in", "(", "self", ".", "config", ",", "self", ".", "app", ".", "config", ")", ":", "if", "key", "in", "conf", ":", "return", "conf", "[", "key", "]", "return", "default" ]
Lookup a config field and return its value, first checking the route.config, then route.app.config.
[ "Lookup", "a", "config", "field", "and", "return", "its", "value", "first", "checking", "the", "route", ".", "config", "then", "route", ".", "app", ".", "config", "." ]
python
train
AltSchool/dynamic-rest
dynamic_rest/filters.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/filters.py#L73-L140
def generate_query_key(self, serializer): """Get the key that can be passed to Django's filter method. To account for serialier field name rewrites, this method translates serializer field names to model field names by inspecting `serializer`. For example, a query like `filter{users.events}` would be returned as `users__events`. Arguments: serializer: A DRF serializer Returns: A filter key. """ rewritten = [] last = len(self.field) - 1 s = serializer field = None for i, field_name in enumerate(self.field): # Note: .fields can be empty for related serializers that aren't # sideloaded. Fields that are deferred also won't be present. # If field name isn't in serializer.fields, get full list from # get_all_fields() method. This is somewhat expensive, so only do # this if we have to. fields = s.fields if field_name not in fields: fields = getattr(s, 'get_all_fields', lambda: {})() if field_name == 'pk': rewritten.append('pk') continue if field_name not in fields: raise ValidationError( "Invalid filter field: %s" % field_name ) field = fields[field_name] # For remote fields, strip off '_set' for filtering. This is a # weird Django inconsistency. model_field_name = field.source or field_name model_field = get_model_field(s.get_model(), model_field_name) if isinstance(model_field, RelatedObject): model_field_name = model_field.field.related_query_name() # If get_all_fields() was used above, field could be unbound, # and field.source would be None rewritten.append(model_field_name) if i == last: break # Recurse into nested field s = getattr(field, 'serializer', None) if isinstance(s, serializers.ListSerializer): s = s.child if not s: raise ValidationError( "Invalid nested filter field: %s" % field_name ) if self.operator: rewritten.append(self.operator) return ('__'.join(rewritten), field)
[ "def", "generate_query_key", "(", "self", ",", "serializer", ")", ":", "rewritten", "=", "[", "]", "last", "=", "len", "(", "self", ".", "field", ")", "-", "1", "s", "=", "serializer", "field", "=", "None", "for", "i", ",", "field_name", "in", "enumerate", "(", "self", ".", "field", ")", ":", "# Note: .fields can be empty for related serializers that aren't", "# sideloaded. Fields that are deferred also won't be present.", "# If field name isn't in serializer.fields, get full list from", "# get_all_fields() method. This is somewhat expensive, so only do", "# this if we have to.", "fields", "=", "s", ".", "fields", "if", "field_name", "not", "in", "fields", ":", "fields", "=", "getattr", "(", "s", ",", "'get_all_fields'", ",", "lambda", ":", "{", "}", ")", "(", ")", "if", "field_name", "==", "'pk'", ":", "rewritten", ".", "append", "(", "'pk'", ")", "continue", "if", "field_name", "not", "in", "fields", ":", "raise", "ValidationError", "(", "\"Invalid filter field: %s\"", "%", "field_name", ")", "field", "=", "fields", "[", "field_name", "]", "# For remote fields, strip off '_set' for filtering. This is a", "# weird Django inconsistency.", "model_field_name", "=", "field", ".", "source", "or", "field_name", "model_field", "=", "get_model_field", "(", "s", ".", "get_model", "(", ")", ",", "model_field_name", ")", "if", "isinstance", "(", "model_field", ",", "RelatedObject", ")", ":", "model_field_name", "=", "model_field", ".", "field", ".", "related_query_name", "(", ")", "# If get_all_fields() was used above, field could be unbound,", "# and field.source would be None", "rewritten", ".", "append", "(", "model_field_name", ")", "if", "i", "==", "last", ":", "break", "# Recurse into nested field", "s", "=", "getattr", "(", "field", ",", "'serializer'", ",", "None", ")", "if", "isinstance", "(", "s", ",", "serializers", ".", "ListSerializer", ")", ":", "s", "=", "s", ".", "child", "if", "not", "s", ":", "raise", "ValidationError", "(", "\"Invalid nested filter field: %s\"", "%", "field_name", ")", "if", "self", ".", "operator", ":", "rewritten", ".", "append", "(", "self", ".", "operator", ")", "return", "(", "'__'", ".", "join", "(", "rewritten", ")", ",", "field", ")" ]
Get the key that can be passed to Django's filter method. To account for serializer field name rewrites, this method translates serializer field names to model field names by inspecting `serializer`. For example, a query like `filter{users.events}` would be returned as `users__events`. Arguments: serializer: A DRF serializer Returns: A filter key.
[ "Get", "the", "key", "that", "can", "be", "passed", "to", "Django", "s", "filter", "method", "." ]
python
train
quantmind/pulsar
examples/chat/manage.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/examples/chat/manage.py#L70-L75
def encode(self, message): '''Encode a message when publishing.''' if not isinstance(message, dict): message = {'message': message} message['time'] = time.time() return json.dumps(message)
[ "def", "encode", "(", "self", ",", "message", ")", ":", "if", "not", "isinstance", "(", "message", ",", "dict", ")", ":", "message", "=", "{", "'message'", ":", "message", "}", "message", "[", "'time'", "]", "=", "time", ".", "time", "(", ")", "return", "json", ".", "dumps", "(", "message", ")" ]
Encode a message when publishing.
[ "Encode", "a", "message", "when", "publishing", "." ]
python
train
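A standalone sketch of what the encode helper above produces for a plain string (stdlib only; the message text is arbitrary):

import json
import time

message = 'hello'
if not isinstance(message, dict):
    message = {'message': message}   # wrap plain strings, as encode() does
message['time'] = time.time()        # stamp the publish time

print(json.dumps(message))            # e.g. {"message": "hello", "time": 1700000000.0}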
pyviz/holoviews
holoviews/core/data/__init__.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/data/__init__.py#L269-L294
def range(self, dim, data_range=True, dimension_range=True): """Return the lower and upper bounds of values along dimension. Args: dimension: The dimension to compute the range on. data_range (bool): Compute range from data values dimension_range (bool): Include Dimension ranges Whether to include Dimension range and soft_range in range calculation Returns: Tuple containing the lower and upper bound """ dim = self.get_dimension(dim) if dim is None or (not data_range and not dimension_range): return (None, None) elif all(util.isfinite(v) for v in dim.range) and dimension_range: return dim.range elif dim in self.dimensions() and data_range and bool(self): lower, upper = self.interface.range(self, dim) else: lower, upper = (np.NaN, np.NaN) if not dimension_range: return lower, upper return util.dimension_range(lower, upper, dim.range, dim.soft_range)
[ "def", "range", "(", "self", ",", "dim", ",", "data_range", "=", "True", ",", "dimension_range", "=", "True", ")", ":", "dim", "=", "self", ".", "get_dimension", "(", "dim", ")", "if", "dim", "is", "None", "or", "(", "not", "data_range", "and", "not", "dimension_range", ")", ":", "return", "(", "None", ",", "None", ")", "elif", "all", "(", "util", ".", "isfinite", "(", "v", ")", "for", "v", "in", "dim", ".", "range", ")", "and", "dimension_range", ":", "return", "dim", ".", "range", "elif", "dim", "in", "self", ".", "dimensions", "(", ")", "and", "data_range", "and", "bool", "(", "self", ")", ":", "lower", ",", "upper", "=", "self", ".", "interface", ".", "range", "(", "self", ",", "dim", ")", "else", ":", "lower", ",", "upper", "=", "(", "np", ".", "NaN", ",", "np", ".", "NaN", ")", "if", "not", "dimension_range", ":", "return", "lower", ",", "upper", "return", "util", ".", "dimension_range", "(", "lower", ",", "upper", ",", "dim", ".", "range", ",", "dim", ".", "soft_range", ")" ]
Return the lower and upper bounds of values along dimension. Args: dimension: The dimension to compute the range on. data_range (bool): Compute range from data values dimension_range (bool): Include Dimension ranges Whether to include Dimension range and soft_range in range calculation Returns: Tuple containing the lower and upper bound
[ "Return", "the", "lower", "and", "upper", "bounds", "of", "values", "along", "dimension", "." ]
python
train
KarchinLab/probabilistic2020
scripts/check_mutations.py
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/scripts/check_mutations.py#L23-L35
def correct_chrom_names(chroms): """Make sure chromosome names follow UCSC chr convention.""" chrom_list = [] for chrom in chroms: # fix chrom numbering chrom = str(chrom) chrom = chrom.replace('23', 'X') chrom = chrom.replace('24', 'Y') chrom = chrom.replace('25', 'Mt') if not chrom.startswith('chr'): chrom = 'chr' + chrom chrom_list.append(chrom) return chrom_list
[ "def", "correct_chrom_names", "(", "chroms", ")", ":", "chrom_list", "=", "[", "]", "for", "chrom", "in", "chroms", ":", "# fix chrom numbering", "chrom", "=", "str", "(", "chrom", ")", "chrom", "=", "chrom", ".", "replace", "(", "'23'", ",", "'X'", ")", "chrom", "=", "chrom", ".", "replace", "(", "'24'", ",", "'Y'", ")", "chrom", "=", "chrom", ".", "replace", "(", "'25'", ",", "'Mt'", ")", "if", "not", "chrom", ".", "startswith", "(", "'chr'", ")", ":", "chrom", "=", "'chr'", "+", "chrom", "chrom_list", ".", "append", "(", "chrom", ")", "return", "chrom_list" ]
Make sure chromosome names follow UCSC chr convention.
[ "Make", "sure", "chromosome", "names", "follow", "UCSC", "chr", "convention", "." ]
python
train
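A small worked example of the normalisation above (run in a session alongside the function; the input list is illustrative):

# '23', '24' and '25' map to X, Y and Mt, and a 'chr' prefix is added when missing.
print(correct_chrom_names(['1', '23', '24', 'chrX']))
# ['chr1', 'chrX', 'chrY', 'chrX']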
wilson-eft/wilson
wilson/translate/wet.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/wet.py#L973-L984
def JMS_to_Fierz_nunu(C, ddll): """From JMS to semileptonic Fierz basis for Class V. `ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.""" s = dflav[ddll[0]] b = dflav[ddll[1]] l = lflav[ddll[4:ddll.find('n')]] lp = lflav[ddll[ddll.find('_',5)+1:len(ddll)]] ind = ddll.replace('l_','').replace('nu_','') return { 'F' + ind + 'nu' : C["VnudLL"][l, lp, s, b], 'F' + ind + 'nup' : C["VnudLR"][l, lp, s, b] }
[ "def", "JMS_to_Fierz_nunu", "(", "C", ",", "ddll", ")", ":", "s", "=", "dflav", "[", "ddll", "[", "0", "]", "]", "b", "=", "dflav", "[", "ddll", "[", "1", "]", "]", "l", "=", "lflav", "[", "ddll", "[", "4", ":", "ddll", ".", "find", "(", "'n'", ")", "]", "]", "lp", "=", "lflav", "[", "ddll", "[", "ddll", ".", "find", "(", "'_'", ",", "5", ")", "+", "1", ":", "len", "(", "ddll", ")", "]", "]", "ind", "=", "ddll", ".", "replace", "(", "'l_'", ",", "''", ")", ".", "replace", "(", "'nu_'", ",", "''", ")", "return", "{", "'F'", "+", "ind", "+", "'nu'", ":", "C", "[", "\"VnudLL\"", "]", "[", "l", ",", "lp", ",", "s", ",", "b", "]", ",", "'F'", "+", "ind", "+", "'nup'", ":", "C", "[", "\"VnudLR\"", "]", "[", "l", ",", "lp", ",", "s", ",", "b", "]", "}" ]
From JMS to semileptonic Fierz basis for Class V. `ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.
[ "From", "JMS", "to", "semileptonic", "Fierz", "basis", "for", "Class", "V", ".", "ddll", "should", "be", "of", "the", "form", "sbl_enu_tau", "dbl_munu_e", "etc", "." ]
python
train
kata198/NamedAtomicLock
NamedAtomicLock/__init__.py
https://github.com/kata198/NamedAtomicLock/blob/82d480b81daf4deea926964f18bb2e47d4f30f6c/NamedAtomicLock/__init__.py#L165-L185
def __checkExpiration(self, mtime=None): ''' __checkExpiration - Check if we have expired @param mtime <int> - Optional mtime if known, otherwise will be gathered @return <bool> - True if we did expire, otherwise False ''' if not self.maxLockAge: return False if mtime is None: try: mtime = os.stat(self.lockPath).st_mtime except FileNotFoundError as e: return False if mtime < time.time() - self.maxLockAge: return True return False
[ "def", "__checkExpiration", "(", "self", ",", "mtime", "=", "None", ")", ":", "if", "not", "self", ".", "maxLockAge", ":", "return", "False", "if", "mtime", "is", "None", ":", "try", ":", "mtime", "=", "os", ".", "stat", "(", "self", ".", "lockPath", ")", ".", "st_mtime", "except", "FileNotFoundError", "as", "e", ":", "return", "False", "if", "mtime", "<", "time", ".", "time", "(", ")", "-", "self", ".", "maxLockAge", ":", "return", "True", "return", "False" ]
__checkExpiration - Check if we have expired @param mtime <int> - Optional mtime if known, otherwise will be gathered @return <bool> - True if we did expire, otherwise False
[ "__checkExpiration", "-", "Check", "if", "we", "have", "expired", "@param", "mtime", "<int", ">", "-", "Optional", "mtime", "if", "known", "otherwise", "will", "be", "gathered" ]
python
train
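The heart of the expiry check above is an mtime-versus-age comparison; a standalone sketch of the same test, with a hypothetical lock path and maximum age:

import os
import time

lock_path = '/tmp/example.lock'   # hypothetical lock file
max_lock_age = 30                 # hypothetical maximum age, in seconds

try:
    mtime = os.stat(lock_path).st_mtime
except FileNotFoundError:
    expired = False               # no lock file, nothing to expire
else:
    expired = mtime < time.time() - max_lock_age

print(expired)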
futurecolors/django-geoip
django_geoip/management/ipgeobase.py
https://github.com/futurecolors/django-geoip/blob/f9eee4bcad40508089b184434b79826f842d7bd0/django_geoip/management/ipgeobase.py#L31-L39
def clear_database(self): """ Removes all geodata stored in database. Useful for development, never use on production. """ self.logger.info('Removing obsolete geoip from database...') IpRange.objects.all().delete() City.objects.all().delete() Region.objects.all().delete() Country.objects.all().delete()
[ "def", "clear_database", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "'Removing obsolete geoip from database...'", ")", "IpRange", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")", "City", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")", "Region", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")", "Country", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")" ]
Removes all geodata stored in database. Useful for development, never use on production.
[ "Removes", "all", "geodata", "stored", "in", "database", ".", "Useful", "for", "development", "never", "use", "on", "production", "." ]
python
train
manahl/arctic
arctic/store/_ndarray_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/_ndarray_store.py#L75-L113
def _resize_with_dtype(arr, dtype): """ This function will transform arr into an array with the same type as dtype. It will do this by filling new columns with zeros (or NaNs, if it is a float column). Also, columns that are not in the new dtype will be dropped. """ structured_arrays = dtype.names is not None and arr.dtype.names is not None old_columns = arr.dtype.names or [] new_columns = dtype.names or [] # In numpy 1.9 the ndarray.astype method used to handle changes in number of fields. The code below # should replicate the same behaviour the old astype used to have. # # One may be tempted to use np.lib.recfunctions.stack_arrays to implement both this step and the # concatenate that follows but it 2x slower and it requires providing your own default values (instead # of np.zeros). # # Numpy 1.14 supports doing new_arr[old_columns] = arr[old_columns], which is faster than the code below # (in benchmarks it seems to be even slightly faster than using the old astype). However, that is not # supported by numpy 1.9.2. if structured_arrays and (old_columns != new_columns): old_columns = set(old_columns) new_columns = set(new_columns) new_arr = np.zeros(arr.shape, dtype) for c in old_columns & new_columns: new_arr[c] = arr[c] # missing float columns should default to nan rather than zero _is_float_type = lambda _dtype: _dtype.type in (np.float32, np.float64) _is_void_float_type = lambda _dtype: _dtype.type == np.void and _is_float_type(_dtype.subdtype[0]) _is_float_or_void_float_type = lambda _dtype: _is_float_type(_dtype) or _is_void_float_type(_dtype) _is_float = lambda column: _is_float_or_void_float_type(dtype.fields[column][0]) for new_column in filter(_is_float, new_columns - old_columns): new_arr[new_column] = np.nan return new_arr.astype(dtype) else: return arr.astype(dtype)
[ "def", "_resize_with_dtype", "(", "arr", ",", "dtype", ")", ":", "structured_arrays", "=", "dtype", ".", "names", "is", "not", "None", "and", "arr", ".", "dtype", ".", "names", "is", "not", "None", "old_columns", "=", "arr", ".", "dtype", ".", "names", "or", "[", "]", "new_columns", "=", "dtype", ".", "names", "or", "[", "]", "# In numpy 1.9 the ndarray.astype method used to handle changes in number of fields. The code below", "# should replicate the same behaviour the old astype used to have.", "#", "# One may be tempted to use np.lib.recfunctions.stack_arrays to implement both this step and the", "# concatenate that follows but it 2x slower and it requires providing your own default values (instead", "# of np.zeros).", "#", "# Numpy 1.14 supports doing new_arr[old_columns] = arr[old_columns], which is faster than the code below", "# (in benchmarks it seems to be even slightly faster than using the old astype). However, that is not", "# supported by numpy 1.9.2.", "if", "structured_arrays", "and", "(", "old_columns", "!=", "new_columns", ")", ":", "old_columns", "=", "set", "(", "old_columns", ")", "new_columns", "=", "set", "(", "new_columns", ")", "new_arr", "=", "np", ".", "zeros", "(", "arr", ".", "shape", ",", "dtype", ")", "for", "c", "in", "old_columns", "&", "new_columns", ":", "new_arr", "[", "c", "]", "=", "arr", "[", "c", "]", "# missing float columns should default to nan rather than zero", "_is_float_type", "=", "lambda", "_dtype", ":", "_dtype", ".", "type", "in", "(", "np", ".", "float32", ",", "np", ".", "float64", ")", "_is_void_float_type", "=", "lambda", "_dtype", ":", "_dtype", ".", "type", "==", "np", ".", "void", "and", "_is_float_type", "(", "_dtype", ".", "subdtype", "[", "0", "]", ")", "_is_float_or_void_float_type", "=", "lambda", "_dtype", ":", "_is_float_type", "(", "_dtype", ")", "or", "_is_void_float_type", "(", "_dtype", ")", "_is_float", "=", "lambda", "column", ":", "_is_float_or_void_float_type", "(", "dtype", ".", "fields", "[", "column", "]", "[", "0", "]", ")", "for", "new_column", "in", "filter", "(", "_is_float", ",", "new_columns", "-", "old_columns", ")", ":", "new_arr", "[", "new_column", "]", "=", "np", ".", "nan", "return", "new_arr", ".", "astype", "(", "dtype", ")", "else", ":", "return", "arr", ".", "astype", "(", "dtype", ")" ]
This function will transform arr into an array with the same type as dtype. It will do this by filling new columns with zeros (or NaNs, if it is a float column). Also, columns that are not in the new dtype will be dropped.
[ "This", "function", "will", "transform", "arr", "into", "an", "array", "with", "the", "same", "type", "as", "dtype", ".", "It", "will", "do", "this", "by", "filling", "new", "columns", "with", "zeros", "(", "or", "NaNs", "if", "it", "is", "a", "float", "column", ")", ".", "Also", "columns", "that", "are", "not", "in", "the", "new", "dtype", "will", "be", "dropped", "." ]
python
train
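A toy, numpy-only sketch of the behaviour the docstring above describes: shared columns are copied, columns absent from the new dtype are dropped, and a new float column defaults to NaN (the field names are made up):

import numpy as np

arr = np.array([(1, 2.0), (3, 4.0)], dtype=[('a', np.int64), ('drop_me', np.float64)])
new_dtype = np.dtype([('a', np.int64), ('b', np.float64)])

new_arr = np.zeros(arr.shape, new_dtype)
for name in set(arr.dtype.names) & set(new_dtype.names):
    new_arr[name] = arr[name]   # copy columns shared by both dtypes
new_arr['b'] = np.nan           # missing float column defaults to NaN, not zero

print(new_arr)                  # [(1, nan) (3, nan)]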
rorr73/LifeSOSpy
lifesospy/baseunit.py
https://github.com/rorr73/LifeSOSpy/blob/62360fbab2e90bf04d52b547093bdab2d4e389b4/lifesospy/baseunit.py#L539-L548
def as_dict(self) -> Dict[str, Any]: """Converts to a dict of attributes for easier serialization.""" def _on_filter(obj: Any, name: str) -> bool: # Filter out any callbacks if isinstance(obj, BaseUnit): if name.startswith('on_'): return False return True return serializable(self, on_filter=_on_filter)
[ "def", "as_dict", "(", "self", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "def", "_on_filter", "(", "obj", ":", "Any", ",", "name", ":", "str", ")", "->", "bool", ":", "# Filter out any callbacks", "if", "isinstance", "(", "obj", ",", "BaseUnit", ")", ":", "if", "name", ".", "startswith", "(", "'on_'", ")", ":", "return", "False", "return", "True", "return", "serializable", "(", "self", ",", "on_filter", "=", "_on_filter", ")" ]
Converts to a dict of attributes for easier serialization.
[ "Converts", "to", "a", "dict", "of", "attributes", "for", "easier", "serialization", "." ]
python
train
vaexio/vaex
packages/vaex-core/vaex/expression.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/expression.py#L371-L434
def value_counts(self, dropna=False, dropnull=True, ascending=False, progress=False): """Computes counts of unique values. WARNING: * If the expression/column is not categorical, it will be converted on the fly * dropna is False by default, it is True by default in pandas :param dropna: when True, it will not report the missing values :param ascending: when False (default) it will report the most frequent occuring item first :returns: Pandas series containing the counts """ from pandas import Series dtype = self.dtype transient = self.transient or self.ds.filtered or self.ds.is_masked(self.expression) if self.dtype == str_type and not transient: # string is a special case, only ColumnString are not transient ar = self.ds.columns[self.expression] if not isinstance(ar, ColumnString): transient = True counter_type = counter_type_from_dtype(self.dtype, transient) counters = [None] * self.ds.executor.thread_pool.nthreads def map(thread_index, i1, i2, ar): if counters[thread_index] is None: counters[thread_index] = counter_type() if dtype == str_type: previous_ar = ar ar = _to_string_sequence(ar) if not transient: assert ar is previous_ar.string_sequence if np.ma.isMaskedArray(ar): mask = np.ma.getmaskarray(ar) counters[thread_index].update(ar, mask) else: counters[thread_index].update(ar) return 0 def reduce(a, b): return a+b self.ds.map_reduce(map, reduce, [self.expression], delay=False, progress=progress, name='value_counts', info=True, to_numpy=False) counters = [k for k in counters if k is not None] counter0 = counters[0] for other in counters[1:]: counter0.merge(other) value_counts = counter0.extract() index = np.array(list(value_counts.keys())) counts = np.array(list(value_counts.values())) order = np.argsort(counts) if not ascending: order = order[::-1] counts = counts[order] index = index[order] if not dropna or not dropnull: index = index.tolist() counts = counts.tolist() if not dropna and counter0.nan_count: index = [np.nan] + index counts = [counter0.nan_count] + counts if not dropnull and counter0.null_count: index = ['null'] + index counts = [counter0.null_count] + counts return Series(counts, index=index)
[ "def", "value_counts", "(", "self", ",", "dropna", "=", "False", ",", "dropnull", "=", "True", ",", "ascending", "=", "False", ",", "progress", "=", "False", ")", ":", "from", "pandas", "import", "Series", "dtype", "=", "self", ".", "dtype", "transient", "=", "self", ".", "transient", "or", "self", ".", "ds", ".", "filtered", "or", "self", ".", "ds", ".", "is_masked", "(", "self", ".", "expression", ")", "if", "self", ".", "dtype", "==", "str_type", "and", "not", "transient", ":", "# string is a special case, only ColumnString are not transient", "ar", "=", "self", ".", "ds", ".", "columns", "[", "self", ".", "expression", "]", "if", "not", "isinstance", "(", "ar", ",", "ColumnString", ")", ":", "transient", "=", "True", "counter_type", "=", "counter_type_from_dtype", "(", "self", ".", "dtype", ",", "transient", ")", "counters", "=", "[", "None", "]", "*", "self", ".", "ds", ".", "executor", ".", "thread_pool", ".", "nthreads", "def", "map", "(", "thread_index", ",", "i1", ",", "i2", ",", "ar", ")", ":", "if", "counters", "[", "thread_index", "]", "is", "None", ":", "counters", "[", "thread_index", "]", "=", "counter_type", "(", ")", "if", "dtype", "==", "str_type", ":", "previous_ar", "=", "ar", "ar", "=", "_to_string_sequence", "(", "ar", ")", "if", "not", "transient", ":", "assert", "ar", "is", "previous_ar", ".", "string_sequence", "if", "np", ".", "ma", ".", "isMaskedArray", "(", "ar", ")", ":", "mask", "=", "np", ".", "ma", ".", "getmaskarray", "(", "ar", ")", "counters", "[", "thread_index", "]", ".", "update", "(", "ar", ",", "mask", ")", "else", ":", "counters", "[", "thread_index", "]", ".", "update", "(", "ar", ")", "return", "0", "def", "reduce", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b", "self", ".", "ds", ".", "map_reduce", "(", "map", ",", "reduce", ",", "[", "self", ".", "expression", "]", ",", "delay", "=", "False", ",", "progress", "=", "progress", ",", "name", "=", "'value_counts'", ",", "info", "=", "True", ",", "to_numpy", "=", "False", ")", "counters", "=", "[", "k", "for", "k", "in", "counters", "if", "k", "is", "not", "None", "]", "counter0", "=", "counters", "[", "0", "]", "for", "other", "in", "counters", "[", "1", ":", "]", ":", "counter0", ".", "merge", "(", "other", ")", "value_counts", "=", "counter0", ".", "extract", "(", ")", "index", "=", "np", ".", "array", "(", "list", "(", "value_counts", ".", "keys", "(", ")", ")", ")", "counts", "=", "np", ".", "array", "(", "list", "(", "value_counts", ".", "values", "(", ")", ")", ")", "order", "=", "np", ".", "argsort", "(", "counts", ")", "if", "not", "ascending", ":", "order", "=", "order", "[", ":", ":", "-", "1", "]", "counts", "=", "counts", "[", "order", "]", "index", "=", "index", "[", "order", "]", "if", "not", "dropna", "or", "not", "dropnull", ":", "index", "=", "index", ".", "tolist", "(", ")", "counts", "=", "counts", ".", "tolist", "(", ")", "if", "not", "dropna", "and", "counter0", ".", "nan_count", ":", "index", "=", "[", "np", ".", "nan", "]", "+", "index", "counts", "=", "[", "counter0", ".", "nan_count", "]", "+", "counts", "if", "not", "dropnull", "and", "counter0", ".", "null_count", ":", "index", "=", "[", "'null'", "]", "+", "index", "counts", "=", "[", "counter0", ".", "null_count", "]", "+", "counts", "return", "Series", "(", "counts", ",", "index", "=", "index", ")" ]
Computes counts of unique values. WARNING: * If the expression/column is not categorical, it will be converted on the fly * dropna is False by default, it is True by default in pandas :param dropna: when True, it will not report the missing values :param ascending: when False (default) it will report the most frequently occurring item first :returns: Pandas series containing the counts
[ "Computes", "counts", "of", "unique", "values", "." ]
python
test
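A minimal usage sketch for the value_counts expression method above; vaex.from_arrays and the sample data are assumptions for illustration, and the order of tied counts may vary:

import numpy as np
import vaex

df = vaex.from_arrays(x=np.array(['a', 'b', 'a', 'a', 'c']))

# Returns a pandas Series with the most frequent value first: a -> 3, then b and c -> 1.
print(df.x.value_counts())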
SavinaRoja/PyUserInput
pykeyboard/mac.py
https://github.com/SavinaRoja/PyUserInput/blob/153c1d39b1a41b467b235fd182392d6dcbf07947/pykeyboard/mac.py#L174-L193
def _press_special_key(self, key, down): """ Helper method for special keys. Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac """ key_code = special_key_translate_table[key] ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_( NSSystemDefined, # type (0,0), # location 0xa00 if down else 0xb00, # flags 0, # timestamp 0, # window 0, # ctx 8, # subtype (key_code << 16) | ((0xa if down else 0xb) << 8), # data1 -1 # data2 ) Quartz.CGEventPost(0, ev.Quartz.CGEvent())
[ "def", "_press_special_key", "(", "self", ",", "key", ",", "down", ")", ":", "key_code", "=", "special_key_translate_table", "[", "key", "]", "ev", "=", "NSEvent", ".", "otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_", "(", "NSSystemDefined", ",", "# type", "(", "0", ",", "0", ")", ",", "# location", "0xa00", "if", "down", "else", "0xb00", ",", "# flags", "0", ",", "# timestamp", "0", ",", "# window", "0", ",", "# ctx", "8", ",", "# subtype", "(", "key_code", "<<", "16", ")", "|", "(", "(", "0xa", "if", "down", "else", "0xb", ")", "<<", "8", ")", ",", "# data1", "-", "1", "# data2", ")", "Quartz", ".", "CGEventPost", "(", "0", ",", "ev", ".", "Quartz", ".", "CGEvent", "(", ")", ")" ]
Helper method for special keys. Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac
[ "Helper", "method", "for", "special", "keys", "." ]
python
train
apache/airflow
airflow/contrib/hooks/gcp_vision_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L311-L349
def create_reference_image( self, location, product_id, reference_image, reference_image_id=None, project_id=None, retry=None, timeout=None, metadata=None, ): """ For the documentation see: :py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator` """ client = self.get_conn() self.log.info('Creating ReferenceImage') parent = ProductSearchClient.product_path(project=project_id, location=location, product=product_id) response = client.create_reference_image( parent=parent, reference_image=reference_image, reference_image_id=reference_image_id, retry=retry, timeout=timeout, metadata=metadata, ) self.log.info('ReferenceImage created: %s', response.name if response else '') self.log.debug('ReferenceImage created:\n%s', response) if not reference_image_id: # Refernece image id was generated by the API reference_image_id = self._get_autogenerated_id(response) self.log.info( 'Extracted autogenerated ReferenceImage ID from the response: %s', reference_image_id ) return reference_image_id
[ "def", "create_reference_image", "(", "self", ",", "location", ",", "product_id", ",", "reference_image", ",", "reference_image_id", "=", "None", ",", "project_id", "=", "None", ",", "retry", "=", "None", ",", "timeout", "=", "None", ",", "metadata", "=", "None", ",", ")", ":", "client", "=", "self", ".", "get_conn", "(", ")", "self", ".", "log", ".", "info", "(", "'Creating ReferenceImage'", ")", "parent", "=", "ProductSearchClient", ".", "product_path", "(", "project", "=", "project_id", ",", "location", "=", "location", ",", "product", "=", "product_id", ")", "response", "=", "client", ".", "create_reference_image", "(", "parent", "=", "parent", ",", "reference_image", "=", "reference_image", ",", "reference_image_id", "=", "reference_image_id", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ",", ")", "self", ".", "log", ".", "info", "(", "'ReferenceImage created: %s'", ",", "response", ".", "name", "if", "response", "else", "''", ")", "self", ".", "log", ".", "debug", "(", "'ReferenceImage created:\\n%s'", ",", "response", ")", "if", "not", "reference_image_id", ":", "# Refernece image id was generated by the API", "reference_image_id", "=", "self", ".", "_get_autogenerated_id", "(", "response", ")", "self", ".", "log", ".", "info", "(", "'Extracted autogenerated ReferenceImage ID from the response: %s'", ",", "reference_image_id", ")", "return", "reference_image_id" ]
For the documentation see: :py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionReferenceImageCreateOperator`
[ "For", "the", "documentation", "see", ":", ":", "py", ":", "class", ":", "~airflow", ".", "contrib", ".", "operators", ".", "gcp_vision_operator", ".", "CloudVisionReferenceImageCreateOperator" ]
python
test
jorisroovers/gitlint
gitlint/lint.py
https://github.com/jorisroovers/gitlint/blob/6248bd6cbc20c1be3bb6d196a5ec0425af99733b/gitlint/lint.py#L70-L96
def lint(self, commit): """ Lint the last commit in a given git context by applying all ignore, title, body and commit rules. """ LOG.debug("Linting commit %s", commit.sha or "[SHA UNKNOWN]") LOG.debug("Commit Object\n" + ustr(commit)) # Apply config rules for rule in self.configuration_rules: rule.apply(self.config, commit) # Skip linting if this is a special commit type that is configured to be ignored ignore_commit_types = ["merge", "squash", "fixup"] for commit_type in ignore_commit_types: if getattr(commit, "is_{0}_commit".format(commit_type)) and \ getattr(self.config, "ignore_{0}_commits".format(commit_type)): return [] violations = [] # determine violations by applying all rules violations.extend(self._apply_line_rules([commit.message.title], commit, self.title_line_rules, 1)) violations.extend(self._apply_line_rules(commit.message.body, commit, self.body_line_rules, 2)) violations.extend(self._apply_commit_rules(self.commit_rules, commit)) # Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules), # we replace None with -1 so that it always get's placed first. Note that we need this to do this to support # python 3, as None is not allowed in a list that is being sorted. violations.sort(key=lambda v: (-1 if v.line_nr is None else v.line_nr, v.rule_id)) return violations
[ "def", "lint", "(", "self", ",", "commit", ")", ":", "LOG", ".", "debug", "(", "\"Linting commit %s\"", ",", "commit", ".", "sha", "or", "\"[SHA UNKNOWN]\"", ")", "LOG", ".", "debug", "(", "\"Commit Object\\n\"", "+", "ustr", "(", "commit", ")", ")", "# Apply config rules", "for", "rule", "in", "self", ".", "configuration_rules", ":", "rule", ".", "apply", "(", "self", ".", "config", ",", "commit", ")", "# Skip linting if this is a special commit type that is configured to be ignored", "ignore_commit_types", "=", "[", "\"merge\"", ",", "\"squash\"", ",", "\"fixup\"", "]", "for", "commit_type", "in", "ignore_commit_types", ":", "if", "getattr", "(", "commit", ",", "\"is_{0}_commit\"", ".", "format", "(", "commit_type", ")", ")", "and", "getattr", "(", "self", ".", "config", ",", "\"ignore_{0}_commits\"", ".", "format", "(", "commit_type", ")", ")", ":", "return", "[", "]", "violations", "=", "[", "]", "# determine violations by applying all rules", "violations", ".", "extend", "(", "self", ".", "_apply_line_rules", "(", "[", "commit", ".", "message", ".", "title", "]", ",", "commit", ",", "self", ".", "title_line_rules", ",", "1", ")", ")", "violations", ".", "extend", "(", "self", ".", "_apply_line_rules", "(", "commit", ".", "message", ".", "body", ",", "commit", ",", "self", ".", "body_line_rules", ",", "2", ")", ")", "violations", ".", "extend", "(", "self", ".", "_apply_commit_rules", "(", "self", ".", "commit_rules", ",", "commit", ")", ")", "# Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules),", "# we replace None with -1 so that it always get's placed first. Note that we need this to do this to support", "# python 3, as None is not allowed in a list that is being sorted.", "violations", ".", "sort", "(", "key", "=", "lambda", "v", ":", "(", "-", "1", "if", "v", ".", "line_nr", "is", "None", "else", "v", ".", "line_nr", ",", "v", ".", "rule_id", ")", ")", "return", "violations" ]
Lint the last commit in a given git context by applying all ignore, title, body and commit rules.
[ "Lint", "the", "last", "commit", "in", "a", "given", "git", "context", "by", "applying", "all", "ignore", "title", "body", "and", "commit", "rules", "." ]
python
train
ipfs/py-ipfs-api
ipfsapi/client.py
https://github.com/ipfs/py-ipfs-api/blob/7574dad04877b45dbe4ad321dcfa9e880eb2d90c/ipfsapi/client.py#L1775-L1803
def log_level(self, subsystem, level, **kwargs): r"""Changes the logging output of a running daemon. .. code-block:: python >>> c.log_level("path", "info") {'Message': "Changed log level of 'path' to 'info'\n"} Parameters ---------- subsystem : str The subsystem logging identifier (Use ``"all"`` for all subsystems) level : str The desired logging level. Must be one of: * ``"debug"`` * ``"info"`` * ``"warning"`` * ``"error"`` * ``"fatal"`` * ``"panic"`` Returns ------- dict : Status message """ args = (subsystem, level) return self._client.request('/log/level', args, decoder='json', **kwargs)
[ "def", "log_level", "(", "self", ",", "subsystem", ",", "level", ",", "*", "*", "kwargs", ")", ":", "args", "=", "(", "subsystem", ",", "level", ")", "return", "self", ".", "_client", ".", "request", "(", "'/log/level'", ",", "args", ",", "decoder", "=", "'json'", ",", "*", "*", "kwargs", ")" ]
r"""Changes the logging output of a running daemon. .. code-block:: python >>> c.log_level("path", "info") {'Message': "Changed log level of 'path' to 'info'\n"} Parameters ---------- subsystem : str The subsystem logging identifier (Use ``"all"`` for all subsystems) level : str The desired logging level. Must be one of: * ``"debug"`` * ``"info"`` * ``"warning"`` * ``"error"`` * ``"fatal"`` * ``"panic"`` Returns ------- dict : Status message
[ "r", "Changes", "the", "logging", "output", "of", "a", "running", "daemon", "." ]
python
train
tilezen/tilequeue
tilequeue/query/rawr.py
https://github.com/tilezen/tilequeue/blob/d7b9484ab92e246eb2773949c784ebb37c731e28/tilequeue/query/rawr.py#L616-L632
def _orient(shape): """ The Shapely version of the orient function appears to only work on Polygons, and fails on MultiPolygons. This is a quick wrapper to allow orienting of either. """ assert shape.geom_type in ('Polygon', 'MultiPolygon') if shape.geom_type == 'Polygon': return orient(shape) else: polys = [] for geom in shape.geoms: polys.append(orient(geom)) return MultiPolygon(polys)
[ "def", "_orient", "(", "shape", ")", ":", "assert", "shape", ".", "geom_type", "in", "(", "'Polygon'", ",", "'MultiPolygon'", ")", "if", "shape", ".", "geom_type", "==", "'Polygon'", ":", "return", "orient", "(", "shape", ")", "else", ":", "polys", "=", "[", "]", "for", "geom", "in", "shape", ".", "geoms", ":", "polys", ".", "append", "(", "orient", "(", "geom", ")", ")", "return", "MultiPolygon", "(", "polys", ")" ]
The Shapely version of the orient function appears to only work on Polygons, and fails on MultiPolygons. This is a quick wrapper to allow orienting of either.
[ "The", "Shapely", "version", "of", "the", "orient", "function", "appears", "to", "only", "work", "on", "Polygons", "and", "fails", "on", "MultiPolygons", ".", "This", "is", "a", "quick", "wrapper", "to", "allow", "orienting", "of", "either", "." ]
python
train
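A small usage sketch of the shapely helper that _orient wraps (shapely.geometry.polygon.orient); the square coordinates are illustrative:

from shapely.geometry import Polygon
from shapely.geometry.polygon import orient

# A clockwise square; orient() rewinds the exterior counter-clockwise by default.
clockwise = Polygon([(0, 0), (0, 1), (1, 1), (1, 0)])
print(clockwise.exterior.is_ccw)          # False
print(orient(clockwise).exterior.is_ccw)  # True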
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L870-L880
def update_subnet_db_info(self, tenant_id, direc, net_id, subnet_id): """Update the subnet DB with Net and Subnet ID, given the subnet. """ subnet_dict = self.retrieve_dcnm_subnet_info(tenant_id, direc) if not subnet_dict: LOG.error("Subnet dict not found for tenant %s", tenant_id) return subnet = subnet_dict['cidr'].split('/')[0] if direc == 'in': self.service_in_ip.update_subnet(subnet, net_id, subnet_id) else: self.service_out_ip.update_subnet(subnet, net_id, subnet_id)
[ "def", "update_subnet_db_info", "(", "self", ",", "tenant_id", ",", "direc", ",", "net_id", ",", "subnet_id", ")", ":", "subnet_dict", "=", "self", ".", "retrieve_dcnm_subnet_info", "(", "tenant_id", ",", "direc", ")", "if", "not", "subnet_dict", ":", "LOG", ".", "error", "(", "\"Subnet dict not found for tenant %s\"", ",", "tenant_id", ")", "return", "subnet", "=", "subnet_dict", "[", "'cidr'", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", "if", "direc", "==", "'in'", ":", "self", ".", "service_in_ip", ".", "update_subnet", "(", "subnet", ",", "net_id", ",", "subnet_id", ")", "else", ":", "self", ".", "service_out_ip", ".", "update_subnet", "(", "subnet", ",", "net_id", ",", "subnet_id", ")" ]
Update the subnet DB with Net and Subnet ID, given the subnet.
[ "Update", "the", "subnet", "DB", "with", "Net", "and", "Subnet", "ID", "given", "the", "subnet", "." ]
python
train
vsudilov/flask-consulate
flask_consulate/consul.py
https://github.com/vsudilov/flask-consulate/blob/514f8754e7186f960237ed2836206993d5d3d3b6/flask_consulate/consul.py#L78-L109
def apply_remote_config(self, namespace=None): """ Applies all config values defined in consul's kv store to self.app. There is no guarantee that these values will not be overwritten later elsewhere. :param namespace: kv namespace/directory. Defaults to DEFAULT_KV_NAMESPACE :return: None """ if namespace is None: namespace = "config/{service}/{environment}/".format( service=os.environ.get('SERVICE', 'generic_service'), environment=os.environ.get('ENVIRONMENT', 'generic_environment') ) for k, v in iteritems(self.session.kv.find(namespace)): k = k.replace(namespace, '') try: self.app.config[k] = json.loads(v) except (TypeError, ValueError): self.app.logger.warning("Couldn't de-serialize {} to json, using raw value".format(v)) self.app.config[k] = v msg = "Set {k}={v} from consul kv '{ns}'".format( k=k, v=v, ns=namespace, ) self.app.logger.debug(msg)
[ "def", "apply_remote_config", "(", "self", ",", "namespace", "=", "None", ")", ":", "if", "namespace", "is", "None", ":", "namespace", "=", "\"config/{service}/{environment}/\"", ".", "format", "(", "service", "=", "os", ".", "environ", ".", "get", "(", "'SERVICE'", ",", "'generic_service'", ")", ",", "environment", "=", "os", ".", "environ", ".", "get", "(", "'ENVIRONMENT'", ",", "'generic_environment'", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "self", ".", "session", ".", "kv", ".", "find", "(", "namespace", ")", ")", ":", "k", "=", "k", ".", "replace", "(", "namespace", ",", "''", ")", "try", ":", "self", ".", "app", ".", "config", "[", "k", "]", "=", "json", ".", "loads", "(", "v", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "self", ".", "app", ".", "logger", ".", "warning", "(", "\"Couldn't de-serialize {} to json, using raw value\"", ".", "format", "(", "v", ")", ")", "self", ".", "app", ".", "config", "[", "k", "]", "=", "v", "msg", "=", "\"Set {k}={v} from consul kv '{ns}'\"", ".", "format", "(", "k", "=", "k", ",", "v", "=", "v", ",", "ns", "=", "namespace", ",", ")", "self", ".", "app", ".", "logger", ".", "debug", "(", "msg", ")" ]
Applies all config values defined in consul's kv store to self.app. There is no guarantee that these values will not be overwritten later elsewhere. :param namespace: kv namespace/directory. Defaults to DEFAULT_KV_NAMESPACE :return: None
[ "Applies", "all", "config", "values", "defined", "in", "consul", "s", "kv", "store", "to", "self", ".", "app", "." ]
python
train
materialsproject/pymatgen
pymatgen/symmetry/analyzer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/analyzer.py#L1151-L1187
def _find_spherical_axes(self): """ Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point group T molecules have only one unique 3-fold and one unique 2-fold axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules have a unique 5-fold axis. """ rot_present = defaultdict(bool) origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol) test_set = min(dist_el_sites.values(), key=lambda s: len(s)) coords = [s.coords for s in test_set] for c1, c2, c3 in itertools.combinations(coords, 3): for cc1, cc2 in itertools.combinations([c1, c2, c3], 2): if not rot_present[2]: test_axis = cc1 + cc2 if np.linalg.norm(test_axis) > self.tol: op = SymmOp.from_axis_angle_and_translation(test_axis, 180) rot_present[2] = self.is_valid_op(op) if rot_present[2]: self.symmops.append(op) self.rot_sym.append((test_axis, 2)) test_axis = np.cross(c2 - c1, c3 - c1) if np.linalg.norm(test_axis) > self.tol: for r in (3, 4, 5): if not rot_present[r]: op = SymmOp.from_axis_angle_and_translation( test_axis, 360 / r) rot_present[r] = self.is_valid_op(op) if rot_present[r]: self.symmops.append(op) self.rot_sym.append((test_axis, r)) break if rot_present[2] and rot_present[3] and ( rot_present[4] or rot_present[5]): break
[ "def", "_find_spherical_axes", "(", "self", ")", ":", "rot_present", "=", "defaultdict", "(", "bool", ")", "origin_site", ",", "dist_el_sites", "=", "cluster_sites", "(", "self", ".", "centered_mol", ",", "self", ".", "tol", ")", "test_set", "=", "min", "(", "dist_el_sites", ".", "values", "(", ")", ",", "key", "=", "lambda", "s", ":", "len", "(", "s", ")", ")", "coords", "=", "[", "s", ".", "coords", "for", "s", "in", "test_set", "]", "for", "c1", ",", "c2", ",", "c3", "in", "itertools", ".", "combinations", "(", "coords", ",", "3", ")", ":", "for", "cc1", ",", "cc2", "in", "itertools", ".", "combinations", "(", "[", "c1", ",", "c2", ",", "c3", "]", ",", "2", ")", ":", "if", "not", "rot_present", "[", "2", "]", ":", "test_axis", "=", "cc1", "+", "cc2", "if", "np", ".", "linalg", ".", "norm", "(", "test_axis", ")", ">", "self", ".", "tol", ":", "op", "=", "SymmOp", ".", "from_axis_angle_and_translation", "(", "test_axis", ",", "180", ")", "rot_present", "[", "2", "]", "=", "self", ".", "is_valid_op", "(", "op", ")", "if", "rot_present", "[", "2", "]", ":", "self", ".", "symmops", ".", "append", "(", "op", ")", "self", ".", "rot_sym", ".", "append", "(", "(", "test_axis", ",", "2", ")", ")", "test_axis", "=", "np", ".", "cross", "(", "c2", "-", "c1", ",", "c3", "-", "c1", ")", "if", "np", ".", "linalg", ".", "norm", "(", "test_axis", ")", ">", "self", ".", "tol", ":", "for", "r", "in", "(", "3", ",", "4", ",", "5", ")", ":", "if", "not", "rot_present", "[", "r", "]", ":", "op", "=", "SymmOp", ".", "from_axis_angle_and_translation", "(", "test_axis", ",", "360", "/", "r", ")", "rot_present", "[", "r", "]", "=", "self", ".", "is_valid_op", "(", "op", ")", "if", "rot_present", "[", "r", "]", ":", "self", ".", "symmops", ".", "append", "(", "op", ")", "self", ".", "rot_sym", ".", "append", "(", "(", "test_axis", ",", "r", ")", ")", "break", "if", "rot_present", "[", "2", "]", "and", "rot_present", "[", "3", "]", "and", "(", "rot_present", "[", "4", "]", "or", "rot_present", "[", "5", "]", ")", ":", "break" ]
Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point group T molecules have only one unique 3-fold and one unique 2-fold axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules have a unique 5-fold axis.
[ "Looks", "for", "R5", "R4", "R3", "and", "R2", "axes", "in", "spherical", "top", "molecules", ".", "Point", "group", "T", "molecules", "have", "only", "one", "unique", "3", "-", "fold", "and", "one", "unique", "2", "-", "fold", "axis", ".", "O", "molecules", "have", "one", "unique", "4", "3", "and", "2", "-", "fold", "axes", ".", "I", "molecules", "have", "a", "unique", "5", "-", "fold", "axis", "." ]
python
train
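A rough usage sketch for the analyzer this record comes from: _find_spherical_axes is an internal step of pymatgen's PointGroupAnalyzer, which is normally driven through its public constructor. The import paths, constructor arguments and the methane geometry below are quoted from memory and may differ between pymatgen versions.

from pymatgen.core import Molecule
from pymatgen.symmetry.analyzer import PointGroupAnalyzer

# Methane is a spherical top, so analyzing it exercises _find_spherical_axes.
methane = Molecule(
    ["C", "H", "H", "H", "H"],
    [[0.000, 0.000, 0.000],
     [0.629, 0.629, 0.629],
     [-0.629, -0.629, 0.629],
     [-0.629, 0.629, -0.629],
     [0.629, -0.629, -0.629]],
)
analyzer = PointGroupAnalyzer(methane, tolerance=0.3)
print(analyzer.sch_symbol)  # expected: "Td"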
zsethna/OLGA
olga/generation_probability.py
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/generation_probability.py#L377-L470
def format_usage_masks(self, V_usage_mask_in, J_usage_mask_in, print_warnings = True): """Format raw usage masks into lists of indices. Usage masks allows the Pgen computation to be conditioned on the V and J gene/allele identities. The inputted masks are lists of strings, or a single string, of the names of the genes or alleles to be conditioned on. The default mask includes all productive V or J genes. Parameters ---------- V_usage_mask_in : str or list An object to indicate which V alleles should be considered. The default input is None which returns the list of all productive V alleles. J_usage_mask_in : str or list An object to indicate which J alleles should be considered. The default input is None which returns the list of all productive J alleles. print_warnings : bool Determines whether warnings are printed or not. Default ON. Returns ------- V_usage_mask : list of integers Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list of integers Indices of the J alleles to be considered in the Pgen computation Examples -------- >>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01') ([34], [0]) >>> generation_probability.format_usage_masks('TRBV27*01', '') ([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13]) >>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01') ([34, 18], [0]) """ #Format the V usage mask if V_usage_mask_in is None: #Default case, use all productive V genes with non-zero probability #V_usage_mask = [v for v, V in enumerate(ppp['cutV_genomic_CDR3_segs']) if len(V) > 0] V_usage_mask = self.d_V_usage_mask elif isinstance(V_usage_mask_in, list): e_V_usage_mask = set() for v in V_usage_mask_in: try: e_V_usage_mask = e_V_usage_mask.union(self.V_mask_mapping[v]) except KeyError: if print_warnings: print 'Unfamiliar V gene/allele: ' + v pass if len(e_V_usage_mask) == 0: if print_warnings: print 'No recognized V genes/alleles. Using default V_usage_mask' V_usage_mask = self.d_V_usage_mask else: V_usage_mask = list(e_V_usage_mask) else: try: V_usage_mask = self.V_mask_mapping[V_usage_mask_in] except KeyError: #Do raise error here as the mask will be empty if print_warnings: print 'Unfamiliar V usage mask: ' + str(V_usage_mask_in) + ', please check the allowed V alleles. Using default V_usage_mask' V_usage_mask = self.d_V_usage_mask #Format the J usage mask if J_usage_mask_in is None: #Default case, use all productive J genes with non-zero probability #J_usage_mask = [j for j, J in enumerate(ppp['cutJ_genomic_CDR3_segs']) if len(J) > 0] J_usage_mask = self.d_J_usage_mask elif isinstance(J_usage_mask_in, list): e_J_usage_mask = set() for j in J_usage_mask_in: try: e_J_usage_mask = e_J_usage_mask.union(self.J_mask_mapping[j]) except KeyError: if print_warnings: print 'Unfamiliar J gene/allele: ' + j pass if len(e_J_usage_mask) == 0: if print_warnings: print 'No recognized J genes/alleles. Using default J_usage_mask' J_usage_mask = self.d_J_usage_mask else: J_usage_mask = list(e_J_usage_mask) else: try: J_usage_mask = self.J_mask_mapping[J_usage_mask_in] except KeyError: #Do raise error here as the mask will be empty if print_warnings: print 'Unfamiliar J usage mask: ' + str(J_usage_mask_in) + ', please check the allowed J alleles. Using default J_usage_mask' J_usage_mask = self.d_J_usage_mask return V_usage_mask, J_usage_mask
[ "def", "format_usage_masks", "(", "self", ",", "V_usage_mask_in", ",", "J_usage_mask_in", ",", "print_warnings", "=", "True", ")", ":", "#Format the V usage mask", "if", "V_usage_mask_in", "is", "None", ":", "#Default case, use all productive V genes with non-zero probability", "#V_usage_mask = [v for v, V in enumerate(ppp['cutV_genomic_CDR3_segs']) if len(V) > 0]", "V_usage_mask", "=", "self", ".", "d_V_usage_mask", "elif", "isinstance", "(", "V_usage_mask_in", ",", "list", ")", ":", "e_V_usage_mask", "=", "set", "(", ")", "for", "v", "in", "V_usage_mask_in", ":", "try", ":", "e_V_usage_mask", "=", "e_V_usage_mask", ".", "union", "(", "self", ".", "V_mask_mapping", "[", "v", "]", ")", "except", "KeyError", ":", "if", "print_warnings", ":", "print", "'Unfamiliar V gene/allele: '", "+", "v", "pass", "if", "len", "(", "e_V_usage_mask", ")", "==", "0", ":", "if", "print_warnings", ":", "print", "'No recognized V genes/alleles. Using default V_usage_mask'", "V_usage_mask", "=", "self", ".", "d_V_usage_mask", "else", ":", "V_usage_mask", "=", "list", "(", "e_V_usage_mask", ")", "else", ":", "try", ":", "V_usage_mask", "=", "self", ".", "V_mask_mapping", "[", "V_usage_mask_in", "]", "except", "KeyError", ":", "#Do raise error here as the mask will be empty", "if", "print_warnings", ":", "print", "'Unfamiliar V usage mask: '", "+", "str", "(", "V_usage_mask_in", ")", "+", "', please check the allowed V alleles. Using default V_usage_mask'", "V_usage_mask", "=", "self", ".", "d_V_usage_mask", "#Format the J usage mask", "if", "J_usage_mask_in", "is", "None", ":", "#Default case, use all productive J genes with non-zero probability", "#J_usage_mask = [j for j, J in enumerate(ppp['cutJ_genomic_CDR3_segs']) if len(J) > 0]", "J_usage_mask", "=", "self", ".", "d_J_usage_mask", "elif", "isinstance", "(", "J_usage_mask_in", ",", "list", ")", ":", "e_J_usage_mask", "=", "set", "(", ")", "for", "j", "in", "J_usage_mask_in", ":", "try", ":", "e_J_usage_mask", "=", "e_J_usage_mask", ".", "union", "(", "self", ".", "J_mask_mapping", "[", "j", "]", ")", "except", "KeyError", ":", "if", "print_warnings", ":", "print", "'Unfamiliar J gene/allele: '", "+", "j", "pass", "if", "len", "(", "e_J_usage_mask", ")", "==", "0", ":", "if", "print_warnings", ":", "print", "'No recognized J genes/alleles. Using default J_usage_mask'", "J_usage_mask", "=", "self", ".", "d_J_usage_mask", "else", ":", "J_usage_mask", "=", "list", "(", "e_J_usage_mask", ")", "else", ":", "try", ":", "J_usage_mask", "=", "self", ".", "J_mask_mapping", "[", "J_usage_mask_in", "]", "except", "KeyError", ":", "#Do raise error here as the mask will be empty", "if", "print_warnings", ":", "print", "'Unfamiliar J usage mask: '", "+", "str", "(", "J_usage_mask_in", ")", "+", "', please check the allowed J alleles. Using default J_usage_mask'", "J_usage_mask", "=", "self", ".", "d_J_usage_mask", "return", "V_usage_mask", ",", "J_usage_mask" ]
Format raw usage masks into lists of indices. Usage masks allows the Pgen computation to be conditioned on the V and J gene/allele identities. The inputted masks are lists of strings, or a single string, of the names of the genes or alleles to be conditioned on. The default mask includes all productive V or J genes. Parameters ---------- V_usage_mask_in : str or list An object to indicate which V alleles should be considered. The default input is None which returns the list of all productive V alleles. J_usage_mask_in : str or list An object to indicate which J alleles should be considered. The default input is None which returns the list of all productive J alleles. print_warnings : bool Determines whether warnings are printed or not. Default ON. Returns ------- V_usage_mask : list of integers Indices of the V alleles to be considered in the Pgen computation J_usage_mask : list of integers Indices of the J alleles to be considered in the Pgen computation Examples -------- >>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01') ([34], [0]) >>> generation_probability.format_usage_masks('TRBV27*01', '') ([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13]) >>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01') ([34, 18], [0])
[ "Format", "raw", "usage", "masks", "into", "lists", "of", "indices", ".", "Usage", "masks", "allows", "the", "Pgen", "computation", "to", "be", "conditioned", "on", "the", "V", "and", "J", "gene", "/", "allele", "identities", ".", "The", "inputted", "masks", "are", "lists", "of", "strings", "or", "a", "single", "string", "of", "the", "names", "of", "the", "genes", "or", "alleles", "to", "be", "conditioned", "on", ".", "The", "default", "mask", "includes", "all", "productive", "V", "or", "J", "genes", ".", "Parameters", "----------", "V_usage_mask_in", ":", "str", "or", "list", "An", "object", "to", "indicate", "which", "V", "alleles", "should", "be", "considered", ".", "The", "default", "input", "is", "None", "which", "returns", "the", "list", "of", "all", "productive", "V", "alleles", ".", "J_usage_mask_in", ":", "str", "or", "list", "An", "object", "to", "indicate", "which", "J", "alleles", "should", "be", "considered", ".", "The", "default", "input", "is", "None", "which", "returns", "the", "list", "of", "all", "productive", "J", "alleles", ".", "print_warnings", ":", "bool", "Determines", "whether", "warnings", "are", "printed", "or", "not", ".", "Default", "ON", ".", "Returns", "-------", "V_usage_mask", ":", "list", "of", "integers", "Indices", "of", "the", "V", "alleles", "to", "be", "considered", "in", "the", "Pgen", "computation", "J_usage_mask", ":", "list", "of", "integers", "Indices", "of", "the", "J", "alleles", "to", "be", "considered", "in", "the", "Pgen", "computation", "Examples", "--------", ">>>", "generation_probability", ".", "format_usage_masks", "(", "TRBV27", "*", "01", "TRBJ1", "-", "1", "*", "01", ")", "(", "[", "34", "]", "[", "0", "]", ")", ">>>", "generation_probability", ".", "format_usage_masks", "(", "TRBV27", "*", "01", ")", "(", "[", "34", "]", "[", "0", "1", "2", "3", "4", "7", "8", "9", "10", "11", "12", "13", "]", ")", ">>>", "generation_probability", ".", "format_usage_masks", "(", "[", "TRBV27", "*", "01", "TRBV13", "*", "01", "]", "TRBJ1", "-", "1", "*", "01", ")", "(", "[", "34", "18", "]", "[", "0", "]", ")" ]
python
train
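A self-contained sketch (hypothetical names, not part of the OLGA API) of the string-or-list-to-index pattern that format_usage_masks implements: resolve gene/allele names through a mapping, warn on unknown names, and fall back to a default mask.

def format_mask(mask_in, mapping, default_mask, print_warnings=True):
    # Resolve gene/allele names to a sorted list of indices.
    if mask_in is None:
        return list(default_mask)
    names = mask_in if isinstance(mask_in, list) else [mask_in]
    indices = set()
    for name in names:
        try:
            indices.update(mapping[name])
        except KeyError:
            if print_warnings:
                print('Unfamiliar gene/allele: ' + name)
    return sorted(indices) if indices else list(default_mask)

v_mapping = {'TRBV27*01': {34}, 'TRBV13*01': {18}}
print(format_mask(['TRBV27*01', 'TRBV13*01'], v_mapping, [0, 1, 2]))  # [18, 34]
print(format_mask('TRBVxx*99', v_mapping, [0, 1, 2]))                 # falls back to [0, 1, 2]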
blockstack/blockstack-core
blockstack/lib/client.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/client.py#L648-L717
def get_atlas_peers(hostport, timeout=30, my_hostport=None, proxy=None): """ Get an atlas peer's neighbors. Return {'status': True, 'peers': [peers]} on success. Return {'error': ...} on error """ assert hostport or proxy, 'need either hostport or proxy' peers_schema = { 'type': 'object', 'properties': { 'peers': { 'type': 'array', 'items': { 'type': 'string', 'pattern': '^([^:]+):([1-9][0-9]{1,4})$', }, }, }, 'required': [ 'peers' ], } schema = json_response_schema( peers_schema ) if proxy is None: proxy = connect_hostport(hostport) peers = None try: peer_list_resp = proxy.get_atlas_peers() peer_list_resp = json_validate(schema, peer_list_resp) if json_is_error(peer_list_resp): return peer_list_resp # verify that all strings are host:ports for peer_hostport in peer_list_resp['peers']: peer_host, peer_port = url_to_host_port(peer_hostport) if peer_host is None or peer_port is None: return {'error': 'Server did not return valid Atlas peers', 'http_status': 503} peers = peer_list_resp except ValidationError as ve: if BLOCKSTACK_DEBUG: log.exception(ve) resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502} return resp except socket.timeout: log.error("Connection timed out") resp = {'error': 'Connection to remote host timed out.', 'http_status': 503} return resp except socket.error as se: log.error("Connection error {}".format(se.errno)) resp = {'error': 'Connection to remote host failed.', 'http_status': 502} return resp except Exception as ee: if BLOCKSTACK_DEBUG: log.exception(ee) log.error("Caught exception while connecting to Blockstack node: {}".format(ee)) resp = {'error': 'Failed to contact Blockstack node {}. Try again with `--debug`.'.format(hostport), 'http_status': 500} return resp return peers
[ "def", "get_atlas_peers", "(", "hostport", ",", "timeout", "=", "30", ",", "my_hostport", "=", "None", ",", "proxy", "=", "None", ")", ":", "assert", "hostport", "or", "proxy", ",", "'need either hostport or proxy'", "peers_schema", "=", "{", "'type'", ":", "'object'", ",", "'properties'", ":", "{", "'peers'", ":", "{", "'type'", ":", "'array'", ",", "'items'", ":", "{", "'type'", ":", "'string'", ",", "'pattern'", ":", "'^([^:]+):([1-9][0-9]{1,4})$'", ",", "}", ",", "}", ",", "}", ",", "'required'", ":", "[", "'peers'", "]", ",", "}", "schema", "=", "json_response_schema", "(", "peers_schema", ")", "if", "proxy", "is", "None", ":", "proxy", "=", "connect_hostport", "(", "hostport", ")", "peers", "=", "None", "try", ":", "peer_list_resp", "=", "proxy", ".", "get_atlas_peers", "(", ")", "peer_list_resp", "=", "json_validate", "(", "schema", ",", "peer_list_resp", ")", "if", "json_is_error", "(", "peer_list_resp", ")", ":", "return", "peer_list_resp", "# verify that all strings are host:ports", "for", "peer_hostport", "in", "peer_list_resp", "[", "'peers'", "]", ":", "peer_host", ",", "peer_port", "=", "url_to_host_port", "(", "peer_hostport", ")", "if", "peer_host", "is", "None", "or", "peer_port", "is", "None", ":", "return", "{", "'error'", ":", "'Server did not return valid Atlas peers'", ",", "'http_status'", ":", "503", "}", "peers", "=", "peer_list_resp", "except", "ValidationError", "as", "ve", ":", "if", "BLOCKSTACK_DEBUG", ":", "log", ".", "exception", "(", "ve", ")", "resp", "=", "{", "'error'", ":", "'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.'", ",", "'http_status'", ":", "502", "}", "return", "resp", "except", "socket", ".", "timeout", ":", "log", ".", "error", "(", "\"Connection timed out\"", ")", "resp", "=", "{", "'error'", ":", "'Connection to remote host timed out.'", ",", "'http_status'", ":", "503", "}", "return", "resp", "except", "socket", ".", "error", "as", "se", ":", "log", ".", "error", "(", "\"Connection error {}\"", ".", "format", "(", "se", ".", "errno", ")", ")", "resp", "=", "{", "'error'", ":", "'Connection to remote host failed.'", ",", "'http_status'", ":", "502", "}", "return", "resp", "except", "Exception", "as", "ee", ":", "if", "BLOCKSTACK_DEBUG", ":", "log", ".", "exception", "(", "ee", ")", "log", ".", "error", "(", "\"Caught exception while connecting to Blockstack node: {}\"", ".", "format", "(", "ee", ")", ")", "resp", "=", "{", "'error'", ":", "'Failed to contact Blockstack node {}. Try again with `--debug`.'", ".", "format", "(", "hostport", ")", ",", "'http_status'", ":", "500", "}", "return", "resp", "return", "peers" ]
Get an atlas peer's neighbors. Return {'status': True, 'peers': [peers]} on success. Return {'error': ...} on error
[ "Get", "an", "atlas", "peer", "s", "neighbors", ".", "Return", "{", "status", ":", "True", "peers", ":", "[", "peers", "]", "}", "on", "success", ".", "Return", "{", "error", ":", "...", "}", "on", "error" ]
python
train
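A self-contained sketch of the response validation performed above, using the jsonschema package directly instead of blockstack's json_validate helper; the schema is copied from the record, the rest (function name, sample response) is illustrative.

import re
from jsonschema import ValidationError, validate

peers_schema = {
    'type': 'object',
    'properties': {
        'peers': {
            'type': 'array',
            'items': {'type': 'string', 'pattern': '^([^:]+):([1-9][0-9]{1,4})$'},
        },
    },
    'required': ['peers'],
}

def check_peers(resp):
    # Return the peer list if the response validates, otherwise an error dict.
    try:
        validate(resp, peers_schema)
    except ValidationError:
        return {'error': 'Server response did not match expected schema.'}
    # Belt-and-braces host:port re-check, mirroring the loop in the original.
    if not all(re.match(r'^([^:]+):([1-9][0-9]{1,4})$', p) for p in resp['peers']):
        return {'error': 'Server did not return valid Atlas peers'}
    return resp['peers']

print(check_peers({'peers': ['node.example.org:6264']}))  # ['node.example.org:6264']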
jor-/util
util/cache/memory.py
https://github.com/jor-/util/blob/0eb0be84430f88885f4d48335596ca8881f85587/util/cache/memory.py#L35-L51
def attribute_dependend_key(key_function, *dependencies): """Return a cache key for the specified hashable arguments with additional dependent arguments.""" def dependend_key_function(self, *args, **kwargs): key = hash_key(*args, **kwargs) if len(dependencies) > 0: dependecies_dict = {} for dependency in dependencies: value = eval(dependency) dependecies_dict[dependency] = value key = key + cachetools.keys._HashedTuple(_dependency_mark + tuple(itertools.chain(sorted(dependecies_dict.items())))) return key return dependend_key_function
[ "def", "attribute_dependend_key", "(", "key_function", ",", "*", "dependencies", ")", ":", "def", "dependend_key_function", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "hash_key", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "dependencies", ")", ">", "0", ":", "dependecies_dict", "=", "{", "}", "for", "dependency", "in", "dependencies", ":", "value", "=", "eval", "(", "dependency", ")", "dependecies_dict", "[", "dependency", "]", "=", "value", "key", "=", "key", "+", "cachetools", ".", "keys", ".", "_HashedTuple", "(", "_dependency_mark", "+", "tuple", "(", "itertools", ".", "chain", "(", "sorted", "(", "dependecies_dict", ".", "items", "(", ")", ")", ")", ")", ")", "return", "key", "return", "dependend_key_function" ]
Return a cache key for the specified hashable arguments with additional dependent arguments.
[ "Return", "a", "cache", "key", "for", "the", "specified", "hashable", "arguments", "with", "additional", "dependent", "arguments", "." ]
python
train
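A hypothetical standalone sketch of the same idea without eval(): derive the cache key from the call arguments plus the values of named instance attributes, so cached results are invalidated when those attributes change. Only cachetools.keys.hashkey is assumed from the library.

from cachetools.keys import hashkey

def attribute_dependent_key(*attr_names):
    # Build a key function for use with cachetools-style cached methods.
    def key(self, *args, **kwargs):
        dep_values = tuple(getattr(self, name) for name in sorted(attr_names))
        # Simplification: dependency values are folded into the positional part of the key.
        return hashkey(*(args + dep_values), **kwargs)
    return key

class Model(object):
    tolerance = 3

key_fn = attribute_dependent_key('tolerance')
m = Model()
k1 = key_fn(m, 10)
m.tolerance = 4
k2 = key_fn(m, 10)
print(k1 == k2)  # False: the key changes when the dependent attribute changes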
juju/charm-helpers
charmhelpers/contrib/hardening/host/checks/minimize_access.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/hardening/host/checks/minimize_access.py#L22-L50
def get_audits(): """Get OS hardening access audits. :returns: dictionary of audits """ audits = [] settings = utils.get_settings('os') # Remove write permissions from $PATH folders for all regular users. # This prevents changing system-wide commands from normal users. path_folders = {'/usr/local/sbin', '/usr/local/bin', '/usr/sbin', '/usr/bin', '/bin'} extra_user_paths = settings['environment']['extra_user_paths'] path_folders.update(extra_user_paths) audits.append(ReadOnly(path_folders)) # Only allow the root user to have access to the shadow file. audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) if 'change_user' not in settings['security']['users_allow']: # su should only be accessible to user and group root, unless it is # expressly defined to allow users to change to root via the # security_users_allow config option. audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) return audits
[ "def", "get_audits", "(", ")", ":", "audits", "=", "[", "]", "settings", "=", "utils", ".", "get_settings", "(", "'os'", ")", "# Remove write permissions from $PATH folders for all regular users.", "# This prevents changing system-wide commands from normal users.", "path_folders", "=", "{", "'/usr/local/sbin'", ",", "'/usr/local/bin'", ",", "'/usr/sbin'", ",", "'/usr/bin'", ",", "'/bin'", "}", "extra_user_paths", "=", "settings", "[", "'environment'", "]", "[", "'extra_user_paths'", "]", "path_folders", ".", "update", "(", "extra_user_paths", ")", "audits", ".", "append", "(", "ReadOnly", "(", "path_folders", ")", ")", "# Only allow the root user to have access to the shadow file.", "audits", ".", "append", "(", "FilePermissionAudit", "(", "'/etc/shadow'", ",", "'root'", ",", "'root'", ",", "0o0600", ")", ")", "if", "'change_user'", "not", "in", "settings", "[", "'security'", "]", "[", "'users_allow'", "]", ":", "# su should only be accessible to user and group root, unless it is", "# expressly defined to allow users to change to root via the", "# security_users_allow config option.", "audits", ".", "append", "(", "FilePermissionAudit", "(", "'/bin/su'", ",", "'root'", ",", "'root'", ",", "0o750", ")", ")", "return", "audits" ]
Get OS hardening access audits. :returns: dictionary of audits
[ "Get", "OS", "hardening", "access", "audits", "." ]
python
train
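A minimal standalone sketch (not the charm-helpers API) of the kind of check a FilePermissionAudit entry above encodes: confirm a path is owned by the expected user and group and carries no permission bits beyond the allowed mode. Unix-only, and the expected values are examples.

import grp
import os
import pwd
import stat

def permissions_ok(path, owner, group, allowed_mode):
    st = os.stat(path)
    mode = stat.S_IMODE(st.st_mode)
    return (pwd.getpwuid(st.st_uid).pw_name == owner
            and grp.getgrgid(st.st_gid).gr_name == group
            and mode & ~allowed_mode == 0)  # no bits outside the allowed mode

print(permissions_ok('/etc/shadow', 'root', 'root', 0o600))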
siemens/django-dingos
dingos/import_handling.py
https://github.com/siemens/django-dingos/blob/7154f75b06d2538568e2f2455a76f3d0db0b7d70/dingos/import_handling.py#L318-L766
def xml_import(self, xml_fname=None, xml_content=None, ns_mapping=None, embedded_predicate=None, id_and_revision_extractor=None, extract_empty_embedded=False, keep_attrs_in_created_reference=True, transformer=None): """ This is the generic XML import function for dingos. Its parameters are as follows: - xml_fname: Filename of the XML file to be read - xml_content: Alternatively, the xml_content can be provided as string or as XMLNode (i.e., a piece of XML that has already been parsed) - ns_mapping: A dictionary that may already contain mappings of namespaces to namespace URIs. Attention: this dictionary will be enriched with namespace information found in the XML file!!! - embedded_predicate: A function that, when given an XML node and a child node, determines whether the child node should be treated as separate entity that has been embedded. Please refer to existing import modules such as for STIX or OpenIOC for examples of how to use this parameter. - id_and_revision_extractor: A function that, when given an XML node and a child node, determines whether this node specifies an identifier and possibly a timestamp. Please refer to existing import modules such as for STIX or OpenIOC for examples of how to use this parameter. - extract_empty_embedded: A flag (True/False) governing whether elements that are recognized as being embedded but contain no childs should be extracted as separate object or not. The default is False; the setting "True" may be necessary in cases where there are embedded objects that contain all its information in attributes rather than using child elements. - keep_attrs_in_created_reference: A flag (True/False) governing the shape of the reference created for an embedded object: when an embedding is recognized, it is extracted and a reference using 'idref' inserted instead. If 'keep_attrs_in_created_reference' is True, then the top-level attributes contained in the found embedded object are also retained in the reference. - transformer: A function that, when given an element name and a DingoObjDict containing the result of importing the contents under the element of the given name, may or may not change the element name and transform the DingoObjDict. Please refer to existing import MANTIS modules such as for OpenIOC for examples of how to use this parameter. Note: a good starting point for understanding how to use the python bindings of libxml2 is http://mikekneller.com/kb/python/libxml2python/part1. 
""" generated_id_count = {} # Fill defaults if not ns_mapping: nas_mapping = {} if not transformer: transformer = lambda x, y: (x, y) # We use the _import_pending_stack to hold extracted embedded objects # that still need to be processed _import_pending_stack = deque() # We collect the read embedded objects in the following list embedded_objects = deque() def xml_import_(element, depth=0, type_info=None, inherited_id_and_rev_info=None): """ Recursive import function """ if not inherited_id_and_rev_info: inherited_id_and_rev_info = main_id_and_rev_info.copy() fresh_inherited_id_and_rev_info = inherited_id_and_rev_info.copy() if element.name == 'comment': return None #try: # namespace = element.ns() # ns_mapping[namespace.name]=namespace.content #except: # pass result = DingoObjDict() # Add properties to result dictionary for this element if element.properties: for prop in element.properties: if not prop: break if prop.type == 'attribute': try: result["@%s:%s" % (prop.ns().name, prop.name)] = prop.content except: result["@%s" % prop.name] = prop.content # see if there is a namespace try: ns = element.ns().name result["@@ns"] = ns except: pass # prepare list for keeping resulting dictionaries of child elements element_dicts = [] # While looking at the child-elements, we have to keep track # of certain data. # Firstly: keep track whether we have seen text content that is not whitespace -- # if that happens, we know that this element contains mixed # content and we will back off and just dump the element contents # as is as value into the dictionary. non_ws_content = False # Secondly: If the element contains cdata stuff, we will see # that one (well, the only) child has type cdata. So if # we find such a child, we set the flag cdata_content = False # Thirdly: we keep track of how many different child-element-names # we have seen. # # - If we find that we have exactly one distinct name, # we will generate a dictionary of form # {<Element_Name> : [ <list of child elemen dictionaries> ]} # - If we find that we have as many distinct names as we # child elements, we create a dictionary mapping each child element # name to its dictionary representation # - If we find that we have less child element names than # we have children, we know that at least one name # occured more than once. Our dictionary representation cannot # deal with that, and we back off and dump the contents as they # are marked as 'xml' content with the '@@type' attribute. name_set = {} previous_seen_child = None double_occurrance = False element_child_count = 0 child = element.children while child is not None: #if child_name=='comment': # pass if child.name == 'text': # If we have non-whitespace content in one of the children, # we set the non_ws_content flag content = child.content.strip() if content != "": non_ws_content = True elif child.type == 'cdata': logger.debug("!!!!FOUND CDATA") # If one of the children (actually, it should be the only child) # has type cdata, we know that the parent element contains cdata # and set the cdata_content flag accordingly cdata_content = True else: # we have found an element, so we recurse into it. 
element_child_count += 1 if previous_seen_child and (child.name in name_set) and (not child.name == previous_seen_child): double_occurrance = True name_set[child.name] = None if embedded_predicate: embedded_ns = embedded_predicate(element, child, ns_mapping) logger.debug("Embedded ns is %s" % embedded_ns) if embedded_ns: inherited_id_and_rev_info = fresh_inherited_id_and_rev_info.copy() # There is an embedded object. We therefore # replace the contents of the element with an element # containing an idref (and, since we might need them, # all attributes of the embedded element) if type(embedded_ns) == type({}): # If necessary, the embedded_predicate can return more information # than namespace information, namely we can can hand down # id and revision info that has been derived wenn the embedding # was detected. For backward compatibility, # we further allow returning of a string; if, however, # a dictionary is returned, there is id_and_revision_info. id_and_revision_info = embedded_ns.get('id_and_revision_info', id_and_revision_extractor(child)) embedded_ns = embedded_ns.get('embedded_ns',None) else: id_and_revision_info = id_and_revision_extractor(child) # See whether stuff needs to be inherited if not 'id' in id_and_revision_info or not id_and_revision_info['id']: if 'id' in inherited_id_and_rev_info: parent_id = inherited_id_and_rev_info['id'] if parent_id in generated_id_count: gen_counter = generated_id_count[parent_id] gen_counter +=1 else: gen_counter = 0 generated_id_count[parent_id] = gen_counter (parent_namespace, parent_uid) = parent_id.split(':') generated_id = "%s:emb%s-in-%s" % (parent_namespace,gen_counter,parent_uid) logger.info("Found embedded %s without id and generated id %s" % (element.name,generated_id)) id_and_revision_info['id'] = generated_id id_and_revision_info['id_inherited'] = True else: logger.error("Attempt to import object (element name %s) without id -- object is ignored" % elt_name) #cybox_id = gen_cybox_id(iobject_type_name) if not id_and_revision_info.get('timestamp', None): if inherited_id_and_rev_info and 'timestamp' in inherited_id_and_rev_info: id_and_revision_info['timestamp'] = inherited_id_and_rev_info['timestamp'] id_and_revision_info['ts_inherited'] = True else: inherited_id_and_rev_info['timestamp'] = id_and_revision_info['timestamp'] if 'id' in id_and_revision_info: # If the identifier has no namespace info (this may occur, e.g. 
for # embedded OpenIOC in STIX, we take the namespace inherited from the # embedding object if (not ':' in id_and_revision_info['id'] and inherited_id_and_rev_info['id'] and ':' in inherited_id_and_rev_info['id']): id_and_revision_info['id'] = "%s:%s" % (inherited_id_and_rev_info['id'].split(':')[0], id_and_revision_info['id']) id_and_revision_info['ns_inherited'] = True inherited_id_and_rev_info['id'] = id_and_revision_info['id'] if keep_attrs_in_created_reference: reference_dict = extract_attributes(child, prefix_key_char='@', dict_constructor=DingoObjDict) else: reference_dict = DingoObjDict() reference_dict['@idref'] = id_and_revision_info['id'] reference_dict['@@timestamp'] = id_and_revision_info['timestamp'] try: reference_dict['@@ns'] = child.ns().name except: reference_dict['@@ns'] = None if embedded_ns == True: embedded_ns = None logger.debug("Setting embedded type info to %s" % embedded_ns) reference_dict['@@embedded_type_info'] = embedded_ns element_dicts.append((child.name, reference_dict)) if (child.children or child.content) \ or extract_empty_embedded \ or 'extract_empty_embedded' in id_and_revision_info: id_and_revision_info['inherited'] = fresh_inherited_id_and_rev_info.copy() if 'inherited' in id_and_revision_info['inherited']: for key in id_and_revision_info['inherited']['inherited']: if not key in id_and_revision_info['inherited']: id_and_revision_info['inherited'][key] = id_and_revision_info['inherited']['inherited'][key] del(id_and_revision_info['inherited']['inherited']) logger.debug( "Adding XML subtree starting with element %s and type info %s to pending stack." % ( id_and_revision_info, embedded_ns)) _import_pending_stack.append((id_and_revision_info, embedded_ns, child)) else: # For example, in cybox 1.0, the following occurs:: # <EmailMessageObj:File xsi:type="FileObj:FileObjectType" object_reference="cybox:object-3cf6a958-5c3f-11e2-a06c-0050569761d3"/> # This is only a reference and may not be confused with the definition of the object, # which occurs someplace else -- otherwise, the (almost) empty reference is created as object # and may overwrite the object resulting from the real definition. logger.info( "Not adding element %s with type info %s to pending stack because element is empty." 
% ( id_and_revision_info, embedded_ns)) else: child_import = xml_import_(child, depth + 1, inherited_id_and_rev_info=inherited_id_and_rev_info) if child_import: element_dicts.append(child_import) else: child_import = xml_import_(child, depth + 1, inherited_id_and_rev_info=inherited_id_and_rev_info) if child_import: element_dicts.append(child_import) child = child.next # now, we decide what to do with this node distinct_child_count = len(name_set.keys()) if distinct_child_count == 0: # No child elements were detected, so we dump the content into # the value result['_value'] = element.content if cdata_content: # If this is a cdata element, we mark it as such result['@@content_type'] = 'cdata' elif non_ws_content == True: # We have mixed content, so we dump it sub_child = element.children serialization = '' while sub_child: serialization += sub_child.serialize() sub_child = sub_child.next result['_value'] = serialization.strip() #result['_value']=element.serialize() result['@@content_type'] = 'mixed' elif double_occurrance: # distinct_child_count >1 and (distinct_child_count) < element_child_count: # We have a structure our dictionary representation cannot # deal with -- so we dump it logger.warning("Cannot deal with XML structure of %s (children %s, count %s): will dump to value" % ( element.name, name_set.keys(), element_child_count)) sub_child = element.children serialization = '' while sub_child: serialization += sub_child.serialize() sub_child = sub_child.next result['_value'] = serialization.strip() #result['_value']=element.serialize() result['@@content_type'] = 'xml' else: previously_written_name = None for (name, element_dict) in element_dicts: if not previously_written_name or name != previously_written_name: result[name] = element_dict previously_written_name = name else: # if name == previously_written_name: if type(result[name]) == type([]): result[name].append(element_dict) else: result[name] = [result[name], element_dict] if type_info: result['@@embedded_type_info'] = type_info element_ns = None try: element_ns = element.ns().name except: pass return transformer(element.name, result) if xml_content: if isinstance(xml_content,libxml2.xmlNode): root = xml_content else: doc = libxml2.parseDoc(xml_content) root = doc.getRootElement() else: doc = libxml2.recoverFile(xml_fname) root = doc.getRootElement() with open(xml_fname, 'r') as content_file: xml_content = content_file.read() # Extract namespace information (if any) try: ns_def = root.nsDefs() while ns_def: ns_mapping[ns_def.name] = ns_def.content ns_def = ns_def.next except: pass # Extract ID and timestamp for root element main_id_and_rev_info = id_and_revision_extractor(root) # Call the internal recursive function. This returns # - name of the top-level element # - DingoObjDict resulting from import # As side effect, it pushes the XML nodes of # found embedded objects onto the pending stack (main_elt_name, main_elt_dict) = xml_import_(root, 0) # We now go through the pending stack. 
# For each found embedded object, xml_import_ pushes # the following triple on the stack: # - id_and_revision_info: A dictionary, containing # identifier and (possibly) timestamp information # for that object # - type_info: Information about the type of the # embedded object (can be None) # - the XML node that describes the embedded object do_not_process_list = [] while _import_pending_stack: (id_and_revision_info, type_info, elt) = _import_pending_stack.pop() if 'defer_processing' in id_and_revision_info: do_not_process_list.append((id_and_revision_info,type_info,elt)) else: (elt_name, elt_dict) = xml_import_(elt, 0, type_info=type_info, inherited_id_and_rev_info=id_and_revision_info.copy()) embedded_objects.append({'id_and_rev_info': id_and_revision_info, 'elt_name': elt_name, 'dict_repr': elt_dict}) result= {'id_and_rev_info': main_id_and_rev_info, 'elt_name': main_elt_name, 'dict_repr': main_elt_dict, 'embedded_objects': embedded_objects, 'unprocessed' : do_not_process_list, 'file_content': xml_content} #pp.pprint(result) return result
[ "def", "xml_import", "(", "self", ",", "xml_fname", "=", "None", ",", "xml_content", "=", "None", ",", "ns_mapping", "=", "None", ",", "embedded_predicate", "=", "None", ",", "id_and_revision_extractor", "=", "None", ",", "extract_empty_embedded", "=", "False", ",", "keep_attrs_in_created_reference", "=", "True", ",", "transformer", "=", "None", ")", ":", "generated_id_count", "=", "{", "}", "# Fill defaults", "if", "not", "ns_mapping", ":", "nas_mapping", "=", "{", "}", "if", "not", "transformer", ":", "transformer", "=", "lambda", "x", ",", "y", ":", "(", "x", ",", "y", ")", "# We use the _import_pending_stack to hold extracted embedded objects", "# that still need to be processed", "_import_pending_stack", "=", "deque", "(", ")", "# We collect the read embedded objects in the following list", "embedded_objects", "=", "deque", "(", ")", "def", "xml_import_", "(", "element", ",", "depth", "=", "0", ",", "type_info", "=", "None", ",", "inherited_id_and_rev_info", "=", "None", ")", ":", "\"\"\"\n Recursive import function\n \"\"\"", "if", "not", "inherited_id_and_rev_info", ":", "inherited_id_and_rev_info", "=", "main_id_and_rev_info", ".", "copy", "(", ")", "fresh_inherited_id_and_rev_info", "=", "inherited_id_and_rev_info", ".", "copy", "(", ")", "if", "element", ".", "name", "==", "'comment'", ":", "return", "None", "#try:", "# namespace = element.ns()", "# ns_mapping[namespace.name]=namespace.content", "#except:", "# pass", "result", "=", "DingoObjDict", "(", ")", "# Add properties to result dictionary for this element", "if", "element", ".", "properties", ":", "for", "prop", "in", "element", ".", "properties", ":", "if", "not", "prop", ":", "break", "if", "prop", ".", "type", "==", "'attribute'", ":", "try", ":", "result", "[", "\"@%s:%s\"", "%", "(", "prop", ".", "ns", "(", ")", ".", "name", ",", "prop", ".", "name", ")", "]", "=", "prop", ".", "content", "except", ":", "result", "[", "\"@%s\"", "%", "prop", ".", "name", "]", "=", "prop", ".", "content", "# see if there is a namespace", "try", ":", "ns", "=", "element", ".", "ns", "(", ")", ".", "name", "result", "[", "\"@@ns\"", "]", "=", "ns", "except", ":", "pass", "# prepare list for keeping resulting dictionaries of child elements", "element_dicts", "=", "[", "]", "# While looking at the child-elements, we have to keep track", "# of certain data.", "# Firstly: keep track whether we have seen text content that is not whitespace --", "# if that happens, we know that this element contains mixed", "# content and we will back off and just dump the element contents", "# as is as value into the dictionary.", "non_ws_content", "=", "False", "# Secondly: If the element contains cdata stuff, we will see", "# that one (well, the only) child has type cdata. So if", "# we find such a child, we set the flag", "cdata_content", "=", "False", "# Thirdly: we keep track of how many different child-element-names", "# we have seen. ", "#", "# - If we find that we have exactly one distinct name,", "# we will generate a dictionary of form", "# {<Element_Name> : [ <list of child elemen dictionaries> ]}", "# - If we find that we have as many distinct names as we", "# child elements, we create a dictionary mapping each child element", "# name to its dictionary representation", "# - If we find that we have less child element names than", "# we have children, we know that at least one name", "# occured more than once. 
Our dictionary representation cannot", "# deal with that, and we back off and dump the contents as they", "# are marked as 'xml' content with the '@@type' attribute.", "name_set", "=", "{", "}", "previous_seen_child", "=", "None", "double_occurrance", "=", "False", "element_child_count", "=", "0", "child", "=", "element", ".", "children", "while", "child", "is", "not", "None", ":", "#if child_name=='comment':", "# pass", "if", "child", ".", "name", "==", "'text'", ":", "# If we have non-whitespace content in one of the children,", "# we set the non_ws_content flag", "content", "=", "child", ".", "content", ".", "strip", "(", ")", "if", "content", "!=", "\"\"", ":", "non_ws_content", "=", "True", "elif", "child", ".", "type", "==", "'cdata'", ":", "logger", ".", "debug", "(", "\"!!!!FOUND CDATA\"", ")", "# If one of the children (actually, it should be the only child)", "# has type cdata, we know that the parent element contains cdata", "# and set the cdata_content flag accordingly", "cdata_content", "=", "True", "else", ":", "# we have found an element, so we recurse into it.", "element_child_count", "+=", "1", "if", "previous_seen_child", "and", "(", "child", ".", "name", "in", "name_set", ")", "and", "(", "not", "child", ".", "name", "==", "previous_seen_child", ")", ":", "double_occurrance", "=", "True", "name_set", "[", "child", ".", "name", "]", "=", "None", "if", "embedded_predicate", ":", "embedded_ns", "=", "embedded_predicate", "(", "element", ",", "child", ",", "ns_mapping", ")", "logger", ".", "debug", "(", "\"Embedded ns is %s\"", "%", "embedded_ns", ")", "if", "embedded_ns", ":", "inherited_id_and_rev_info", "=", "fresh_inherited_id_and_rev_info", ".", "copy", "(", ")", "# There is an embedded object. We therefore", "# replace the contents of the element with an element", "# containing an idref (and, since we might need them,", "# all attributes of the embedded element)", "if", "type", "(", "embedded_ns", ")", "==", "type", "(", "{", "}", ")", ":", "# If necessary, the embedded_predicate can return more information", "# than namespace information, namely we can can hand down", "# id and revision info that has been derived wenn the embedding", "# was detected. 
For backward compatibility,", "# we further allow returning of a string; if, however,", "# a dictionary is returned, there is id_and_revision_info.", "id_and_revision_info", "=", "embedded_ns", ".", "get", "(", "'id_and_revision_info'", ",", "id_and_revision_extractor", "(", "child", ")", ")", "embedded_ns", "=", "embedded_ns", ".", "get", "(", "'embedded_ns'", ",", "None", ")", "else", ":", "id_and_revision_info", "=", "id_and_revision_extractor", "(", "child", ")", "# See whether stuff needs to be inherited", "if", "not", "'id'", "in", "id_and_revision_info", "or", "not", "id_and_revision_info", "[", "'id'", "]", ":", "if", "'id'", "in", "inherited_id_and_rev_info", ":", "parent_id", "=", "inherited_id_and_rev_info", "[", "'id'", "]", "if", "parent_id", "in", "generated_id_count", ":", "gen_counter", "=", "generated_id_count", "[", "parent_id", "]", "gen_counter", "+=", "1", "else", ":", "gen_counter", "=", "0", "generated_id_count", "[", "parent_id", "]", "=", "gen_counter", "(", "parent_namespace", ",", "parent_uid", ")", "=", "parent_id", ".", "split", "(", "':'", ")", "generated_id", "=", "\"%s:emb%s-in-%s\"", "%", "(", "parent_namespace", ",", "gen_counter", ",", "parent_uid", ")", "logger", ".", "info", "(", "\"Found embedded %s without id and generated id %s\"", "%", "(", "element", ".", "name", ",", "generated_id", ")", ")", "id_and_revision_info", "[", "'id'", "]", "=", "generated_id", "id_and_revision_info", "[", "'id_inherited'", "]", "=", "True", "else", ":", "logger", ".", "error", "(", "\"Attempt to import object (element name %s) without id -- object is ignored\"", "%", "elt_name", ")", "#cybox_id = gen_cybox_id(iobject_type_name)", "if", "not", "id_and_revision_info", ".", "get", "(", "'timestamp'", ",", "None", ")", ":", "if", "inherited_id_and_rev_info", "and", "'timestamp'", "in", "inherited_id_and_rev_info", ":", "id_and_revision_info", "[", "'timestamp'", "]", "=", "inherited_id_and_rev_info", "[", "'timestamp'", "]", "id_and_revision_info", "[", "'ts_inherited'", "]", "=", "True", "else", ":", "inherited_id_and_rev_info", "[", "'timestamp'", "]", "=", "id_and_revision_info", "[", "'timestamp'", "]", "if", "'id'", "in", "id_and_revision_info", ":", "# If the identifier has no namespace info (this may occur, e.g. 
for", "# embedded OpenIOC in STIX, we take the namespace inherited from the", "# embedding object", "if", "(", "not", "':'", "in", "id_and_revision_info", "[", "'id'", "]", "and", "inherited_id_and_rev_info", "[", "'id'", "]", "and", "':'", "in", "inherited_id_and_rev_info", "[", "'id'", "]", ")", ":", "id_and_revision_info", "[", "'id'", "]", "=", "\"%s:%s\"", "%", "(", "inherited_id_and_rev_info", "[", "'id'", "]", ".", "split", "(", "':'", ")", "[", "0", "]", ",", "id_and_revision_info", "[", "'id'", "]", ")", "id_and_revision_info", "[", "'ns_inherited'", "]", "=", "True", "inherited_id_and_rev_info", "[", "'id'", "]", "=", "id_and_revision_info", "[", "'id'", "]", "if", "keep_attrs_in_created_reference", ":", "reference_dict", "=", "extract_attributes", "(", "child", ",", "prefix_key_char", "=", "'@'", ",", "dict_constructor", "=", "DingoObjDict", ")", "else", ":", "reference_dict", "=", "DingoObjDict", "(", ")", "reference_dict", "[", "'@idref'", "]", "=", "id_and_revision_info", "[", "'id'", "]", "reference_dict", "[", "'@@timestamp'", "]", "=", "id_and_revision_info", "[", "'timestamp'", "]", "try", ":", "reference_dict", "[", "'@@ns'", "]", "=", "child", ".", "ns", "(", ")", ".", "name", "except", ":", "reference_dict", "[", "'@@ns'", "]", "=", "None", "if", "embedded_ns", "==", "True", ":", "embedded_ns", "=", "None", "logger", ".", "debug", "(", "\"Setting embedded type info to %s\"", "%", "embedded_ns", ")", "reference_dict", "[", "'@@embedded_type_info'", "]", "=", "embedded_ns", "element_dicts", ".", "append", "(", "(", "child", ".", "name", ",", "reference_dict", ")", ")", "if", "(", "child", ".", "children", "or", "child", ".", "content", ")", "or", "extract_empty_embedded", "or", "'extract_empty_embedded'", "in", "id_and_revision_info", ":", "id_and_revision_info", "[", "'inherited'", "]", "=", "fresh_inherited_id_and_rev_info", ".", "copy", "(", ")", "if", "'inherited'", "in", "id_and_revision_info", "[", "'inherited'", "]", ":", "for", "key", "in", "id_and_revision_info", "[", "'inherited'", "]", "[", "'inherited'", "]", ":", "if", "not", "key", "in", "id_and_revision_info", "[", "'inherited'", "]", ":", "id_and_revision_info", "[", "'inherited'", "]", "[", "key", "]", "=", "id_and_revision_info", "[", "'inherited'", "]", "[", "'inherited'", "]", "[", "key", "]", "del", "(", "id_and_revision_info", "[", "'inherited'", "]", "[", "'inherited'", "]", ")", "logger", ".", "debug", "(", "\"Adding XML subtree starting with element %s and type info %s to pending stack.\"", "%", "(", "id_and_revision_info", ",", "embedded_ns", ")", ")", "_import_pending_stack", ".", "append", "(", "(", "id_and_revision_info", ",", "embedded_ns", ",", "child", ")", ")", "else", ":", "# For example, in cybox 1.0, the following occurs::", "# <EmailMessageObj:File xsi:type=\"FileObj:FileObjectType\" object_reference=\"cybox:object-3cf6a958-5c3f-11e2-a06c-0050569761d3\"/>", "# This is only a reference and may not be confused with the definition of the object,", "# which occurs someplace else -- otherwise, the (almost) empty reference is created as object", "# and may overwrite the object resulting from the real definition.", "logger", ".", "info", "(", "\"Not adding element %s with type info %s to pending stack because element is empty.\"", "%", "(", "id_and_revision_info", ",", "embedded_ns", ")", ")", "else", ":", "child_import", "=", "xml_import_", "(", "child", ",", "depth", "+", "1", ",", "inherited_id_and_rev_info", "=", "inherited_id_and_rev_info", ")", "if", "child_import", ":", 
"element_dicts", ".", "append", "(", "child_import", ")", "else", ":", "child_import", "=", "xml_import_", "(", "child", ",", "depth", "+", "1", ",", "inherited_id_and_rev_info", "=", "inherited_id_and_rev_info", ")", "if", "child_import", ":", "element_dicts", ".", "append", "(", "child_import", ")", "child", "=", "child", ".", "next", "# now, we decide what to do with this node", "distinct_child_count", "=", "len", "(", "name_set", ".", "keys", "(", ")", ")", "if", "distinct_child_count", "==", "0", ":", "# No child elements were detected, so we dump the content into", "# the value", "result", "[", "'_value'", "]", "=", "element", ".", "content", "if", "cdata_content", ":", "# If this is a cdata element, we mark it as such", "result", "[", "'@@content_type'", "]", "=", "'cdata'", "elif", "non_ws_content", "==", "True", ":", "# We have mixed content, so we dump it", "sub_child", "=", "element", ".", "children", "serialization", "=", "''", "while", "sub_child", ":", "serialization", "+=", "sub_child", ".", "serialize", "(", ")", "sub_child", "=", "sub_child", ".", "next", "result", "[", "'_value'", "]", "=", "serialization", ".", "strip", "(", ")", "#result['_value']=element.serialize()", "result", "[", "'@@content_type'", "]", "=", "'mixed'", "elif", "double_occurrance", ":", "# distinct_child_count >1 and (distinct_child_count) < element_child_count:", "# We have a structure our dictionary representation cannot", "# deal with -- so we dump it", "logger", ".", "warning", "(", "\"Cannot deal with XML structure of %s (children %s, count %s): will dump to value\"", "%", "(", "element", ".", "name", ",", "name_set", ".", "keys", "(", ")", ",", "element_child_count", ")", ")", "sub_child", "=", "element", ".", "children", "serialization", "=", "''", "while", "sub_child", ":", "serialization", "+=", "sub_child", ".", "serialize", "(", ")", "sub_child", "=", "sub_child", ".", "next", "result", "[", "'_value'", "]", "=", "serialization", ".", "strip", "(", ")", "#result['_value']=element.serialize()", "result", "[", "'@@content_type'", "]", "=", "'xml'", "else", ":", "previously_written_name", "=", "None", "for", "(", "name", ",", "element_dict", ")", "in", "element_dicts", ":", "if", "not", "previously_written_name", "or", "name", "!=", "previously_written_name", ":", "result", "[", "name", "]", "=", "element_dict", "previously_written_name", "=", "name", "else", ":", "# if name == previously_written_name:", "if", "type", "(", "result", "[", "name", "]", ")", "==", "type", "(", "[", "]", ")", ":", "result", "[", "name", "]", ".", "append", "(", "element_dict", ")", "else", ":", "result", "[", "name", "]", "=", "[", "result", "[", "name", "]", ",", "element_dict", "]", "if", "type_info", ":", "result", "[", "'@@embedded_type_info'", "]", "=", "type_info", "element_ns", "=", "None", "try", ":", "element_ns", "=", "element", ".", "ns", "(", ")", ".", "name", "except", ":", "pass", "return", "transformer", "(", "element", ".", "name", ",", "result", ")", "if", "xml_content", ":", "if", "isinstance", "(", "xml_content", ",", "libxml2", ".", "xmlNode", ")", ":", "root", "=", "xml_content", "else", ":", "doc", "=", "libxml2", ".", "parseDoc", "(", "xml_content", ")", "root", "=", "doc", ".", "getRootElement", "(", ")", "else", ":", "doc", "=", "libxml2", ".", "recoverFile", "(", "xml_fname", ")", "root", "=", "doc", ".", "getRootElement", "(", ")", "with", "open", "(", "xml_fname", ",", "'r'", ")", "as", "content_file", ":", "xml_content", "=", "content_file", ".", "read", "(", ")", "# Extract 
namespace information (if any)", "try", ":", "ns_def", "=", "root", ".", "nsDefs", "(", ")", "while", "ns_def", ":", "ns_mapping", "[", "ns_def", ".", "name", "]", "=", "ns_def", ".", "content", "ns_def", "=", "ns_def", ".", "next", "except", ":", "pass", "# Extract ID and timestamp for root element", "main_id_and_rev_info", "=", "id_and_revision_extractor", "(", "root", ")", "# Call the internal recursive function. This returns", "# - name of the top-level element", "# - DingoObjDict resulting from import", "# As side effect, it pushes the XML nodes of", "# found embedded objects onto the pending stack", "(", "main_elt_name", ",", "main_elt_dict", ")", "=", "xml_import_", "(", "root", ",", "0", ")", "# We now go through the pending stack.", "# For each found embedded object, xml_import_ pushes", "# the following triple on the stack:", "# - id_and_revision_info: A dictionary, containing", "# identifier and (possibly) timestamp information", "# for that object", "# - type_info: Information about the type of the", "# embedded object (can be None)", "# - the XML node that describes the embedded object", "do_not_process_list", "=", "[", "]", "while", "_import_pending_stack", ":", "(", "id_and_revision_info", ",", "type_info", ",", "elt", ")", "=", "_import_pending_stack", ".", "pop", "(", ")", "if", "'defer_processing'", "in", "id_and_revision_info", ":", "do_not_process_list", ".", "append", "(", "(", "id_and_revision_info", ",", "type_info", ",", "elt", ")", ")", "else", ":", "(", "elt_name", ",", "elt_dict", ")", "=", "xml_import_", "(", "elt", ",", "0", ",", "type_info", "=", "type_info", ",", "inherited_id_and_rev_info", "=", "id_and_revision_info", ".", "copy", "(", ")", ")", "embedded_objects", ".", "append", "(", "{", "'id_and_rev_info'", ":", "id_and_revision_info", ",", "'elt_name'", ":", "elt_name", ",", "'dict_repr'", ":", "elt_dict", "}", ")", "result", "=", "{", "'id_and_rev_info'", ":", "main_id_and_rev_info", ",", "'elt_name'", ":", "main_elt_name", ",", "'dict_repr'", ":", "main_elt_dict", ",", "'embedded_objects'", ":", "embedded_objects", ",", "'unprocessed'", ":", "do_not_process_list", ",", "'file_content'", ":", "xml_content", "}", "#pp.pprint(result)", "return", "result" ]
This is the generic XML import function for dingos. Its parameters are as follows: - xml_fname: Filename of the XML file to be read - xml_content: Alternatively, the xml_content can be provided as string or as XMLNode (i.e., a piece of XML that has already been parsed) - ns_mapping: A dictionary that may already contain mappings of namespaces to namespace URIs. Attention: this dictionary will be enriched with namespace information found in the XML file!!! - embedded_predicate: A function that, when given an XML node and a child node, determines whether the child node should be treated as separate entity that has been embedded. Please refer to existing import modules such as for STIX or OpenIOC for examples of how to use this parameter. - id_and_revision_extractor: A function that, when given an XML node and a child node, determines whether this node specifies an identifier and possibly a timestamp. Please refer to existing import modules such as for STIX or OpenIOC for examples of how to use this parameter. - extract_empty_embedded: A flag (True/False) governing whether elements that are recognized as being embedded but contain no childs should be extracted as separate object or not. The default is False; the setting "True" may be necessary in cases where there are embedded objects that contain all its information in attributes rather than using child elements. - keep_attrs_in_created_reference: A flag (True/False) governing the shape of the reference created for an embedded object: when an embedding is recognized, it is extracted and a reference using 'idref' inserted instead. If 'keep_attrs_in_created_reference' is True, then the top-level attributes contained in the found embedded object are also retained in the reference. - transformer: A function that, when given an element name and a DingoObjDict containing the result of importing the contents under the element of the given name, may or may not change the element name and transform the DingoObjDict. Please refer to existing import MANTIS modules such as for OpenIOC for examples of how to use this parameter. Note: a good starting point for understanding how to use the python bindings of libxml2 is http://mikekneller.com/kb/python/libxml2python/part1.
[ "This", "is", "the", "generic", "XML", "import", "function", "for", "dingos", ".", "Its", "parameters", "are", "as", "follows", ":" ]
python
train
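A greatly simplified standalone sketch of the recursive element-to-dict conversion at the heart of xml_import, written against the standard-library xml.etree instead of libxml2; it ignores namespaces, embedded-object extraction, mixed content and all of the id/timestamp bookkeeping.

import xml.etree.ElementTree as ET

def element_to_dict(elem):
    # Attributes become '@'-prefixed keys, leaf text goes into '_value',
    # repeated child names collapse into a list.
    result = {'@' + k: v for k, v in elem.attrib.items()}
    children = list(elem)
    if not children:
        result['_value'] = (elem.text or '').strip()
        return result
    for child in children:
        value = element_to_dict(child)
        if child.tag in result:
            existing = result[child.tag]
            result[child.tag] = existing + [value] if isinstance(existing, list) else [existing, value]
        else:
            result[child.tag] = value
    return result

root = ET.fromstring('<Indicator id="x-1"><Title>demo</Title><Item>a</Item><Item>b</Item></Indicator>')
print(element_to_dict(root))
# {'@id': 'x-1', 'Title': {'_value': 'demo'}, 'Item': [{'_value': 'a'}, {'_value': 'b'}]}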
safarijv/sbo-selenium
sbo_selenium/management/commands/selenium.py
https://github.com/safarijv/sbo-selenium/blob/16539f1b17cda18270033db3b64ab25bc05c5664/sbo_selenium/management/commands/selenium.py#L67-L71
def clean(): """Clear out any old screenshots""" screenshot_dir = settings.SELENIUM_SCREENSHOT_DIR if screenshot_dir and os.path.isdir(screenshot_dir): rmtree(screenshot_dir, ignore_errors=True)
[ "def", "clean", "(", ")", ":", "screenshot_dir", "=", "settings", ".", "SELENIUM_SCREENSHOT_DIR", "if", "screenshot_dir", "and", "os", ".", "path", ".", "isdir", "(", "screenshot_dir", ")", ":", "rmtree", "(", "screenshot_dir", ",", "ignore_errors", "=", "True", ")" ]
Clear out any old screenshots
[ "Clear", "out", "any", "old", "screenshots" ]
python
train
soravux/scoop
examples/dependency/sortingnetwork.py
https://github.com/soravux/scoop/blob/d391dfa62f47e49d48328ee9cf08aa114256fd33/examples/dependency/sortingnetwork.py#L88-L116
def draw(self): """Return an ASCII representation of the network.""" str_wires = [["-"]*7 * self.depth] str_wires[0][0] = "0" str_wires[0][1] = " o" str_spaces = [] for i in range(1, self.dimension): str_wires.append(["-"]*7 * self.depth) str_spaces.append([" "]*7 * self.depth) str_wires[i][0] = str(i) str_wires[i][1] = " o" for index, level in enumerate(self): for wire1, wire2 in level: str_wires[wire1][(index+1)*6] = "x" str_wires[wire2][(index+1)*6] = "x" for i in range(wire1, wire2): str_spaces[i][(index+1)*6+1] = "|" for i in range(wire1+1, wire2): str_wires[i][(index+1)*6] = "|" network_draw = "".join(str_wires[0]) for line, space in zip(str_wires[1:], str_spaces): network_draw += "\n" network_draw += "".join(space) network_draw += "\n" network_draw += "".join(line) return network_draw
[ "def", "draw", "(", "self", ")", ":", "str_wires", "=", "[", "[", "\"-\"", "]", "*", "7", "*", "self", ".", "depth", "]", "str_wires", "[", "0", "]", "[", "0", "]", "=", "\"0\"", "str_wires", "[", "0", "]", "[", "1", "]", "=", "\" o\"", "str_spaces", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "self", ".", "dimension", ")", ":", "str_wires", ".", "append", "(", "[", "\"-\"", "]", "*", "7", "*", "self", ".", "depth", ")", "str_spaces", ".", "append", "(", "[", "\" \"", "]", "*", "7", "*", "self", ".", "depth", ")", "str_wires", "[", "i", "]", "[", "0", "]", "=", "str", "(", "i", ")", "str_wires", "[", "i", "]", "[", "1", "]", "=", "\" o\"", "for", "index", ",", "level", "in", "enumerate", "(", "self", ")", ":", "for", "wire1", ",", "wire2", "in", "level", ":", "str_wires", "[", "wire1", "]", "[", "(", "index", "+", "1", ")", "*", "6", "]", "=", "\"x\"", "str_wires", "[", "wire2", "]", "[", "(", "index", "+", "1", ")", "*", "6", "]", "=", "\"x\"", "for", "i", "in", "range", "(", "wire1", ",", "wire2", ")", ":", "str_spaces", "[", "i", "]", "[", "(", "index", "+", "1", ")", "*", "6", "+", "1", "]", "=", "\"|\"", "for", "i", "in", "range", "(", "wire1", "+", "1", ",", "wire2", ")", ":", "str_wires", "[", "i", "]", "[", "(", "index", "+", "1", ")", "*", "6", "]", "=", "\"|\"", "network_draw", "=", "\"\"", ".", "join", "(", "str_wires", "[", "0", "]", ")", "for", "line", ",", "space", "in", "zip", "(", "str_wires", "[", "1", ":", "]", ",", "str_spaces", ")", ":", "network_draw", "+=", "\"\\n\"", "network_draw", "+=", "\"\"", ".", "join", "(", "space", ")", "network_draw", "+=", "\"\\n\"", "network_draw", "+=", "\"\"", ".", "join", "(", "line", ")", "return", "network_draw" ]
Return an ASCII representation of the network.
[ "Return", "an", "ASCII", "representation", "of", "the", "network", "." ]
python
train
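A simplified, self-contained sketch of the ASCII layout idea used by draw above: one row of dashes per wire, an 'x' at each comparator endpoint and a '|' on the wires in between. The draw_network function and the example comparator levels are hypothetical stand-ins, not the scoop SortingNetwork class; the spacer rows and 'o' wire markers of the original are omitted.

    def draw_network(levels, n_wires):
        """Render comparator levels as ASCII: 'x' marks an endpoint, '|' the span between."""
        width = 6 * (len(levels) + 1)
        wires = [["-"] * width for _ in range(n_wires)]
        for w in range(n_wires):
            wires[w][0] = str(w)  # wire label at the left edge
        for index, level in enumerate(levels):
            col = (index + 1) * 6
            for wire1, wire2 in level:
                wires[wire1][col] = "x"
                wires[wire2][col] = "x"
                for i in range(wire1 + 1, wire2):
                    wires[i][col] = "|"
        return "\n".join("".join(row) for row in wires)

    # A 4-wire network with three comparator levels.
    print(draw_network([[(0, 1), (2, 3)], [(0, 2), (1, 3)], [(1, 2)]], 4))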
numenta/htmresearch
htmresearch/frameworks/layers/object_machine_factory.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/object_machine_factory.py#L62-L76
def createObjectMachine(machineType, **kwargs): """ Return an object machine of the appropriate type. @param machineType (str) A supported ObjectMachine type @param kwargs (dict) Constructor argument for the class that will be instantiated. Keyword parameters specific to each model type should be passed in here. """ if machineType not in ObjectMachineTypes.getTypes(): raise RuntimeError("Unknown model type: " + machineType) return getattr(ObjectMachineTypes, machineType)(**kwargs)
[ "def", "createObjectMachine", "(", "machineType", ",", "*", "*", "kwargs", ")", ":", "if", "machineType", "not", "in", "ObjectMachineTypes", ".", "getTypes", "(", ")", ":", "raise", "RuntimeError", "(", "\"Unknown model type: \"", "+", "machineType", ")", "return", "getattr", "(", "ObjectMachineTypes", ",", "machineType", ")", "(", "*", "*", "kwargs", ")" ]
Return an object machine of the appropriate type. @param machineType (str) A supported ObjectMachine type @param kwargs (dict) Constructor argument for the class that will be instantiated. Keyword parameters specific to each model type should be passed in here.
[ "Return", "an", "object", "machine", "of", "the", "appropriate", "type", "." ]
python
train
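The factory above dispatches on a registry class (ObjectMachineTypes) via getattr. A generic sketch of the same registry-and-dispatch pattern follows; MachineTypes, Simple, and numInputs are made-up stand-ins, not the htmresearch classes.

    class MachineTypes:
        """Stand-in registry: every attribute that is a class counts as a machine type."""

        class Simple:
            def __init__(self, numInputs=10):
                self.numInputs = numInputs

        @classmethod
        def getTypes(cls):
            return [name for name, val in vars(cls).items() if isinstance(val, type)]

    def create_machine(machine_type, **kwargs):
        """Instantiate a registered machine type, mirroring createObjectMachine."""
        if machine_type not in MachineTypes.getTypes():
            raise RuntimeError("Unknown model type: " + machine_type)
        return getattr(MachineTypes, machine_type)(**kwargs)

    machine = create_machine("Simple", numInputs=16)
    assert machine.numInputs == 16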
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L1208-L1223
def conditional(self, result, obj): '''Check all file items with given conditions.''' fileonly = (self.opt.last_modified_before is not None) or (self.opt.last_modified_after is not None) if obj['is_dir']: if not fileonly: result.append(obj) return if (self.opt.last_modified_before is not None) and obj['last_modified'] >= self.opt.last_modified_before: return if (self.opt.last_modified_after is not None) and obj['last_modified'] <= self.opt.last_modified_after: return result.append(obj)
[ "def", "conditional", "(", "self", ",", "result", ",", "obj", ")", ":", "fileonly", "=", "(", "self", ".", "opt", ".", "last_modified_before", "is", "not", "None", ")", "or", "(", "self", ".", "opt", ".", "last_modified_after", "is", "not", "None", ")", "if", "obj", "[", "'is_dir'", "]", ":", "if", "not", "fileonly", ":", "result", ".", "append", "(", "obj", ")", "return", "if", "(", "self", ".", "opt", ".", "last_modified_before", "is", "not", "None", ")", "and", "obj", "[", "'last_modified'", "]", ">=", "self", ".", "opt", ".", "last_modified_before", ":", "return", "if", "(", "self", ".", "opt", ".", "last_modified_after", "is", "not", "None", ")", "and", "obj", "[", "'last_modified'", "]", "<=", "self", ".", "opt", ".", "last_modified_after", ":", "return", "result", ".", "append", "(", "obj", ")" ]
Check all file items with given conditions.
[ "Check", "all", "file", "item", "with", "given", "conditions", "." ]
python
test
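A standalone sketch of the filter implemented by conditional above: directories are kept only when no time filter is active, and files must fall strictly inside the configured window. The original reads its bounds from self.opt; a SimpleNamespace and integer timestamps stand in here for illustration.

    from types import SimpleNamespace

    def filter_objects(objects, opt):
        """Keep directories (unless a time filter makes the listing file-only)
        and files whose last_modified falls inside the configured window."""
        fileonly = (opt.last_modified_before is not None) or (opt.last_modified_after is not None)
        result = []
        for obj in objects:
            if obj['is_dir']:
                if not fileonly:
                    result.append(obj)
                continue
            if opt.last_modified_before is not None and obj['last_modified'] >= opt.last_modified_before:
                continue
            if opt.last_modified_after is not None and obj['last_modified'] <= opt.last_modified_after:
                continue
            result.append(obj)
        return result

    opt = SimpleNamespace(last_modified_before=200, last_modified_after=None)
    objects = [
        {'name': 'dir/', 'is_dir': True, 'last_modified': 0},
        {'name': 'old.txt', 'is_dir': False, 'last_modified': 100},
        {'name': 'new.txt', 'is_dir': False, 'last_modified': 300},
    ]
    print([o['name'] for o in filter_objects(objects, opt)])  # ['old.txt']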
google/openhtf
openhtf/util/data.py
https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/data.py#L42-L47
def pprint_diff(first, second, first_name='first', second_name='second'): """Compare the pprint representation of two objects and yield diff lines.""" return difflib.unified_diff( pprint.pformat(first).splitlines(), pprint.pformat(second).splitlines(), fromfile=first_name, tofile=second_name, lineterm='')
[ "def", "pprint_diff", "(", "first", ",", "second", ",", "first_name", "=", "'first'", ",", "second_name", "=", "'second'", ")", ":", "return", "difflib", ".", "unified_diff", "(", "pprint", ".", "pformat", "(", "first", ")", ".", "splitlines", "(", ")", ",", "pprint", ".", "pformat", "(", "second", ")", ".", "splitlines", "(", ")", ",", "fromfile", "=", "first_name", ",", "tofile", "=", "second_name", ",", "lineterm", "=", "''", ")" ]
Compare the pprint representation of two objects and yield diff lines.
[ "Compare", "the", "pprint", "representation", "of", "two", "objects", "and", "yield", "diff", "lines", "." ]
python
train
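Everything pprint_diff uses is in the standard library, so a direct usage example is straightforward; the sample dictionaries below are made up.

    import difflib
    import pprint

    def pprint_diff(first, second, first_name='first', second_name='second'):
        """Compare the pprint representation of two objects and yield diff lines."""
        return difflib.unified_diff(
            pprint.pformat(first).splitlines(),
            pprint.pformat(second).splitlines(),
            fromfile=first_name, tofile=second_name, lineterm='')

    before = {'phase': 'setup', 'retries': 3}
    after = {'phase': 'teardown', 'retries': 3}
    for line in pprint_diff(before, after, 'before', 'after'):
        print(line)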
mitsei/dlkit
dlkit/handcar/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/objects.py#L837-L858
def set_distribute_compositions(self, distribute_comps=None): """Sets the distribution rights. This sets distribute verbatim to ``true``. :param distribute_comps: right to distribute modifications :type distribute_comps: ``boolean`` :raise: ``InvalidArgument`` -- ``distribute_comps`` is invalid :raise: ``NoAccess`` -- authorization failure *compliance: mandatory -- This method must be implemented.* """ if distribute_comps is None: raise NullArgument() metadata = Metadata(**settings.METADATA['distribute_compositions']) if metadata.is_read_only(): raise NoAccess() if self._is_valid_input(distribute_comps, metadata, array=False): self._my_map['canDistributeCompositions'] = distribute_comps else: raise InvalidArgument()
[ "def", "set_distribute_compositions", "(", "self", ",", "distribute_comps", "=", "None", ")", ":", "if", "distribute_comps", "is", "None", ":", "raise", "NullArgument", "(", ")", "metadata", "=", "Metadata", "(", "*", "*", "settings", ".", "METADATA", "[", "'distribute_compositions'", "]", ")", "if", "metadata", ".", "is_read_only", "(", ")", ":", "raise", "NoAccess", "(", ")", "if", "self", ".", "_is_valid_input", "(", "distribute_comps", ",", "metadata", ",", "array", "=", "False", ")", ":", "self", ".", "_my_map", "[", "'canDistributeCompositions'", "]", "=", "distribute_comps", "else", ":", "raise", "InvalidArgument", "(", ")" ]
Sets the distribution rights. This sets distribute verbatim to ``true``. :param distribute_comps: right to distribute modifications :type distribute_comps: ``boolean`` :raise: ``InvalidArgument`` -- ``distribute_comps`` is invalid :raise: ``NoAccess`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "distribution", "rights", "." ]
python
train
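The dlkit Metadata/settings machinery is not shown in the record, so the sketch below only mirrors the guard sequence: reject a missing value, a read-only field, and an invalid type before assigning into the form's map. The class name and the builtin exceptions are stand-ins for dlkit's NullArgument, NoAccess, and InvalidArgument.

    class GuardedAssetForm:
        """Minimal stand-in for a form with a guarded boolean setter."""

        def __init__(self, read_only=False):
            self._read_only = read_only
            self._my_map = {}

        def set_distribute_compositions(self, distribute_comps=None):
            if distribute_comps is None:
                raise ValueError("distribute_comps must be provided")   # NullArgument
            if self._read_only:
                raise PermissionError("metadata is read-only")          # NoAccess
            if not isinstance(distribute_comps, bool):
                raise TypeError("distribute_comps must be a boolean")   # InvalidArgument
            self._my_map['canDistributeCompositions'] = distribute_comps

    form = GuardedAssetForm()
    form.set_distribute_compositions(True)
    assert form._my_map['canDistributeCompositions'] is True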
mfitzp/padua
padua/visualize.py
https://github.com/mfitzp/padua/blob/8b14bf4d2f895da6aea5d7885d409315bd303ec6/padua/visualize.py#L1495-L1605
def kegg_pathway(df, pathway, a, b=None, ids_from="Proteins", cmap=cm.PuOr_r, is_log2=False, fillna=None, z_score=1): """ Visualize data on a kegg pathway. :param df: :param pathway: :param a: :param b: :param ids_from: :param cmap: :param is_log2: :param fillna: :param z_score: :return: """ df = df.copy() if np.any(df.values < 0) and not is_log2: warnings.warn("Input data has negative values. If data is log2 transformed, set is_log2=True.") if fillna is not None: df = df.fillna(fillna) if z_score is None: pass elif z_score == 0: df = (df - df.median(axis=0)) / df.std(axis=0) elif z_score == 1: df = ((df.T - df.median(axis=1).T) / df.std(axis=1).T).T if b is not None: # Calculate ratio between two groups g1, g2 = df[a].values, df[b].values if is_log2: dr = np.nanmean(g2, axis=1) - np.nanmean(g1, axis=1) else: dr = np.log2(np.nanmean(g2, axis=1) / np.nanmean(g1, axis=1)) else: g1 = df[a].values dr = np.nanmean(g1, axis=1) maxi = np.max(abs(dr)) norm = mpl.colors.Normalize(vmin=-maxi, vmax=maxi) mapper = cm.ScalarMappable(norm=norm, cmap=cm.PuOr_r) # Orange up node_colors = {} for p, v in zip(df.index.get_level_values(ids_from), dr): pid = str(p).split(";")[-1] if "_" in pid: pid = pid[:pid.index("_")] node_colors[pid] = mpl.colors.rgb2hex(mapper.to_rgba(v)) global uniprot_kegg_cache # Only do this once upids = list( node_colors.keys() ) upids = [p for p in upids if p not in uniprot_kegg_cache.keys()] if upids: new_pairs = get_uniprot_id_mapping_pairs('ACC+ID', 'KEGG_ID', upids) uniprot_kegg_cache.update(new_pairs) for p in upids: if p not in uniprot_kegg_cache: uniprot_kegg_cache[p] = None # Not found, don't look again with StringIO() as f: f.write('#hsa\tData\n') for k, c in list(node_colors.items()): if k in uniprot_kegg_cache and uniprot_kegg_cache[k] is not None: kids = uniprot_kegg_cache[k] for kegg_id in kids: f.write('%s\t%s\n' % (kegg_id.split(':')[-1], c )) # Reset file f.seek(0) url = 'https://www.kegg.jp/kegg-bin/mcolor_pathway' m = MultipartEncoder( fields={ 'map': pathway, 'mapping_list': ('filename', f), 'mode': 'color', 'submit': 'Exec', 'reference': 'white', } ) r = requests.post(url, data=m, headers={'Content-Type': m.content_type}) if r.status_code == 200: # src="/tmp/mark_pathway154353327948969/hsa04010.1.png" ms = re.finditer('src="(/tmp/mark_pathway[^"]*.png)"', r.text) m = list(ms).pop() # Download image data image = Image.open(requests.get('http://www.kegg.jp%s' % m.group(1), stream=True).raw) width, height = image.size # Get dimensions image = image.crop((1, 1, width-1, height-1)) # Crop black outline print("Scale range: %.2f .. %.2f" % (-maxi, maxi)) return image
[ "def", "kegg_pathway", "(", "df", ",", "pathway", ",", "a", ",", "b", "=", "None", ",", "ids_from", "=", "\"Proteins\"", ",", "cmap", "=", "cm", ".", "PuOr_r", ",", "is_log2", "=", "False", ",", "fillna", "=", "None", ",", "z_score", "=", "1", ")", ":", "df", "=", "df", ".", "copy", "(", ")", "if", "np", ".", "any", "(", "df", ".", "values", "<", "0", ")", "and", "not", "is_log2", ":", "warnings", ".", "warn", "(", "\"Input data has negative values. If data is log2 transformed, set is_log2=True.\"", ")", "if", "fillna", "is", "not", "None", ":", "df", "=", "df", ".", "fillna", "(", "fillna", ")", "if", "z_score", "is", "None", ":", "pass", "elif", "z_score", "==", "0", ":", "df", "=", "(", "df", "-", "df", ".", "median", "(", "axis", "=", "0", ")", ")", "/", "df", ".", "std", "(", "axis", "=", "0", ")", "elif", "z_score", "==", "1", ":", "df", "=", "(", "(", "df", ".", "T", "-", "df", ".", "median", "(", "axis", "=", "1", ")", ".", "T", ")", "/", "df", ".", "std", "(", "axis", "=", "1", ")", ".", "T", ")", ".", "T", "if", "b", "is", "not", "None", ":", "# Calculate ratio between two groups", "g1", ",", "g2", "=", "df", "[", "a", "]", ".", "values", ",", "df", "[", "b", "]", ".", "values", "if", "is_log2", ":", "dr", "=", "np", ".", "nanmean", "(", "g2", ",", "axis", "=", "1", ")", "-", "np", ".", "nanmean", "(", "g1", ",", "axis", "=", "1", ")", "else", ":", "dr", "=", "np", ".", "log2", "(", "np", ".", "nanmean", "(", "g2", ",", "axis", "=", "1", ")", "/", "np", ".", "nanmean", "(", "g1", ",", "axis", "=", "1", ")", ")", "else", ":", "g1", "=", "df", "[", "a", "]", ".", "values", "dr", "=", "np", ".", "nanmean", "(", "g1", ",", "axis", "=", "1", ")", "maxi", "=", "np", ".", "max", "(", "abs", "(", "dr", ")", ")", "norm", "=", "mpl", ".", "colors", ".", "Normalize", "(", "vmin", "=", "-", "maxi", ",", "vmax", "=", "maxi", ")", "mapper", "=", "cm", ".", "ScalarMappable", "(", "norm", "=", "norm", ",", "cmap", "=", "cm", ".", "PuOr_r", ")", "# Orange up", "node_colors", "=", "{", "}", "for", "p", ",", "v", "in", "zip", "(", "df", ".", "index", ".", "get_level_values", "(", "ids_from", ")", ",", "dr", ")", ":", "pid", "=", "str", "(", "p", ")", ".", "split", "(", "\";\"", ")", "[", "-", "1", "]", "if", "\"_\"", "in", "pid", ":", "pid", "=", "pid", "[", ":", "pid", ".", "index", "(", "\"_\"", ")", "]", "node_colors", "[", "pid", "]", "=", "mpl", ".", "colors", ".", "rgb2hex", "(", "mapper", ".", "to_rgba", "(", "v", ")", ")", "global", "uniprot_kegg_cache", "# Only do this once", "upids", "=", "list", "(", "node_colors", ".", "keys", "(", ")", ")", "upids", "=", "[", "p", "for", "p", "in", "upids", "if", "p", "not", "in", "uniprot_kegg_cache", ".", "keys", "(", ")", "]", "if", "upids", ":", "new_pairs", "=", "get_uniprot_id_mapping_pairs", "(", "'ACC+ID'", ",", "'KEGG_ID'", ",", "upids", ")", "uniprot_kegg_cache", ".", "update", "(", "new_pairs", ")", "for", "p", "in", "upids", ":", "if", "p", "not", "in", "uniprot_kegg_cache", ":", "uniprot_kegg_cache", "[", "p", "]", "=", "None", "# Not found, don't look again", "with", "StringIO", "(", ")", "as", "f", ":", "f", ".", "write", "(", "'#hsa\\tData\\n'", ")", "for", "k", ",", "c", "in", "list", "(", "node_colors", ".", "items", "(", ")", ")", ":", "if", "k", "in", "uniprot_kegg_cache", "and", "uniprot_kegg_cache", "[", "k", "]", "is", "not", "None", ":", "kids", "=", "uniprot_kegg_cache", "[", "k", "]", "for", "kegg_id", "in", "kids", ":", "f", ".", "write", "(", "'%s\\t%s\\n'", "%", "(", "kegg_id", ".", "split", "(", "':'", ")", "[", "-", 
"1", "]", ",", "c", ")", ")", "# Reset file", "f", ".", "seek", "(", "0", ")", "url", "=", "'https://www.kegg.jp/kegg-bin/mcolor_pathway'", "m", "=", "MultipartEncoder", "(", "fields", "=", "{", "'map'", ":", "pathway", ",", "'mapping_list'", ":", "(", "'filename'", ",", "f", ")", ",", "'mode'", ":", "'color'", ",", "'submit'", ":", "'Exec'", ",", "'reference'", ":", "'white'", ",", "}", ")", "r", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "m", ",", "headers", "=", "{", "'Content-Type'", ":", "m", ".", "content_type", "}", ")", "if", "r", ".", "status_code", "==", "200", ":", "# src=\"/tmp/mark_pathway154353327948969/hsa04010.1.png\"", "ms", "=", "re", ".", "finditer", "(", "'src=\"(/tmp/mark_pathway[^\"]*.png)\"'", ",", "r", ".", "text", ")", "m", "=", "list", "(", "ms", ")", ".", "pop", "(", ")", "# Download image data", "image", "=", "Image", ".", "open", "(", "requests", ".", "get", "(", "'http://www.kegg.jp%s'", "%", "m", ".", "group", "(", "1", ")", ",", "stream", "=", "True", ")", ".", "raw", ")", "width", ",", "height", "=", "image", ".", "size", "# Get dimensions", "image", "=", "image", ".", "crop", "(", "(", "1", ",", "1", ",", "width", "-", "1", ",", "height", "-", "1", ")", ")", "# Crop black outline", "print", "(", "\"Scale range: %.2f .. %.2f\"", "%", "(", "-", "maxi", ",", "maxi", ")", ")", "return", "image" ]
Visualize data on a kegg pathway. :param df: :param pathway: :param a: :param b: :param ids_from: :param cmap: :param is_log2: :param fillna: :param z_score: :return:
[ "Visualize", "data", "on", "a", "kegg", "pathway", "." ]
python
train
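The KEGG upload and UniProt ID mapping in kegg_pathway need network access, so this sketch reproduces only the numeric steps: the per-row z-score used when z_score=1 and the log2 ratio of the two group means. The column names and random data are invented for the example.

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(
        np.random.lognormal(mean=8, sigma=1, size=(5, 4)),
        columns=['ctrl_1', 'ctrl_2', 'treat_1', 'treat_2'])

    # Per-protein (row-wise) z-score, matching the z_score=1 branch above.
    z = ((df.T - df.median(axis=1).T) / df.std(axis=1).T).T

    # log2 ratio of treatment over control group means (raw intensities, not yet log2).
    a, b = ['ctrl_1', 'ctrl_2'], ['treat_1', 'treat_2']
    dr = np.log2(np.nanmean(df[b].values, axis=1) / np.nanmean(df[a].values, axis=1))
    print(dr)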
archman/beamline
beamline/ui/myappframe.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/ui/myappframe.py#L752-L764
def pack_found_items(self, s_text, target): """ pack up found items for search ctrl :param target: treectrl obj :param s_text: text to search, lower case return list of found items """ all_children = self.all_children all_text = [target.GetItemText(i).lower() for i in all_children] found_items = [child for i, child in enumerate(all_children) if s_text in all_text[i]] return found_items
[ "def", "pack_found_items", "(", "self", ",", "s_text", ",", "target", ")", ":", "all_children", "=", "self", ".", "all_children", "all_text", "=", "[", "target", ".", "GetItemText", "(", "i", ")", ".", "lower", "(", ")", "for", "i", "in", "all_children", "]", "found_items", "=", "[", "child", "for", "i", ",", "child", "in", "enumerate", "(", "all_children", ")", "if", "s_text", "in", "all_text", "[", "i", "]", "]", "return", "found_items" ]
pack up found items for search ctrl :param target: treectrl obj :param s_text: text to search, lower case return list of found items
[ "pack", "up", "found", "items", "for", "search", "ctrl", ":", "param", "target", ":", "treectrl", "obj", ":", "param", "s_text", ":", "text", "to", "search", "lower", "case", "return", "list", "of", "found", "items" ]
python
train
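pack_found_items is a case-insensitive substring filter over tree items; the wx-free sketch below shows the same filtering on plain strings (the labels are invented).

    def find_items(s_text, all_items):
        """Return the items whose text contains s_text (case-insensitive)."""
        s_text = s_text.lower()
        all_text = [item.lower() for item in all_items]
        return [item for i, item in enumerate(all_items) if s_text in all_text[i]]

    labels = ['Quadrupole Q1', 'Dipole B1', 'quadrupole Q2']
    print(find_items('quad', labels))  # ['Quadrupole Q1', 'quadrupole Q2']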
tjkessler/PyGenetics
pygenetics/selection_functions.py
https://github.com/tjkessler/PyGenetics/blob/b78ee6393605d6e85d2279fb05f3983f5833df40/pygenetics/selection_functions.py#L10-L23
def minimize_best_n(Members): ''' Orders population members from lowest fitness to highest fitness Args: Members (list): list of PyGenetics Member objects Returns: list: ordered list of Members, from highest fitness to lowest fitness ''' return(list(reversed(sorted( Members, key=lambda Member: Member.fitness_score ))))
[ "def", "minimize_best_n", "(", "Members", ")", ":", "return", "(", "list", "(", "reversed", "(", "sorted", "(", "Members", ",", "key", "=", "lambda", "Member", ":", "Member", ".", "fitness_score", ")", ")", ")", ")" ]
Orders population members from lowest fitness to highest fitness Args: Members (list): list of PyGenetics Member objects Returns: list: ordered list of Members, from highest fitness to lowest fitness
[ "Orders", "population", "members", "from", "lowest", "fitness", "to", "highest", "fitness" ]
python
test
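Member is a PyGenetics class carrying a fitness_score attribute; a namedtuple stands in for it below so the ordering produced by the ascending sort plus reversed can be shown directly.

    from collections import namedtuple

    Member = namedtuple('Member', ['name', 'fitness_score'])

    def minimize_best_n(members):
        """Sort members by fitness_score ascending, then reverse, as in the code above."""
        return list(reversed(sorted(members, key=lambda m: m.fitness_score)))

    population = [Member('a', 0.3), Member('b', 1.2), Member('c', 0.7)]
    print([m.name for m in minimize_best_n(population)])  # ['b', 'c', 'a']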
sdispater/cachy
cachy/tagged_cache.py
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/tagged_cache.py#L153-L179
def remember(self, key, minutes, callback): """ Get an item from the cache, or store the default value. :param key: The cache key :type key: str :param minutes: The lifetime in minutes of the cached value :type minutes: int or datetime :param callback: The default function :type callback: mixed :rtype: mixed """ # If the item exists in the cache we will just return this immediately # otherwise we will execute the given callback and cache the result # of that execution for the given number of minutes in storage. val = self.get(key) if val is not None: return val val = value(callback) self.put(key, val, minutes) return val
[ "def", "remember", "(", "self", ",", "key", ",", "minutes", ",", "callback", ")", ":", "# If the item exists in the cache we will just return this immediately", "# otherwise we will execute the given callback and cache the result", "# of that execution for the given number of minutes in storage.", "val", "=", "self", ".", "get", "(", "key", ")", "if", "val", "is", "not", "None", ":", "return", "val", "val", "=", "value", "(", "callback", ")", "self", ".", "put", "(", "key", ",", "val", ",", "minutes", ")", "return", "val" ]
Get an item from the cache, or store the default value. :param key: The cache key :type key: str :param minutes: The lifetime in minutes of the cached value :type minutes: int or datetime :param callback: The default function :type callback: mixed :rtype: mixed
[ "Get", "an", "item", "from", "the", "cache", "or", "store", "the", "default", "value", "." ]
python
train
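A dict-backed sketch of the get-or-compute-and-store pattern in remember; tagging and the minutes-based expiry handled by cachy's real stores are deliberately ignored here.

    class MiniCache:
        """Dict-backed cache illustrating the get-or-compute pattern above."""

        def __init__(self):
            self._store = {}

        def get(self, key):
            return self._store.get(key)

        def put(self, key, val, minutes):
            # Expiry is ignored in this sketch; a real store would honour `minutes`.
            self._store[key] = val

        def remember(self, key, minutes, callback):
            val = self.get(key)
            if val is not None:
                return val
            val = callback()
            self.put(key, val, minutes)
            return val

    cache = MiniCache()
    cache.remember('answer', 10, lambda: 42)        # computes and stores
    print(cache.remember('answer', 10, lambda: 0))  # 42, served from the cache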
liftoff/pyminifier
pyminifier/minification.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/minification.py#L401-L419
def minify(tokens, options): """ Performs minification on *tokens* according to the values in *options* """ # Remove comments remove_comments(tokens) # Remove docstrings remove_docstrings(tokens) result = token_utils.untokenize(tokens) # Minify our input script result = multiline_indicator.sub('', result) result = fix_empty_methods(result) result = join_multiline_pairs(result) result = join_multiline_pairs(result, '[]') result = join_multiline_pairs(result, '{}') result = remove_blank_lines(result) result = reduce_operators(result) result = dedent(result, use_tabs=options.tabs) return result
[ "def", "minify", "(", "tokens", ",", "options", ")", ":", "# Remove comments", "remove_comments", "(", "tokens", ")", "# Remove docstrings", "remove_docstrings", "(", "tokens", ")", "result", "=", "token_utils", ".", "untokenize", "(", "tokens", ")", "# Minify our input script", "result", "=", "multiline_indicator", ".", "sub", "(", "''", ",", "result", ")", "result", "=", "fix_empty_methods", "(", "result", ")", "result", "=", "join_multiline_pairs", "(", "result", ")", "result", "=", "join_multiline_pairs", "(", "result", ",", "'[]'", ")", "result", "=", "join_multiline_pairs", "(", "result", ",", "'{}'", ")", "result", "=", "remove_blank_lines", "(", "result", ")", "result", "=", "reduce_operators", "(", "result", ")", "result", "=", "dedent", "(", "result", ",", "use_tabs", "=", "options", ".", "tabs", ")", "return", "result" ]
Performs minification on *tokens* according to the values in *options*
[ "Performs", "minification", "on", "*", "tokens", "*", "according", "to", "the", "values", "in", "*", "options", "*" ]
python
train
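The helpers minify chains together (remove_comments, token_utils.untokenize, join_multiline_pairs, ...) are pyminifier internals, so the sketch below only demonstrates the standard-library tokenize round-trip that kind of pipeline is built on: drop COMMENT tokens and reassemble the source. Note that untokenize pads the gap left by a removed comment with spaces.

    import io
    import tokenize

    def strip_comments(source):
        """Drop COMMENT tokens and re-assemble the source with untokenize."""
        tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
        kept = [tok for tok in tokens if tok.type != tokenize.COMMENT]
        return tokenize.untokenize(kept)

    source = "x = 1  # the answer\nprint(x)\n"
    print(strip_comments(source))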
fastai/fastai
fastai/vision/gan.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/gan.py#L219-L223
def from_learners(cls, learn_gen:Learner, learn_crit:Learner, switcher:Callback=None, weights_gen:Tuple[float,float]=None, **learn_kwargs): "Create a GAN from `learn_gen` and `learn_crit`." losses = gan_loss_from_func(learn_gen.loss_func, learn_crit.loss_func, weights_gen=weights_gen) return cls(learn_gen.data, learn_gen.model, learn_crit.model, *losses, switcher=switcher, **learn_kwargs)
[ "def", "from_learners", "(", "cls", ",", "learn_gen", ":", "Learner", ",", "learn_crit", ":", "Learner", ",", "switcher", ":", "Callback", "=", "None", ",", "weights_gen", ":", "Tuple", "[", "float", ",", "float", "]", "=", "None", ",", "*", "*", "learn_kwargs", ")", ":", "losses", "=", "gan_loss_from_func", "(", "learn_gen", ".", "loss_func", ",", "learn_crit", ".", "loss_func", ",", "weights_gen", "=", "weights_gen", ")", "return", "cls", "(", "learn_gen", ".", "data", ",", "learn_gen", ".", "model", ",", "learn_crit", ".", "model", ",", "*", "losses", ",", "switcher", "=", "switcher", ",", "*", "*", "learn_kwargs", ")" ]
Create a GAN from `learn_gen` and `learn_crit`.
[ "Create", "a", "GAN", "from", "learn_gen", "and", "learn_crit", "." ]
python
train