Dataset columns:

    repo              string (7 to 54 chars)
    path              string (4 to 192 chars)
    url               string (87 to 284 chars)
    code              string (78 to 104k chars)
    code_tokens       sequence
    docstring         string (1 to 46.9k chars)
    docstring_tokens  sequence
    language          string (1 class)
    partition         string (3 classes)
alphagov/performanceplatform-collector
performanceplatform/collector/gcloud/sales_parser.py
https://github.com/alphagov/performanceplatform-collector/blob/de68ab4aa500c31e436e050fa1268fa928c522a5/performanceplatform/collector/gcloud/sales_parser.py#L71-L90
def process_csv(f):
    """
    Take a file-like object and yield OrderedDicts to be inserted
    into raw spending database.
    """
    reader = unicodecsv.DictReader(f, encoding=_ENCODING)
    for row in reader:
        month, year = parse_month_year(row['Return Month'])
        yield OrderedDict([
            ('customer_name', row['CustomerName']),
            ('supplier_name', row['SupplierName']),
            ('month', month),
            ('year', year),
            ('date', datetime.date(year, month, 1)),
            ('total_ex_vat', parse_price(row['EvidencedSpend'])),
            ('lot', parse_lot_name(row['LotDescription'])),
            ('customer_sector', parse_customer_sector(row['Sector'])),
            ('supplier_type', parse_sme_or_large(row['SME or Large'])),
        ])
[ "def", "process_csv", "(", "f", ")", ":", "reader", "=", "unicodecsv", ".", "DictReader", "(", "f", ",", "encoding", "=", "_ENCODING", ")", "for", "row", "in", "reader", ":", "month", ",", "year", "=", "parse_month_year", "(", "row", "[", "'Return Month'", "]", ")", "yield", "OrderedDict", "(", "[", "(", "'customer_name'", ",", "row", "[", "'CustomerName'", "]", ")", ",", "(", "'supplier_name'", ",", "row", "[", "'SupplierName'", "]", ")", ",", "(", "'month'", ",", "month", ")", ",", "(", "'year'", ",", "year", ")", ",", "(", "'date'", ",", "datetime", ".", "date", "(", "year", ",", "month", ",", "1", ")", ")", ",", "(", "'total_ex_vat'", ",", "parse_price", "(", "row", "[", "'EvidencedSpend'", "]", ")", ")", ",", "(", "'lot'", ",", "parse_lot_name", "(", "row", "[", "'LotDescription'", "]", ")", ")", ",", "(", "'customer_sector'", ",", "parse_customer_sector", "(", "row", "[", "'Sector'", "]", ")", ")", ",", "(", "'supplier_type'", ",", "parse_sme_or_large", "(", "row", "[", "'SME or Large'", "]", ")", ")", ",", "]", ")" ]
Take a file-like object and yield OrderedDicts to be inserted into raw spending database.
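A minimal driver for process_csv, assuming the surrounding module's imports (unicodecsv, the parse_* helpers) are available; the file name spending.csv is a placeholder:

# Hypothetical usage; 'spending.csv' is a made-up path.
with open('spending.csv', 'rb') as f:
    for record in process_csv(f):
        print(record['supplier_name'], record['total_ex_vat'])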
[ "Take", "a", "file", "-", "like", "object", "and", "yield", "OrderedDicts", "to", "be", "inserted", "into", "raw", "spending", "database", "." ]
python
train
saltstack/salt
salt/modules/neutronng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/neutronng.py#L191-L207
def network_get(auth=None, **kwargs):
    '''
    Get a single network

    filters
        A Python dictionary of filter conditions to push down

    CLI Example:

    .. code-block:: bash

        salt '*' neutronng.network_get name=XLB4

    '''
    cloud = get_operator_cloud(auth)
    kwargs = _clean_kwargs(**kwargs)
    return cloud.get_network(**kwargs)
[ "def", "network_get", "(", "auth", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cloud", "=", "get_operator_cloud", "(", "auth", ")", "kwargs", "=", "_clean_kwargs", "(", "*", "*", "kwargs", ")", "return", "cloud", ".", "get_network", "(", "*", "*", "kwargs", ")" ]
Get a single network

filters
    A Python dictionary of filter conditions to push down

CLI Example:

.. code-block:: bash

    salt '*' neutronng.network_get name=XLB4
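Beyond the CLI example in the docstring, the function can be invoked from Python; this sketch assumes credentials are resolved by get_operator_cloud when auth is None:

# Hypothetical direct call; 'XLB4' is the docstring's example name.
net = network_get(name='XLB4')
if net is not None:
    print(net['id'])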
[ "Get", "a", "single", "network" ]
python
train
richardkiss/pycoin
pycoin/networks/ParseAPI.py
https://github.com/richardkiss/pycoin/blob/1e8d0d9fe20ce0347b97847bb529cd1bd84c7442/pycoin/networks/ParseAPI.py#L96-L105
def electrum_prv(self, s):
    """
    Parse an electrum private key from a text string in seed form
    ("E:xxx" where xxx is a 64-character hex string).

    Return a :class:`ElectrumWallet <pycoin.key.electrum.ElectrumWallet>` or None.
    """
    blob = self._electrum_to_blob(s)
    if blob and len(blob) == 32:
        mpk = from_bytes_32(blob)
        return self._network.keys.electrum_private(master_private_key=mpk)
[ "def", "electrum_prv", "(", "self", ",", "s", ")", ":", "blob", "=", "self", ".", "_electrum_to_blob", "(", "s", ")", "if", "blob", "and", "len", "(", "blob", ")", "==", "32", ":", "mpk", "=", "from_bytes_32", "(", "blob", ")", "return", "self", ".", "_network", ".", "keys", ".", "electrum_private", "(", "master_private_key", "=", "mpk", ")" ]
Parse an electrum private key from a text string in seed form ("E:xxx" where xxx is a 64-character hex string). Return a :class:`ElectrumWallet <pycoin.key.electrum.ElectrumWallet>` or None.
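A hedged sketch of the seed form the parser expects; the hex digits below are illustrative, not a real key, and network.parse is assumed to be a ParseAPI instance:

# "E:" prefix followed by a 64-character hex string (illustrative only).
seed = 'E:' + 'ab' * 32
wallet = network.parse.electrum_prv(seed)
if wallet is None:
    print('not a valid electrum seed')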
[ "Parse", "an", "electrum", "private", "key", "from", "a", "text", "string", "in", "seed", "form", "(", "E", ":", "xxx", "where", "xxx", "is", "a", "64", "-", "character", "hex", "string", ")", ".", "Return", "a", ":", "class", ":", "ElectrumWallet", "<pycoin", ".", "key", ".", "electrum", ".", "ElectrumWallet", ">", "or", "None", "." ]
python
train
fred49/argtoolbox
argtoolbox/argtoolbox.py
https://github.com/fred49/argtoolbox/blob/e32ad6265567d5a1891df3c3425423774dafab41/argtoolbox/argtoolbox.py#L815-L844
def write_config_file(self, f, comments):
    """This method writes a sample file, with attributes,
    descriptions, sample values, required flags, using the
    configuration object properties.
    """
    if self.conf_hidden:
        return False
    if comments:
        f.write("\n")
        f.write("# Attribute (")
        f.write(str(self.e_type.__name__))
        f.write(") : ")
        f.write(self._name.upper())
        f.write("\n")
        if self._desc and self._desc != argparse.SUPPRESS:
            f.write("# Description : ")
            for i in self._desc.split('\n'):
                f.write("# ")
                f.write(i)
                f.write("\n")
            f.write("\n")
    if not self.conf_required:
        f.write(";")
    f.write(self._name)
    f.write("=")
    if self.default is not None and not self.hidden:
        f.write(str(self.default))
    f.write("\n")
[ "def", "write_config_file", "(", "self", ",", "f", ",", "comments", ")", ":", "if", "self", ".", "conf_hidden", ":", "return", "False", "if", "comments", ":", "f", ".", "write", "(", "\"\\n\"", ")", "f", ".", "write", "(", "\"# Attribute (\"", ")", "f", ".", "write", "(", "str", "(", "self", ".", "e_type", ".", "__name__", ")", ")", "f", ".", "write", "(", "\") : \"", ")", "f", ".", "write", "(", "self", ".", "_name", ".", "upper", "(", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")", "if", "self", ".", "_desc", "and", "self", ".", "_desc", "!=", "argparse", ".", "SUPPRESS", ":", "f", ".", "write", "(", "\"# Description : \"", ")", "for", "i", "in", "self", ".", "_desc", ".", "split", "(", "'\\n'", ")", ":", "f", ".", "write", "(", "\"# \"", ")", "f", ".", "write", "(", "i", ")", "f", ".", "write", "(", "\"\\n\"", ")", "f", ".", "write", "(", "\"\\n\"", ")", "if", "not", "self", ".", "conf_required", ":", "f", ".", "write", "(", "\";\"", ")", "f", ".", "write", "(", "self", ".", "_name", ")", "f", ".", "write", "(", "\"=\"", ")", "if", "self", ".", "default", "is", "not", "None", "and", "not", "self", ".", "hidden", ":", "f", ".", "write", "(", "str", "(", "self", ".", "default", ")", ")", "f", ".", "write", "(", "\"\\n\"", ")" ]
This method writes a sample file, with attributes, descriptions, sample values, required flags, using the configuration object properties.
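A sketch of capturing the generated sample section in memory; 'option' stands in for an argtoolbox element configured elsewhere:

import io

buf = io.StringIO()                      # stand-in for a real config file
option.write_config_file(buf, comments=True)
print(buf.getvalue())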
[ "This", "method", "write", "a", "sample", "file", "with", "attributes", "descriptions", "sample", "values", "required", "flags", "using", "the", "configuration", "object", "properties", "." ]
python
train
joealcorn/xbox
xbox/vendor/requests/packages/urllib3/response.py
https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/response.py#L143-L237
def read(self, amt=None, decode_content=None, cache_content=False):
    """
    Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
    parameters: ``decode_content`` and ``cache_content``.

    :param amt:
        How much of the content to read. If specified, caching is skipped
        because it doesn't make sense to cache partial content as the full
        response.

    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.

    :param cache_content:
        If True, will save the returned data such that the same result is
        returned regardless of the state of the underlying file object.
        This is useful if you want the ``.data`` property to continue
        working after having ``.read()`` the file object. (Overridden if
        ``amt`` is set.)
    """
    # Note: content-encoding value should be case-insensitive, per RFC 7230
    # Section 3.2
    content_encoding = self.headers.get('content-encoding', '').lower()
    if self._decoder is None:
        if content_encoding in self.CONTENT_DECODERS:
            self._decoder = _get_decoder(content_encoding)
    if decode_content is None:
        decode_content = self.decode_content

    if self._fp is None:
        return

    flush_decoder = False

    try:
        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True

        except SocketTimeout:
            # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
            # there is yet no clean way to get at it from this context.
            raise ReadTimeoutError(self._pool, None, 'Read timed out.')

        except BaseSSLError as e:
            # FIXME: Is there a better way to differentiate between SSLErrors?
            if 'read operation timed out' not in str(e):  # Defensive:
                # This shouldn't happen but just in case we're missing an edge
                # case, let's avoid swallowing SSL errors.
                raise

            raise ReadTimeoutError(self._pool, None, 'Read timed out.')

        except HTTPException as e:
            # This includes IncompleteRead.
            raise ProtocolError('Connection broken: %r' % e, e)

        self._fp_bytes_read += len(data)

        try:
            if decode_content and self._decoder:
                data = self._decoder.decompress(data)
        except (IOError, zlib.error) as e:
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding, e)

        if flush_decoder and decode_content and self._decoder:
            buf = self._decoder.decompress(binary_type())
            data += buf + self._decoder.flush()

        if cache_content:
            self._body = data

        return data

    finally:
        if self._original_response and self._original_response.isclosed():
            self.release_conn()
[ "def", "read", "(", "self", ",", "amt", "=", "None", ",", "decode_content", "=", "None", ",", "cache_content", "=", "False", ")", ":", "# Note: content-encoding value should be case-insensitive, per RFC 7230", "# Section 3.2", "content_encoding", "=", "self", ".", "headers", ".", "get", "(", "'content-encoding'", ",", "''", ")", ".", "lower", "(", ")", "if", "self", ".", "_decoder", "is", "None", ":", "if", "content_encoding", "in", "self", ".", "CONTENT_DECODERS", ":", "self", ".", "_decoder", "=", "_get_decoder", "(", "content_encoding", ")", "if", "decode_content", "is", "None", ":", "decode_content", "=", "self", ".", "decode_content", "if", "self", ".", "_fp", "is", "None", ":", "return", "flush_decoder", "=", "False", "try", ":", "try", ":", "if", "amt", "is", "None", ":", "# cStringIO doesn't like amt=None", "data", "=", "self", ".", "_fp", ".", "read", "(", ")", "flush_decoder", "=", "True", "else", ":", "cache_content", "=", "False", "data", "=", "self", ".", "_fp", ".", "read", "(", "amt", ")", "if", "amt", "!=", "0", "and", "not", "data", ":", "# Platform-specific: Buggy versions of Python.", "# Close the connection when no data is returned", "#", "# This is redundant to what httplib/http.client _should_", "# already do. However, versions of python released before", "# December 15, 2012 (http://bugs.python.org/issue16298) do", "# not properly close the connection in all cases. There is", "# no harm in redundantly calling close.", "self", ".", "_fp", ".", "close", "(", ")", "flush_decoder", "=", "True", "except", "SocketTimeout", ":", "# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but", "# there is yet no clean way to get at it from this context.", "raise", "ReadTimeoutError", "(", "self", ".", "_pool", ",", "None", ",", "'Read timed out.'", ")", "except", "BaseSSLError", "as", "e", ":", "# FIXME: Is there a better way to differentiate between SSLErrors?", "if", "not", "'read operation timed out'", "in", "str", "(", "e", ")", ":", "# Defensive:", "# This shouldn't happen but just in case we're missing an edge", "# case, let's avoid swallowing SSL errors.", "raise", "raise", "ReadTimeoutError", "(", "self", ".", "_pool", ",", "None", ",", "'Read timed out.'", ")", "except", "HTTPException", "as", "e", ":", "# This includes IncompleteRead.", "raise", "ProtocolError", "(", "'Connection broken: %r'", "%", "e", ",", "e", ")", "self", ".", "_fp_bytes_read", "+=", "len", "(", "data", ")", "try", ":", "if", "decode_content", "and", "self", ".", "_decoder", ":", "data", "=", "self", ".", "_decoder", ".", "decompress", "(", "data", ")", "except", "(", "IOError", ",", "zlib", ".", "error", ")", "as", "e", ":", "raise", "DecodeError", "(", "\"Received response with content-encoding: %s, but \"", "\"failed to decode it.\"", "%", "content_encoding", ",", "e", ")", "if", "flush_decoder", "and", "decode_content", "and", "self", ".", "_decoder", ":", "buf", "=", "self", ".", "_decoder", ".", "decompress", "(", "binary_type", "(", ")", ")", "data", "+=", "buf", "+", "self", ".", "_decoder", ".", "flush", "(", ")", "if", "cache_content", ":", "self", ".", "_body", "=", "data", "return", "data", "finally", ":", "if", "self", ".", "_original_response", "and", "self", ".", "_original_response", ".", "isclosed", "(", ")", ":", "self", ".", "release_conn", "(", ")" ]
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param cache_content: If True, will save the returned data such that the same result is returned regardless of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.)
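A sketch of streaming a body in fixed-size chunks through this read(); the pool setup and URL are assumptions, and handle_chunk is a hypothetical callback:

import urllib3

http = urllib3.PoolManager()
resp = http.request('GET', 'http://example.com/', preload_content=False)
while True:
    chunk = resp.read(amt=4096, decode_content=True)
    if not chunk:
        break
    handle_chunk(chunk)    # hypothetical chunk consumer
resp.release_conn()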
[ "Similar", "to", ":", "meth", ":", "httplib", ".", "HTTPResponse", ".", "read", "but", "with", "two", "additional", "parameters", ":", "decode_content", "and", "cache_content", "." ]
python
train
yyuu/botornado
boto/ec2/connection.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/ec2/connection.py#L1863-L1894
def create_security_group(self, name, description, vpc_id=None):
    """
    Create a new security group for your account.
    This will create the security group within the region you
    are currently connected to.

    :type name: string
    :param name: The name of the new security group

    :type description: string
    :param description: The description of the new security group

    :type vpc_id: string
    :param vpc_id: The ID of the VPC to create the security group in,
                   if any.

    :rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
    :return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`.
    """
    params = {
        'GroupName': name,
        'GroupDescription': description
    }
    if vpc_id is not None:
        params['VpcId'] = vpc_id
    group = self.get_object('CreateSecurityGroup', params,
                            SecurityGroup, verb='POST')
    group.name = name
    group.description = description
    return group
[ "def", "create_security_group", "(", "self", ",", "name", ",", "description", ",", "vpc_id", "=", "None", ")", ":", "params", "=", "{", "'GroupName'", ":", "name", ",", "'GroupDescription'", ":", "description", "}", "if", "vpc_id", "is", "not", "None", ":", "params", "[", "'VpcId'", "]", "=", "vpc_id", "group", "=", "self", ".", "get_object", "(", "'CreateSecurityGroup'", ",", "params", ",", "SecurityGroup", ",", "verb", "=", "'POST'", ")", "group", ".", "name", "=", "name", "group", ".", "description", "=", "description", "return", "group" ]
Create a new security group for your account. This will create the security group within the region you are currently connected to. :type name: string :param name: The name of the new security group :type description: string :param description: The description of the new security group :type vpc_id: string :param vpc_id: The ID of the VPC to create the security group in, if any. :rtype: :class:`boto.ec2.securitygroup.SecurityGroup` :return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`.
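A sketch of calling this through a boto EC2 connection; the region and group names are placeholders:

import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
group = conn.create_security_group('web', 'Web tier security group')
print(group.name, group.description)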
[ "Create", "a", "new", "security", "group", "for", "your", "account", ".", "This", "will", "create", "the", "security", "group", "within", "the", "region", "you", "are", "currently", "connected", "to", "." ]
python
train
polysquare/polysquare-generic-file-linter
polysquarelinter/linter.py
https://github.com/polysquare/polysquare-generic-file-linter/blob/cfc88771acd3d5551c28fa5d917bb0aeb584c4cc/polysquarelinter/linter.py#L801-L813
def _run_lint_on_file_stamped(*args):
    """Run linter functions on file_path, stamping in stamp_file_path."""
    # We pass an empty dictionary as keyword arguments here to work
    # around a bug in frosted, which crashes when no keyword arguments
    # are passed
    #
    # suppress(E204)
    stamp_args, stamp_kwargs = _run_lint_on_file_stamped_args(*args, **{})

    return jobstamp.run(_run_lint_on_file_exceptions,
                        *stamp_args,
                        **stamp_kwargs)
[ "def", "_run_lint_on_file_stamped", "(", "*", "args", ")", ":", "# We pass an empty dictionary as keyword arguments here to work", "# around a bug in frosted, which crashes when no keyword arguments", "# are passed", "#", "# suppress(E204)", "stamp_args", ",", "stamp_kwargs", "=", "_run_lint_on_file_stamped_args", "(", "*", "args", ",", "*", "*", "{", "}", ")", "return", "jobstamp", ".", "run", "(", "_run_lint_on_file_exceptions", ",", "*", "stamp_args", ",", "*", "*", "stamp_kwargs", ")" ]
Run linter functions on file_path, stamping in stamp_file_path.
[ "Run", "linter", "functions", "on", "file_path", "stamping", "in", "stamp_file_path", "." ]
python
train
BeyondTheClouds/enoslib
enoslib/infra/enos_g5k/g5k_api_utils.py
https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/infra/enos_g5k/g5k_api_utils.py#L493-L543
def _do_synchronise_jobs(walltime, machines):
    """ This returns a common reservation date for all the jobs.

    This reservation date is really only a hint and will be supplied to
    each oar server. Without this *common* reservation_date, one oar
    server can decide to postpone the start of the job while the others
    are already running. But this doesn't prevent the start of a job on
    one site from drifting (e.g. because the machines need to be
    restarted), though this shouldn't exceed a few minutes.
    """

    offset = SYNCHRONISATION_OFFSET
    start = time.time() + offset
    _t = time.strptime(walltime, "%H:%M:%S")
    _walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec

    # Compute the demand for each cluster
    demands = defaultdict(int)
    for machine in machines:
        cluster = machine["cluster"]
        demands[cluster] += machine["nodes"]

    # Early leave if only one cluster is there
    if len(list(demands.keys())) <= 1:
        logger.debug("Only one cluster detected: no synchronisation needed")
        return None

    clusters = clusters_sites_obj(list(demands.keys()))

    # Early leave if only one site is concerned
    sites = set(list(clusters.values()))
    if len(sites) <= 1:
        logger.debug("Only one site detected: no synchronisation needed")
        return None

    # Test the proposed reservation_date
    ok = True
    for cluster, nodes in demands.items():
        cluster_status = clusters[cluster].status.list()
        ok = ok and can_start_on_cluster(cluster_status.nodes,
                                         nodes,
                                         start,
                                         _walltime)
        if not ok:
            break
    if ok:
        # The proposed reservation_date fits
        logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites))
        return start

    if start is None:
        raise EnosG5kSynchronisationError(sites)
[ "def", "_do_synchronise_jobs", "(", "walltime", ",", "machines", ")", ":", "offset", "=", "SYNCHRONISATION_OFFSET", "start", "=", "time", ".", "time", "(", ")", "+", "offset", "_t", "=", "time", ".", "strptime", "(", "walltime", ",", "\"%H:%M:%S\"", ")", "_walltime", "=", "_t", ".", "tm_hour", "*", "3600", "+", "_t", ".", "tm_min", "*", "60", "+", "_t", ".", "tm_sec", "# Compute the demand for each cluster", "demands", "=", "defaultdict", "(", "int", ")", "for", "machine", "in", "machines", ":", "cluster", "=", "machine", "[", "\"cluster\"", "]", "demands", "[", "cluster", "]", "+=", "machine", "[", "\"nodes\"", "]", "# Early leave if only one cluster is there", "if", "len", "(", "list", "(", "demands", ".", "keys", "(", ")", ")", ")", "<=", "1", ":", "logger", ".", "debug", "(", "\"Only one cluster detected: no synchronisation needed\"", ")", "return", "None", "clusters", "=", "clusters_sites_obj", "(", "list", "(", "demands", ".", "keys", "(", ")", ")", ")", "# Early leave if only one site is concerned", "sites", "=", "set", "(", "list", "(", "clusters", ".", "values", "(", ")", ")", ")", "if", "len", "(", "sites", ")", "<=", "1", ":", "logger", ".", "debug", "(", "\"Only one site detected: no synchronisation needed\"", ")", "return", "None", "# Test the proposed reservation_date", "ok", "=", "True", "for", "cluster", ",", "nodes", "in", "demands", ".", "items", "(", ")", ":", "cluster_status", "=", "clusters", "[", "cluster", "]", ".", "status", ".", "list", "(", ")", "ok", "=", "ok", "and", "can_start_on_cluster", "(", "cluster_status", ".", "nodes", ",", "nodes", ",", "start", ",", "_walltime", ")", "if", "not", "ok", ":", "break", "if", "ok", ":", "# The proposed reservation_date fits", "logger", ".", "info", "(", "\"Reservation_date=%s (%s)\"", "%", "(", "_date2h", "(", "start", ")", ",", "sites", ")", ")", "return", "start", "if", "start", "is", "None", ":", "raise", "EnosG5kSynchronisationError", "(", "sites", ")" ]
This returns a common reservation date for all the jobs. This reservation date is really only a hint and will be supplied to each oar server. Without this *common* reservation_date, one oar server can decide to postpone the start of the job while the others are already running. But this doesn't prevent the start of a job on one site from drifting (e.g. because the machines need to be restarted), though this shouldn't exceed a few minutes.
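The walltime handling above is plain strptime arithmetic; a quick self-contained check:

import time

# "02:30:00" -> 2*3600 + 30*60 + 0 = 9000 seconds
_t = time.strptime("02:30:00", "%H:%M:%S")
assert _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec == 9000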
[ "This", "returns", "a", "common", "reservation", "date", "for", "all", "the", "jobs", "." ]
python
train
funilrys/PyFunceble
PyFunceble/core.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/core.py#L844-L893
def colorify_logo(cls, home=False):
    """
    Print the colored logo based on global results.

    :param home: Tell us if we have to print the initial coloration.
    :type home: bool
    """

    if not PyFunceble.CONFIGURATION["quiet"]:
        # The quiet mode is not activated.

        to_print = []

        if home:
            # We have to print the initial logo.
            color = PyFunceble.Fore.YELLOW
        elif PyFunceble.INTERN["counter"]["percentage"]["up"] >= 50:
            # The percentage of up is greater or equal to 50%.
            color = PyFunceble.Fore.GREEN
        else:
            # The percentage of up is less than 50%.
            color = PyFunceble.Fore.RED

        for line in PyFunceble.ASCII_PYFUNCEBLE.split("\n"):
            # We loop through each line of the ASCII representation
            # of PyFunceble and append it with the right coloration.
            to_print.append(color + line + PyFunceble.Fore.RESET)

        print("\n".join(to_print))
[ "def", "colorify_logo", "(", "cls", ",", "home", "=", "False", ")", ":", "if", "not", "PyFunceble", ".", "CONFIGURATION", "[", "\"quiet\"", "]", ":", "# The quiet mode is not activated.", "to_print", "=", "[", "]", "if", "home", ":", "# We have to print the initial logo.", "for", "line", "in", "PyFunceble", ".", "ASCII_PYFUNCEBLE", ".", "split", "(", "\"\\n\"", ")", ":", "# We loop through each lines of the ASCII representation", "# of PyFunceble.", "# And we append to the data to print the currently read", "# line with the right coloration.", "to_print", ".", "append", "(", "PyFunceble", ".", "Fore", ".", "YELLOW", "+", "line", "+", "PyFunceble", ".", "Fore", ".", "RESET", ")", "elif", "PyFunceble", ".", "INTERN", "[", "\"counter\"", "]", "[", "\"percentage\"", "]", "[", "\"up\"", "]", ">=", "50", ":", "# The percentage of up is greater or equal to 50%.", "for", "line", "in", "PyFunceble", ".", "ASCII_PYFUNCEBLE", ".", "split", "(", "\"\\n\"", ")", ":", "# We loop through each lines of the ASCII representation", "# of PyFunceble.", "# And we append to the data to print the currently read", "# line with the right coloration.", "to_print", ".", "append", "(", "PyFunceble", ".", "Fore", ".", "GREEN", "+", "line", "+", "PyFunceble", ".", "Fore", ".", "RESET", ")", "else", ":", "# The percentage of up is less than 50%.", "for", "line", "in", "PyFunceble", ".", "ASCII_PYFUNCEBLE", ".", "split", "(", "\"\\n\"", ")", ":", "# We loop through each lines of the ASCII representation", "# of PyFunceble.", "# And we append to the data to print the currently read", "# line with the right coloration.", "to_print", ".", "append", "(", "PyFunceble", ".", "Fore", ".", "RED", "+", "line", "+", "PyFunceble", ".", "Fore", ".", "RESET", ")", "print", "(", "\"\\n\"", ".", "join", "(", "to_print", ")", ")" ]
Print the colored logo based on global results. :param home: Tell us if we have to print the initial coloration. :type home: bool
[ "Print", "the", "colored", "logo", "based", "on", "global", "results", "." ]
python
test
mozilla-releng/scriptworker
scriptworker/cot/verify.py
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/cot/verify.py#L1866-L1886
async def verify_task_types(chain):
    """Verify the task type (e.g. decision, build) of each link in the chain.

    Args:
        chain (ChainOfTrust): the chain we're operating on

    Returns:
        dict: mapping task type to the number of links.

    """
    valid_task_types = get_valid_task_types()
    task_count = {}
    for obj in chain.get_all_links_in_chain():
        task_type = obj.task_type
        log.info("Verifying {} {} as a {} task...".format(obj.name, obj.task_id, task_type))
        task_count.setdefault(task_type, 0)
        task_count[task_type] += 1
        # Run tests synchronously for now. We can parallelize if efficiency
        # is more important than a single simple logfile.
        await valid_task_types[task_type](chain, obj)
    return task_count
[ "async", "def", "verify_task_types", "(", "chain", ")", ":", "valid_task_types", "=", "get_valid_task_types", "(", ")", "task_count", "=", "{", "}", "for", "obj", "in", "chain", ".", "get_all_links_in_chain", "(", ")", ":", "task_type", "=", "obj", ".", "task_type", "log", ".", "info", "(", "\"Verifying {} {} as a {} task...\"", ".", "format", "(", "obj", ".", "name", ",", "obj", ".", "task_id", ",", "task_type", ")", ")", "task_count", ".", "setdefault", "(", "task_type", ",", "0", ")", "task_count", "[", "task_type", "]", "+=", "1", "# Run tests synchronously for now. We can parallelize if efficiency", "# is more important than a single simple logfile.", "await", "valid_task_types", "[", "task_type", "]", "(", "chain", ",", "obj", ")", "return", "task_count" ]
Verify the task type (e.g. decision, build) of each link in the chain. Args: chain (ChainOfTrust): the chain we're operating on Returns: dict: mapping task type to the number of links.
[ "Verify", "the", "task", "type", "(", "e", ".", "g", ".", "decision", "build", ")", "of", "each", "link", "in", "the", "chain", "." ]
python
train
thespacedoctor/sherlock
sherlock/transient_classifier.py
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_classifier.py#L479-L540
def _update_ned_stream(
        self,
        transientsMetadataList
):
    """ update the NED stream within the catalogues database at the
    locations of the transients

    **Key Arguments:**
        - ``transientsMetadataList`` -- the list of transient metadata
          lifted from the database.

    .. todo ::

        - update key arguments values and definitions with defaults
        - update return values and definitions
        - update usage examples and text
        - update docstring text
        - check sublime snippet exists
        - clip any useful text to docs mindmap
        - regenerate the docs and check rendering of this docstring
    """
    self.log.debug('starting the ``_update_ned_stream`` method')

    coordinateList = []
    for i in transientsMetadataList:
        # thisList = str(i["ra"]) + " " + str(i["dec"])
        thisList = (i["ra"], i["dec"])
        coordinateList.append(thisList)

    coordinateList = self._remove_previous_ned_queries(
        coordinateList=coordinateList
    )

    # MINIMISE COORDINATES IN LIST TO REDUCE NUMBER OF REQUIRED NED QUERIES
    coordinateList = self._consolidate_coordinateList(
        coordinateList=coordinateList
    )

    stream = ned(
        log=self.log,
        settings=self.settings,
        coordinateList=coordinateList,
        radiusArcsec=self.settings["ned stream search radius arcec"]
    )
    stream.ingest()

    sqlQuery = """SET session sql_mode = "";"""
    writequery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn
    )
    sqlQuery = """update tcs_cat_ned_stream set magnitude = CAST(`magnitude_filter` AS DECIMAL(5,2)) where magnitude is null;"""
    writequery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.cataloguesDbConn
    )

    self.log.debug('completed the ``_update_ned_stream`` method')
    return None
[ "def", "_update_ned_stream", "(", "self", ",", "transientsMetadataList", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``_update_ned_stream`` method'", ")", "coordinateList", "=", "[", "]", "for", "i", "in", "transientsMetadataList", ":", "# thisList = str(i[\"ra\"]) + \" \" + str(i[\"dec\"])", "thisList", "=", "(", "i", "[", "\"ra\"", "]", ",", "i", "[", "\"dec\"", "]", ")", "coordinateList", ".", "append", "(", "thisList", ")", "coordinateList", "=", "self", ".", "_remove_previous_ned_queries", "(", "coordinateList", "=", "coordinateList", ")", "# MINIMISE COORDINATES IN LIST TO REDUCE NUMBER OF REQUIRE NED QUERIES", "coordinateList", "=", "self", ".", "_consolidate_coordinateList", "(", "coordinateList", "=", "coordinateList", ")", "stream", "=", "ned", "(", "log", "=", "self", ".", "log", ",", "settings", "=", "self", ".", "settings", ",", "coordinateList", "=", "coordinateList", ",", "radiusArcsec", "=", "self", ".", "settings", "[", "\"ned stream search radius arcec\"", "]", ")", "stream", ".", "ingest", "(", ")", "sqlQuery", "=", "\"\"\"SET session sql_mode = \"\";\"\"\"", "%", "locals", "(", ")", "writequery", "(", "log", "=", "self", ".", "log", ",", "sqlQuery", "=", "sqlQuery", ",", "dbConn", "=", "self", ".", "cataloguesDbConn", ")", "sqlQuery", "=", "\"\"\"update tcs_cat_ned_stream set magnitude = CAST(`magnitude_filter` AS DECIMAL(5,2)) where magnitude is null;\"\"\"", "%", "locals", "(", ")", "writequery", "(", "log", "=", "self", ".", "log", ",", "sqlQuery", "=", "sqlQuery", ",", "dbConn", "=", "self", ".", "cataloguesDbConn", ")", "self", ".", "log", ".", "debug", "(", "'completed the ``_update_ned_stream`` method'", ")", "return", "None" ]
update the NED stream within the catalogues database at the locations of the transients **Key Arguments:** - ``transientsMetadataList`` -- the list of transient metadata lifted from the database. .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check rendering of this docstring
[ "update", "the", "NED", "stream", "within", "the", "catalogues", "database", "at", "the", "locations", "of", "the", "transients" ]
python
train
mozillazg/python-shanbay
shanbay/__init__.py
https://github.com/mozillazg/python-shanbay/blob/d505ba614dc13a36afce46969d13fc64e10dde0d/shanbay/__init__.py#L68-L82
def login(self, **kwargs):
    """Log in to shanbay.com"""
    payload = {
        'username': self.username,
        'password': self.password,
    }
    headers = kwargs.setdefault('headers', {})
    headers.setdefault(
        'Referer',
        'https://www.shanbay.com/web/account/login'
    )
    url = 'https://www.shanbay.com/api/v1/account/login/web/'
    response = self.request(url, 'put', json=payload, **kwargs)

    r_json = response.json()
    return r_json['status_code'] == 0
[ "def", "login", "(", "self", ",", "*", "*", "kwargs", ")", ":", "payload", "=", "{", "'username'", ":", "self", ".", "username", ",", "'password'", ":", "self", ".", "password", ",", "}", "headers", "=", "kwargs", ".", "setdefault", "(", "'headers'", ",", "{", "}", ")", "headers", ".", "setdefault", "(", "'Referer'", ",", "'https://www.shanbay.com/web/account/login'", ")", "url", "=", "'https://www.shanbay.com/api/v1/account/login/web/'", "response", "=", "self", ".", "request", "(", "url", ",", "'put'", ",", "json", "=", "payload", ",", "*", "*", "kwargs", ")", "r_json", "=", "response", ".", "json", "(", ")", "return", "r_json", "[", "'status_code'", "]", "==", "0" ]
Log in to shanbay.com
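A sketch of the login flow; the constructor signature is assumed from how self.username and self.password are used, and the credentials are placeholders:

from shanbay import Shanbay

s = Shanbay('username', 'password')   # placeholder credentials
if s.login():
    print('logged in')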
[ "登录" ]
python
train
horazont/aioxmpp
aioxmpp/muc/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/muc/service.py#L986-L997
def members(self):
    """
    A copy of the list of occupants. The local user is always the first
    item in the list, unless the :meth:`on_enter` has not fired yet.
    """
    if self._this_occupant is not None:
        items = [self._this_occupant]
    else:
        items = []
    items += list(self._occupant_info.values())
    return items
[ "def", "members", "(", "self", ")", ":", "if", "self", ".", "_this_occupant", "is", "not", "None", ":", "items", "=", "[", "self", ".", "_this_occupant", "]", "else", ":", "items", "=", "[", "]", "items", "+=", "list", "(", "self", ".", "_occupant_info", ".", "values", "(", ")", ")", "return", "items" ]
A copy of the list of occupants. The local user is always the first item in the list, unless the :meth:`on_enter` has not fired yet.
[ "A", "copy", "of", "the", "list", "of", "occupants", ".", "The", "local", "user", "is", "always", "the", "first", "item", "in", "the", "list", "unless", "the", ":", "meth", ":", "on_enter", "has", "not", "fired", "yet", "." ]
python
train
jlmadurga/permabots
permabots/views/api/hook.py
https://github.com/jlmadurga/permabots/blob/781a91702529a23fe7bc2aa84c5d88e961412466/permabots/views/api/hook.py#L192-L204
def delete(self, request, bot_id, hook_id, id, format=None):
    """
    Delete an existing telegram recipient
    ---
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    bot = self.get_bot(bot_id, request.user)
    hook = self.get_hook(hook_id, bot, request.user)
    recipient = self.get_recipient(id, hook, request.user)
    recipient.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
[ "def", "delete", "(", "self", ",", "request", ",", "bot_id", ",", "hook_id", ",", "id", ",", "format", "=", "None", ")", ":", "bot", "=", "self", ".", "get_bot", "(", "bot_id", ",", "request", ".", "user", ")", "hook", "=", "self", ".", "get_hook", "(", "hook_id", ",", "bot", ",", "request", ".", "user", ")", "recipient", "=", "self", ".", "get_recipient", "(", "id", ",", "hook", ",", "request", ".", "user", ")", "recipient", ".", "delete", "(", ")", "return", "Response", "(", "status", "=", "status", ".", "HTTP_204_NO_CONTENT", ")" ]
Delete an existing telegram recipient --- responseMessages: - code: 401 message: Not authenticated
[ "Delete", "an", "existing", "telegram", "recipient", "---", "responseMessages", ":", "-", "code", ":", "401", "message", ":", "Not", "authenticated" ]
python
train
the01/python-floscraper
floscraper/webscraper.py
https://github.com/the01/python-floscraper/blob/d578cd3d6381070d9a07dade1e10387ae33e9a65/floscraper/webscraper.py#L111-L129
def _browser_init(self):
    """
    Init the browsing instance if not already set up

    :rtype: None
    """
    if self.session:
        return

    self.session = requests.Session()
    headers = {}

    if self.user_agent:
        headers['User-agent'] = self.user_agent
    self.session.headers.update(headers)

    if self._auth_method in [None, "", "HTTPBasicAuth"]:
        if self._auth_username is not None:
            self.session.auth = (self._auth_username, self._auth_password)
[ "def", "_browser_init", "(", "self", ")", ":", "if", "self", ".", "session", ":", "return", "self", ".", "session", "=", "requests", ".", "Session", "(", ")", "headers", "=", "{", "}", "if", "self", ".", "user_agent", ":", "headers", "[", "'User-agent'", "]", "=", "self", ".", "user_agent", "self", ".", "session", ".", "headers", ".", "update", "(", "headers", ")", "if", "self", ".", "_auth_method", "in", "[", "None", ",", "\"\"", ",", "\"HTTPBasicAuth\"", "]", ":", "if", "self", ".", "_auth_username", "is", "not", "None", ":", "self", ".", "session", ".", "auth", "=", "(", "self", ".", "_auth_username", ",", "self", ".", "_auth_password", ")" ]
Init the browsing instance if not already set up :rtype: None
[ "Init", "the", "browsing", "instance", "if", "not", "setup" ]
python
train
obriencj/python-javatools
javatools/jarutil.py
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/jarutil.py#L70-L174
def verify(certificate, jar_file, sf_name=None):
    """
    Verifies signature of a JAR file.

    Limitations:
    - diagnostic is less verbose than of jarsigner
    :return None if verification succeeds.
    :exception SignatureBlockFileVerificationError, ManifestChecksumError,
        JarChecksumError, JarSignatureMissingError

    Reference:
    http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#Signature_Validation
    Note that the validation is done in three steps. Failure at any step
    is a failure of the whole validation.
    """  # noqa

    # Step 0: get the "key alias", used also for naming of sig-related files.
    zip_file = ZipFile(jar_file)
    sf_files = [f for f in zip_file.namelist() if file_matches_sigfile(f)]
    if len(sf_files) == 0:
        raise JarSignatureMissingError("No .SF file in %s" % jar_file)
    elif len(sf_files) > 1:
        if sf_name is None:
            msg = "Multiple .SF files in %s, but SF_NAME.SF not specified" \
                  % jar_file
            raise VerificationError(msg)
        elif ('META-INF/' + sf_name) in sf_files:
            sf_filename = 'META-INF/' + sf_name
        else:
            msg = "No .SF file in %s named META-INF/%s (found %d .SF files)" \
                  % (jar_file, sf_name, len(sf_files))
            raise VerificationError(msg)
    elif len(sf_files) == 1:
        if sf_name is None:
            sf_filename = sf_files[0]
        elif sf_files[0] == 'META-INF/' + sf_name:
            sf_filename = sf_files[0]
        else:
            msg = "No .SF file in %s named META-INF/%s" % (jar_file, sf_name)
            raise VerificationError(msg)

    key_alias = sf_filename[9:-3]  # "META-INF/%s.SF"
    sf_data = zip_file.read(sf_filename)

    # Step 1: check the crypto part.
    file_list = zip_file.namelist()
    sig_block_filename = None

    # JAR specification mentions only RSA and DSA; jarsigner also has EC
    # TODO: what about "SIG-*"?
    signature_extensions = ("RSA", "DSA", "EC")
    for extension in signature_extensions:
        candidate_filename = "META-INF/%s.%s" % (key_alias, extension)
        if candidate_filename in file_list:
            sig_block_filename = candidate_filename
            break

    if sig_block_filename is None:
        msg = "None of %s found in JAR" % \
              ", ".join(key_alias + "." + x for x in signature_extensions)
        raise JarSignatureMissingError(msg)

    sig_block_data = zip_file.read(sig_block_filename)
    try:
        verify_signature_block(certificate, sf_data, sig_block_data)
    except SignatureBlockVerificationError as message:
        message = "Signature block verification failed: %s" % message
        raise SignatureBlockFileVerificationError(message)

    # KEYALIAS.SF is correctly signed.
    # Step 2: Check that it contains correct checksum of the manifest.
    signature_manifest = SignatureManifest()
    signature_manifest.parse(sf_data)

    jar_manifest = Manifest()
    jar_manifest.load_from_jar(jar_file)

    errors = signature_manifest.verify_manifest(jar_manifest)
    if len(errors) > 0:
        msg = "%s: in .SF file, section checksum(s) failed for: %s" \
              % (jar_file, ",".join(errors))
        raise ManifestChecksumError(msg)

    # Checksums of MANIFEST.MF itself are correct.
    # Step 3: Check that it contains valid checksums for each file
    # from the JAR. NOTE: the check is done for JAR entries. If some
    # JAR entries are deleted after signing, the verification still
    # succeeds. This seems to not follow the reference specification,
    # but that's what jarsigner does.
    errors = jar_manifest.verify_jar_checksums(jar_file)
    if len(errors) > 0:
        msg = "Checksum(s) for jar entries of jar file %s failed for: %s" \
              % (jar_file, ",".join(errors))
        raise JarChecksumError(msg)

    return None
[ "def", "verify", "(", "certificate", ",", "jar_file", ",", "sf_name", "=", "None", ")", ":", "# noqua", "# Step 0: get the \"key alias\", used also for naming of sig-related files.", "zip_file", "=", "ZipFile", "(", "jar_file", ")", "sf_files", "=", "[", "f", "for", "f", "in", "zip_file", ".", "namelist", "(", ")", "if", "file_matches_sigfile", "(", "f", ")", "]", "if", "len", "(", "sf_files", ")", "==", "0", ":", "raise", "JarSignatureMissingError", "(", "\"No .SF file in %s\"", "%", "jar_file", ")", "elif", "len", "(", "sf_files", ")", ">", "1", ":", "if", "sf_name", "is", "None", ":", "msg", "=", "\"Multiple .SF files in %s, but SF_NAME.SF not specified\"", "%", "jar_file", "raise", "VerificationError", "(", "msg", ")", "elif", "(", "'META-INF/'", "+", "sf_name", ")", "in", "sf_files", ":", "sf_filename", "=", "'META-INF/'", "+", "sf_name", "else", ":", "msg", "=", "\"No .SF file in %s named META-INF/%s (found %d .SF files)\"", "%", "(", "jar_file", ",", "sf_name", ",", "len", "(", "sf_files", ")", ")", "raise", "VerificationError", "(", "msg", ")", "elif", "len", "(", "sf_files", ")", "==", "1", ":", "if", "sf_name", "is", "None", ":", "sf_filename", "=", "sf_files", "[", "0", "]", "elif", "sf_files", "[", "0", "]", "==", "'META-INF/'", "+", "sf_name", ":", "sf_filename", "=", "sf_files", "[", "0", "]", "else", ":", "msg", "=", "\"No .SF file in %s named META-INF/%s\"", "%", "(", "jar_file", ",", "sf_name", ")", "raise", "VerificationError", "(", "msg", ")", "key_alias", "=", "sf_filename", "[", "9", ":", "-", "3", "]", "# \"META-INF/%s.SF\"", "sf_data", "=", "zip_file", ".", "read", "(", "sf_filename", ")", "# Step 1: check the crypto part.", "file_list", "=", "zip_file", ".", "namelist", "(", ")", "sig_block_filename", "=", "None", "# JAR specification mentions only RSA and DSA; jarsigner also has EC", "# TODO: what about \"SIG-*\"?", "signature_extensions", "=", "(", "\"RSA\"", ",", "\"DSA\"", ",", "\"EC\"", ")", "for", "extension", "in", "signature_extensions", ":", "candidate_filename", "=", "\"META-INF/%s.%s\"", "%", "(", "key_alias", ",", "extension", ")", "if", "candidate_filename", "in", "file_list", ":", "sig_block_filename", "=", "candidate_filename", "break", "if", "sig_block_filename", "is", "None", ":", "msg", "=", "\"None of %s found in JAR\"", "%", "\", \"", ".", "join", "(", "key_alias", "+", "\".\"", "+", "x", "for", "x", "in", "signature_extensions", ")", "raise", "JarSignatureMissingError", "(", "msg", ")", "sig_block_data", "=", "zip_file", ".", "read", "(", "sig_block_filename", ")", "try", ":", "verify_signature_block", "(", "certificate", ",", "sf_data", ",", "sig_block_data", ")", "except", "SignatureBlockVerificationError", "as", "message", ":", "message", "=", "\"Signature block verification failed: %s\"", "%", "message", "raise", "SignatureBlockFileVerificationError", "(", "message", ")", "# KEYALIAS.SF is correctly signed.", "# Step 2: Check that it contains correct checksum of the manifest.", "signature_manifest", "=", "SignatureManifest", "(", ")", "signature_manifest", ".", "parse", "(", "sf_data", ")", "jar_manifest", "=", "Manifest", "(", ")", "jar_manifest", ".", "load_from_jar", "(", "jar_file", ")", "errors", "=", "signature_manifest", ".", "verify_manifest", "(", "jar_manifest", ")", "if", "len", "(", "errors", ")", ">", "0", ":", "msg", "=", "\"%s: in .SF file, section checksum(s) failed for: %s\"", "%", "(", "jar_file", ",", "\",\"", ".", "join", "(", "errors", ")", ")", "raise", "ManifestChecksumError", "(", "msg", ")", "# Checksums of 
MANIFEST.MF itself are correct.", "# Step 3: Check that it contains valid checksums for each file", "# from the JAR. NOTE: the check is done for JAR entries. If some", "# JAR entries are deleted after signing, the verification still", "# succeeds. This seems to not follow the reference specification,", "# but that's what jarsigner does.", "errors", "=", "jar_manifest", ".", "verify_jar_checksums", "(", "jar_file", ")", "if", "len", "(", "errors", ")", ">", "0", ":", "msg", "=", "\"Checksum(s) for jar entries of jar file %s failed for: %s\"", "%", "(", "jar_file", ",", "\",\"", ".", "join", "(", "errors", ")", ")", "raise", "JarChecksumError", "(", "msg", ")", "return", "None" ]
Verifies signature of a JAR file. Limitations: - diagnostic is less verbose than of jarsigner :return None if verification succeeds. :exception SignatureBlockFileVerificationError, ManifestChecksumError, JarChecksumError, JarSignatureMissingError Reference: http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html#Signature_Validation Note that the validation is done in three steps. Failure at any step is a failure of the whole validation.
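A sketch of driving verify(); both paths are placeholders, and the except clause leans on the fact that the module's verification errors are ordinary exceptions:

from javatools.jarutil import verify

try:
    verify('signer-cert.pem', 'example.jar')   # placeholder paths
    print('signature OK')
except Exception as e:
    print('verification failed:', e)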
[ "Verifies", "signature", "of", "a", "JAR", "file", "." ]
python
train
sorgerlab/indra
indra/sources/reach/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/reach/processor.py#L835-L862
def determine_reach_subtype(event_name):
    """Returns the category of reach rule from the reach rule instance.

    Looks at a list of regular expressions corresponding to reach rule
    types, and returns the longest regexp that matches, or None if none
    of them match.

    Parameters
    ----------
    event_name : str
        The name of the reach rule to subtype

    Returns
    -------
    best_match : str
        A regular expression corresponding to the reach rule that was
        used to extract this evidence
    """
    best_match_length = None
    best_match = None
    for ss in reach_rule_regexps:
        if re.search(ss, event_name):
            if best_match is None or len(ss) > best_match_length:
                best_match = ss
                best_match_length = len(ss)

    return best_match
[ "def", "determine_reach_subtype", "(", "event_name", ")", ":", "best_match_length", "=", "None", "best_match", "=", "None", "for", "ss", "in", "reach_rule_regexps", ":", "if", "re", ".", "search", "(", "ss", ",", "event_name", ")", ":", "if", "best_match", "is", "None", "or", "len", "(", "ss", ")", ">", "best_match_length", ":", "best_match", "=", "ss", "best_match_length", "=", "len", "(", "ss", ")", "return", "best_match" ]
Returns the category of reach rule from the reach rule instance. Looks at a list of regular expressions corresponding to reach rule types, and returns the longest regexp that matches, or None if none of them match. Parameters ---------- event_name : str The name of the reach rule to subtype Returns ------- best_match : str A regular expression corresponding to the reach rule that was used to extract this evidence
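A self-contained illustration of the longest-match selection the function performs; the rule patterns here are made up, not the real reach_rule_regexps:

import re

rules = [r'phosphorylation', r'phosphorylation_syntax_1']

def longest_match(name):
    best, best_len = None, -1
    for ss in rules:
        if re.search(ss, name) and len(ss) > best_len:
            best, best_len = ss, len(ss)
    return best

assert longest_match('phosphorylation_syntax_1_verb') == 'phosphorylation_syntax_1'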
[ "Returns", "the", "category", "of", "reach", "rule", "from", "the", "reach", "rule", "instance", "." ]
python
train
KeplerGO/K2fov
K2fov/greatcircle.py
https://github.com/KeplerGO/K2fov/blob/fb122b35687340e0357cba9e0dd47b3be0760693/K2fov/greatcircle.py#L10-L42
def sphericalAngSep(ra0, dec0, ra1, dec1, radians=False):
    """
    Compute the spherical angular separation between two
    points on the sky.

    //Taken from http://www.movable-type.co.uk/scripts/gis-faq-5.1.html

    NB: For small distances you can probably use
    sqrt( dDec**2 + cos^2(dec)*dRa )
    where dDec = dec1 - dec0 and dRa = ra1 - ra0
    and dec1 \approx dec \approx dec0
    """

    if not radians:
        ra0 = np.radians(ra0)
        dec0 = np.radians(dec0)
        ra1 = np.radians(ra1)
        dec1 = np.radians(dec1)

    deltaRa = ra1 - ra0
    deltaDec = dec1 - dec0

    val = haversine(deltaDec)
    val += np.cos(dec0) * np.cos(dec1) * haversine(deltaRa)
    val = min(1, np.sqrt(val))  # Guard against round off error?
    val = 2 * np.arcsin(val)

    # Convert back to degrees if necessary
    if not radians:
        val = np.degrees(val)

    return val
[ "def", "sphericalAngSep", "(", "ra0", ",", "dec0", ",", "ra1", ",", "dec1", ",", "radians", "=", "False", ")", ":", "if", "radians", "==", "False", ":", "ra0", "=", "np", ".", "radians", "(", "ra0", ")", "dec0", "=", "np", ".", "radians", "(", "dec0", ")", "ra1", "=", "np", ".", "radians", "(", "ra1", ")", "dec1", "=", "np", ".", "radians", "(", "dec1", ")", "deltaRa", "=", "ra1", "-", "ra0", "deltaDec", "=", "dec1", "-", "dec0", "val", "=", "haversine", "(", "deltaDec", ")", "val", "+=", "np", ".", "cos", "(", "dec0", ")", "*", "np", ".", "cos", "(", "dec1", ")", "*", "haversine", "(", "deltaRa", ")", "val", "=", "min", "(", "1", ",", "np", ".", "sqrt", "(", "val", ")", ")", "#Guard against round off error?", "val", "=", "2", "*", "np", ".", "arcsin", "(", "val", ")", "#Convert back to degrees if necessary", "if", "radians", "==", "False", ":", "val", "=", "np", ".", "degrees", "(", "val", ")", "return", "val" ]
Compute the spherical angular separation between two points on the sky. //Taken from http://www.movable-type.co.uk/scripts/gis-faq-5.1.html NB: For small distances you can probably use sqrt( dDec**2 + cos^2(dec)*dRa) where dDec = dec1 - dec0 and dRa = ra1 - ra0 and dec1 \approx dec \approx dec0
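A quick sanity check, assuming haversine(x) = sin(x/2)**2 as the formula requires: two equatorial points one degree apart in RA should come out exactly one degree apart on the sky:

import numpy as np

def haversine(theta):
    # hav(theta) = sin^2(theta / 2)
    return np.sin(theta / 2.0) ** 2

sep = sphericalAngSep(10.0, 0.0, 11.0, 0.0)
assert abs(sep - 1.0) < 1e-9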
[ "Compute", "the", "spherical", "angular", "separation", "between", "two", "points", "on", "the", "sky", "." ]
python
train
mlperf/training
single_stage_detector/ssd/utils.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/single_stage_detector/ssd/utils.py#L120-L156
def scale_back_batch(self, bboxes_in, scores_in):
    """
        Do scale and transform from xywh to ltrb
        suppose input Nx4xnum_bbox Nxlabel_numxnum_bbox
    """
    if bboxes_in.device == torch.device("cpu"):
        self.dboxes = self.dboxes.cpu()
        self.dboxes_xywh = self.dboxes_xywh.cpu()
    else:
        self.dboxes = self.dboxes.cuda()
        self.dboxes_xywh = self.dboxes_xywh.cuda()

    bboxes_in = bboxes_in.permute(0, 2, 1)
    scores_in = scores_in.permute(0, 2, 1)
    # print(bboxes_in.device, scores_in.device, self.dboxes_xywh.device)

    bboxes_in[:, :, :2] = self.scale_xy * bboxes_in[:, :, :2]
    bboxes_in[:, :, 2:] = self.scale_wh * bboxes_in[:, :, 2:]

    bboxes_in[:, :, :2] = bboxes_in[:, :, :2] * self.dboxes_xywh[:, :, 2:] + self.dboxes_xywh[:, :, :2]
    bboxes_in[:, :, 2:] = bboxes_in[:, :, 2:].exp() * self.dboxes_xywh[:, :, 2:]

    # Transform format to ltrb
    l, t, r, b = bboxes_in[:, :, 0] - 0.5 * bboxes_in[:, :, 2], \
                 bboxes_in[:, :, 1] - 0.5 * bboxes_in[:, :, 3], \
                 bboxes_in[:, :, 0] + 0.5 * bboxes_in[:, :, 2], \
                 bboxes_in[:, :, 1] + 0.5 * bboxes_in[:, :, 3]

    bboxes_in[:, :, 0] = l
    bboxes_in[:, :, 1] = t
    bboxes_in[:, :, 2] = r
    bboxes_in[:, :, 3] = b

    return bboxes_in, F.softmax(scores_in, dim=-1)
[ "def", "scale_back_batch", "(", "self", ",", "bboxes_in", ",", "scores_in", ")", ":", "if", "bboxes_in", ".", "device", "==", "torch", ".", "device", "(", "\"cpu\"", ")", ":", "self", ".", "dboxes", "=", "self", ".", "dboxes", ".", "cpu", "(", ")", "self", ".", "dboxes_xywh", "=", "self", ".", "dboxes_xywh", ".", "cpu", "(", ")", "else", ":", "self", ".", "dboxes", "=", "self", ".", "dboxes", ".", "cuda", "(", ")", "self", ".", "dboxes_xywh", "=", "self", ".", "dboxes_xywh", ".", "cuda", "(", ")", "bboxes_in", "=", "bboxes_in", ".", "permute", "(", "0", ",", "2", ",", "1", ")", "scores_in", "=", "scores_in", ".", "permute", "(", "0", ",", "2", ",", "1", ")", "# print(bboxes_in.device, scores_in.device, self.dboxes_xywh.device)", "bboxes_in", "[", ":", ",", ":", ",", ":", "2", "]", "=", "self", ".", "scale_xy", "*", "bboxes_in", "[", ":", ",", ":", ",", ":", "2", "]", "bboxes_in", "[", ":", ",", ":", ",", "2", ":", "]", "=", "self", ".", "scale_wh", "*", "bboxes_in", "[", ":", ",", ":", ",", "2", ":", "]", "bboxes_in", "[", ":", ",", ":", ",", ":", "2", "]", "=", "bboxes_in", "[", ":", ",", ":", ",", ":", "2", "]", "*", "self", ".", "dboxes_xywh", "[", ":", ",", ":", ",", "2", ":", "]", "+", "self", ".", "dboxes_xywh", "[", ":", ",", ":", ",", ":", "2", "]", "bboxes_in", "[", ":", ",", ":", ",", "2", ":", "]", "=", "bboxes_in", "[", ":", ",", ":", ",", "2", ":", "]", ".", "exp", "(", ")", "*", "self", ".", "dboxes_xywh", "[", ":", ",", ":", ",", "2", ":", "]", "# Transform format to ltrb", "l", ",", "t", ",", "r", ",", "b", "=", "bboxes_in", "[", ":", ",", ":", ",", "0", "]", "-", "0.5", "*", "bboxes_in", "[", ":", ",", ":", ",", "2", "]", ",", "bboxes_in", "[", ":", ",", ":", ",", "1", "]", "-", "0.5", "*", "bboxes_in", "[", ":", ",", ":", ",", "3", "]", ",", "bboxes_in", "[", ":", ",", ":", ",", "0", "]", "+", "0.5", "*", "bboxes_in", "[", ":", ",", ":", ",", "2", "]", ",", "bboxes_in", "[", ":", ",", ":", ",", "1", "]", "+", "0.5", "*", "bboxes_in", "[", ":", ",", ":", ",", "3", "]", "bboxes_in", "[", ":", ",", ":", ",", "0", "]", "=", "l", "bboxes_in", "[", ":", ",", ":", ",", "1", "]", "=", "t", "bboxes_in", "[", ":", ",", ":", ",", "2", "]", "=", "r", "bboxes_in", "[", ":", ",", ":", ",", "3", "]", "=", "b", "return", "bboxes_in", ",", "F", ".", "softmax", "(", "scores_in", ",", "dim", "=", "-", "1", ")" ]
Scale and transform boxes from xywh to ltrb; expects inputs of shape Nx4xnum_bbox and Nxlabel_numxnum_bbox.
[ "Do", "scale", "and", "transform", "from", "xywh", "to", "ltrb", "suppose", "input", "Nx4xnum_bbox", "Nxlabel_numxnum_bbox" ]
python
train
explosion/thinc
thinc/check.py
https://github.com/explosion/thinc/blob/90129be5f0d6c665344245a7c37dbe1b8afceea2/thinc/check.py#L48-L71
def has_shape(shape):
    """Check that a particular argument is an array with a given shape. The
    shape may contain string attributes, which will be fetched from arg0 to
    the function (usually self).
    """

    def has_shape_inner(arg_id, args, kwargs):
        self = args[0]
        arg = args[arg_id]
        if not hasattr(arg, "shape"):
            raise ExpectedTypeError(arg, ["array"])
        shape_values = []
        for dim in shape:
            if not isinstance(dim, integer_types):
                dim = getattr(self, dim, None)
            shape_values.append(dim)
        if len(shape) != len(arg.shape):
            raise ShapeMismatchError(arg.shape, tuple(shape_values), shape)
        for i, dim in enumerate(shape_values):
            # Allow underspecified dimensions
            if dim is not None and arg.shape[i] != dim:
                raise ShapeMismatchError(arg.shape, shape_values, shape)

    return has_shape_inner
[ "def", "has_shape", "(", "shape", ")", ":", "def", "has_shape_inner", "(", "arg_id", ",", "args", ",", "kwargs", ")", ":", "self", "=", "args", "[", "0", "]", "arg", "=", "args", "[", "arg_id", "]", "if", "not", "hasattr", "(", "arg", ",", "\"shape\"", ")", ":", "raise", "ExpectedTypeError", "(", "arg", ",", "[", "\"array\"", "]", ")", "shape_values", "=", "[", "]", "for", "dim", "in", "shape", ":", "if", "not", "isinstance", "(", "dim", ",", "integer_types", ")", ":", "dim", "=", "getattr", "(", "self", ",", "dim", ",", "None", ")", "shape_values", ".", "append", "(", "dim", ")", "if", "len", "(", "shape", ")", "!=", "len", "(", "arg", ".", "shape", ")", ":", "raise", "ShapeMismatchError", "(", "arg", ".", "shape", ",", "tuple", "(", "shape_values", ")", ",", "shape", ")", "for", "i", ",", "dim", "in", "enumerate", "(", "shape_values", ")", ":", "# Allow underspecified dimensions", "if", "dim", "is", "not", "None", "and", "arg", ".", "shape", "[", "i", "]", "!=", "dim", ":", "raise", "ShapeMismatchError", "(", "arg", ".", "shape", ",", "shape_values", ",", "shape", ")", "return", "has_shape_inner" ]
Check that a particular argument is an array with a given shape. The shape may contain string attributes, which will be fetched from arg0 to the function (usually self).
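A sketch of how the returned checker is wired up; the Model class and argument layout are assumptions based on the inner function's (arg_id, args, kwargs) signature:

import numpy as np

class Model:
    nO = 3          # the string dimension "nO" resolves to this attribute

check = has_shape(("nO", None))   # dim 0 must equal self.nO, dim 1 is free
x = np.zeros((3, 5))
check(1, (Model(), x), {})        # arg_id=1 selects x; passes silently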
[ "Check", "that", "a", "particular", "argument", "is", "an", "array", "with", "a", "given", "shape", ".", "The", "shape", "may", "contain", "string", "attributes", "which", "will", "be", "fetched", "from", "arg0", "to", "the", "function", "(", "usually", "self", ")", "." ]
python
train
Aplopio/django_rip
rip/generic_steps/default_entity_actions.py
https://github.com/Aplopio/django_rip/blob/6b03962ccb778c1a95950a3803e5170c7a2392df/rip/generic_steps/default_entity_actions.py#L33-L54
def read_list(self, request):
    """
    :param request: an apiv2 request object
    :return: request if successful with entities set on request
    """
    request_filters = request.context_params.get(
        self.request_filters_property, {})
    request_filters.update(**self.get_limit_and_offset(request_filters))

    entities = self.get_entity_list(request, **request_filters)
    request.context_params[self.list_property_name] = entities

    # offset and limit don't make sense to get aggregates
    count_request_filters = request_filters.copy()
    count_request_filters.pop('offset', None)
    count_request_filters.pop('limit', None)
    count_request_filters.pop('order_by', None)

    total_count = self.get_entity_list_total_count(
        request, **count_request_filters)
    request.context_params[self.entity_list_total_count_property_name] = \
        total_count
    return request
[ "def", "read_list", "(", "self", ",", "request", ")", ":", "request_filters", "=", "request", ".", "context_params", ".", "get", "(", "self", ".", "request_filters_property", ",", "{", "}", ")", "request_filters", ".", "update", "(", "*", "*", "self", ".", "get_limit_and_offset", "(", "request_filters", ")", ")", "entities", "=", "self", ".", "get_entity_list", "(", "request", ",", "*", "*", "request_filters", ")", "request", ".", "context_params", "[", "self", ".", "list_property_name", "]", "=", "entities", "# offset and limit don't make sense to get aggregates", "count_request_filters", "=", "request_filters", ".", "copy", "(", ")", "count_request_filters", ".", "pop", "(", "'offset'", ",", "None", ")", "count_request_filters", ".", "pop", "(", "'limit'", ",", "None", ")", "count_request_filters", ".", "pop", "(", "'order_by'", ",", "None", ")", "total_count", "=", "self", ".", "get_entity_list_total_count", "(", "request", ",", "*", "*", "count_request_filters", ")", "request", ".", "context_params", "[", "self", ".", "entity_list_total_count_property_name", "]", "=", "total_count", "return", "request" ]
:param request: an apiv2 request object :return: request if successful with entities set on request
[ ":", "param", "request", ":", "an", "apiv2", "request", "object", ":", "return", ":", "request", "if", "successful", "with", "entities", "set", "on", "request" ]
python
train
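The pagination-stripping step above can be checked in isolation; the filter dict below is hypothetical:
request_filters = {'status': 'open', 'offset': 20, 'limit': 10, 'order_by': 'name'}
count_filters = request_filters.copy()
for key in ('offset', 'limit', 'order_by'):
    count_filters.pop(key, None)   # these keys would skew a total count
assert count_filters == {'status': 'open'}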
sentinel-hub/sentinelhub-py
sentinelhub/geometry.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geometry.py#L258-L273
def get_polygon(self, reverse=False): """ Returns a tuple of coordinates of 5 points describing a polygon. Points are listed in clockwise order, first point is the same as the last. :param reverse: `True` if x and y coordinates should be switched and `False` otherwise :type reverse: bool :return: `((x_1, y_1), ... , (x_5, y_5))` :rtype: tuple(tuple(float)) """ bbox = self.reverse() if reverse else self polygon = ((bbox.min_x, bbox.min_y), (bbox.min_x, bbox.max_y), (bbox.max_x, bbox.max_y), (bbox.max_x, bbox.min_y), (bbox.min_x, bbox.min_y)) return polygon
[ "def", "get_polygon", "(", "self", ",", "reverse", "=", "False", ")", ":", "bbox", "=", "self", ".", "reverse", "(", ")", "if", "reverse", "else", "self", "polygon", "=", "(", "(", "bbox", ".", "min_x", ",", "bbox", ".", "min_y", ")", ",", "(", "bbox", ".", "min_x", ",", "bbox", ".", "max_y", ")", ",", "(", "bbox", ".", "max_x", ",", "bbox", ".", "max_y", ")", ",", "(", "bbox", ".", "max_x", ",", "bbox", ".", "min_y", ")", ",", "(", "bbox", ".", "min_x", ",", "bbox", ".", "min_y", ")", ")", "return", "polygon" ]
Returns a tuple of coordinates of 5 points describing a polygon. Points are listed in clockwise order, first point is the same as the last. :param reverse: `True` if x and y coordinates should be switched and `False` otherwise :type reverse: bool :return: `((x_1, y_1), ... , (x_5, y_5))` :rtype: tuple(tuple(float))
[ "Returns", "a", "tuple", "of", "coordinates", "of", "5", "points", "describing", "a", "polygon", ".", "Points", "are", "listed", "in", "clockwise", "order", "first", "point", "is", "the", "same", "as", "the", "last", "." ]
python
train
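The same five-point ring can be built from a bare (min_x, min_y, max_x, max_y) tuple; a self-contained sketch of the logic above:
def bbox_polygon(min_x, min_y, max_x, max_y):
    # clockwise ring; the first point is repeated as the last to close it
    return ((min_x, min_y), (min_x, max_y), (max_x, max_y),
            (max_x, min_y), (min_x, min_y))

ring = bbox_polygon(0, 0, 2, 1)
assert len(ring) == 5 and ring[0] == ring[-1]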
cdumay/kser
src/kser/tracing/task.py
https://github.com/cdumay/kser/blob/fbd6fe9ab34b8b89d9937e5ff727614304af48c1/src/kser/tracing/task.py#L20-L34
def unsafe_execute(self, result=None): """ un-wrapped execution, can raise an exception :return: Execution result :rtype: kser.result.Result """ if result: self.result += result with opentracing.tracer.start_span( obj=self, child_of=KserSpan.extract_span(self), span_factory=KserSpan) as span: self.result = self._onsuccess(self._postrun(self._run())) span.obj = self return self.result
[ "def", "unsafe_execute", "(", "self", ",", "result", "=", "None", ")", ":", "if", "result", ":", "self", ".", "result", "+=", "result", "with", "opentracing", ".", "tracer", ".", "start_span", "(", "obj", "=", "self", ",", "child_of", "=", "KserSpan", ".", "extract_span", "(", "self", ")", ",", "span_factory", "=", "KserSpan", ")", "as", "span", ":", "self", ".", "result", "=", "self", ".", "_onsuccess", "(", "self", ".", "_postrun", "(", "self", ".", "_run", "(", ")", ")", ")", "span", ".", "obj", "=", "self", "return", "self", ".", "result" ]
un-wrapped execution, can raise an exception :return: Execution result :rtype: kser.result.Result
[ "un", "-", "wrapped", "execution", "can", "raise", "an", "exception" ]
python
train
marcomusy/vtkplotter
vtkplotter/utils.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/utils.py#L35-L43
def isSequence(arg): """Check if input is iterable.""" if hasattr(arg, "strip"): return False if hasattr(arg, "__getslice__"): return True if hasattr(arg, "__iter__"): return True return False
[ "def", "isSequence", "(", "arg", ")", ":", "if", "hasattr", "(", "arg", ",", "\"strip\"", ")", ":", "return", "False", "if", "hasattr", "(", "arg", ",", "\"__getslice__\"", ")", ":", "return", "True", "if", "hasattr", "(", "arg", ",", "\"__iter__\"", ")", ":", "return", "True", "return", "False" ]
Check if input is iterable.
[ "Check", "if", "input", "is", "iterable", "." ]
python
train
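Quick behaviour check, assuming isSequence from above is in scope (strings are rejected through their strip attribute):
assert isSequence([1, 2, 3]) is True
assert isSequence((1, 2)) is True
assert isSequence("abc") is False   # str has .strip, so it is not a sequence here
assert isSequence(42) is False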
broadinstitute/fiss
firecloud/api.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/api.py#L259-L273
def get_entities(namespace, workspace, etype): """List entities of given type in a workspace. Response content will be in JSON format. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name etype (str): Entity type Swagger: https://api.firecloud.org/#!/Entities/getEntities """ uri = "workspaces/{0}/{1}/entities/{2}".format(namespace, workspace, etype) return __get(uri)
[ "def", "get_entities", "(", "namespace", ",", "workspace", ",", "etype", ")", ":", "uri", "=", "\"workspaces/{0}/{1}/entities/{2}\"", ".", "format", "(", "namespace", ",", "workspace", ",", "etype", ")", "return", "__get", "(", "uri", ")" ]
List entities of given type in a workspace. Response content will be in JSON format. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name etype (str): Entity type Swagger: https://api.firecloud.org/#!/Entities/getEntities
[ "List", "entities", "of", "given", "type", "in", "a", "workspace", "." ]
python
train
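A hedged sketch of exercising the same endpoint with requests; the base URL, token and entity type are assumptions, and __get() is FISS-internal:
import requests

BASE = "https://api.firecloud.org/api/"   # assumed prefix, not taken from the snippet
uri = "workspaces/{0}/{1}/entities/{2}".format("my-project", "my-workspace", "sample")
resp = requests.get(BASE + uri, headers={"Authorization": "Bearer <token>"})
entities = resp.json()   # expected: a JSON list of entity records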
mitsei/dlkit
dlkit/json_/assessment/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/objects.py#L479-L491
def get_answers(self): """Gets the answers. return: (osid.assessment.AnswerList) - the answers raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.repository.Asset.get_asset_contents_template return AnswerList( self._my_map['answers'], runtime=self._runtime, proxy=self._proxy)
[ "def", "get_answers", "(", "self", ")", ":", "# Implemented from template for osid.repository.Asset.get_asset_contents_template", "return", "AnswerList", "(", "self", ".", "_my_map", "[", "'answers'", "]", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")" ]
Gets the answers. return: (osid.assessment.AnswerList) - the answers raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "answers", "." ]
python
train
pip-services3-python/pip-services3-components-python
pip_services3_components/log/Logger.py
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/log/Logger.py#L138-L152
def error(self, correlation_id, error, message, *args, **kwargs): """ Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message. """ self._format_and_write(LogLevel.Error, correlation_id, error, message, args, kwargs)
[ "def", "error", "(", "self", ",", "correlation_id", ",", "error", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_format_and_write", "(", "LogLevel", ".", "Error", ",", "correlation_id", ",", "error", ",", "message", ",", "args", ",", "kwargs", ")" ]
Logs recoverable application error. :param correlation_id: (optional) transaction id to trace execution through call chain. :param error: an error object associated with this message. :param message: a human-readable message to log. :param args: arguments to parameterize the message. :param kwargs: arguments to parameterize the message.
[ "Logs", "recoverable", "application", "error", "." ]
python
train
Grunny/zap-cli
zapcli/commands/scanners.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/scanners.py#L31-L41
def list_scanners(zap_helper, scanners): """Get a list of scanners and whether or not they are enabled.""" scanner_list = zap_helper.zap.ascan.scanners() if scanners is not None and 'all' not in scanners: scanner_list = filter_by_ids(scanner_list, scanners) click.echo(tabulate([[s['id'], s['name'], s['policyId'], s['enabled'], s['attackStrength'], s['alertThreshold']] for s in scanner_list], headers=['ID', 'Name', 'Policy ID', 'Enabled', 'Strength', 'Threshold'], tablefmt='grid'))
[ "def", "list_scanners", "(", "zap_helper", ",", "scanners", ")", ":", "scanner_list", "=", "zap_helper", ".", "zap", ".", "ascan", ".", "scanners", "(", ")", "if", "scanners", "is", "not", "None", "and", "'all'", "not", "in", "scanners", ":", "scanner_list", "=", "filter_by_ids", "(", "scanner_list", ",", "scanners", ")", "click", ".", "echo", "(", "tabulate", "(", "[", "[", "s", "[", "'id'", "]", ",", "s", "[", "'name'", "]", ",", "s", "[", "'policyId'", "]", ",", "s", "[", "'enabled'", "]", ",", "s", "[", "'attackStrength'", "]", ",", "s", "[", "'alertThreshold'", "]", "]", "for", "s", "in", "scanner_list", "]", ",", "headers", "=", "[", "'ID'", ",", "'Name'", ",", "'Policy ID'", ",", "'Enabled'", ",", "'Strength'", ",", "'Threshold'", "]", ",", "tablefmt", "=", "'grid'", ")", ")" ]
Get a list of scanners and whether or not they are enabled.
[ "Get", "a", "list", "of", "scanners", "and", "whether", "or", "not", "they", "are", "enabled", "." ]
python
train
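The closing tabulate call uses the tabulate package from PyPI; an illustration with a fake scanner row (sample data, not real ZAP output):
from tabulate import tabulate

rows = [['40012', 'Cross Site Scripting', '1', 'true', 'MEDIUM', 'MEDIUM']]
print(tabulate(rows,
               headers=['ID', 'Name', 'Policy ID', 'Enabled', 'Strength', 'Threshold'],
               tablefmt='grid'))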
Riffstation/flask-philo
flask_philo/serializers.py
https://github.com/Riffstation/flask-philo/blob/76c9d562edb4a77010c8da6dfdb6489fa29cbc9e/flask_philo/serializers.py#L102-L119
def to_json(self): """ Returns a json representation """ data = {} for k, v in self.__dict__.items(): if not k.startswith('_'): # values not serializable, should be converted to strings if isinstance(v, datetime): v = utils.datetime_to_string(v) elif isinstance(v, date): v = utils.date_to_string(v) elif isinstance(v, uuid.UUID): v = str(v) elif isinstance(v, Decimal): v = str(v) data[k] = v return data
[ "def", "to_json", "(", "self", ")", ":", "data", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "__dict__", ".", "items", "(", ")", ":", "if", "not", "k", ".", "startswith", "(", "'_'", ")", ":", "# values not serializable, should be converted to strings", "if", "isinstance", "(", "v", ",", "datetime", ")", ":", "v", "=", "utils", ".", "datetime_to_string", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "date", ")", ":", "v", "=", "utils", ".", "date_to_string", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "uuid", ".", "UUID", ")", ":", "v", "=", "str", "(", "v", ")", "elif", "isinstance", "(", "v", ",", "Decimal", ")", ":", "v", "=", "str", "(", "v", ")", "data", "[", "k", "]", "=", "v", "return", "data" ]
Returns a json representation
[ "Returns", "a", "json", "representation" ]
python
train
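The per-type conversion can be checked standalone; str() stands in for the project's utils helpers here:
import uuid
import datetime
from decimal import Decimal

for v in (datetime.datetime(2020, 1, 2, 3, 4), datetime.date(2020, 1, 2),
          uuid.uuid4(), Decimal('1.50')):
    print(type(v).__name__, '->', str(v))   # every value ends up JSON-safe as a string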
tcalmant/ipopo
pelix/framework.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L810-L819
def get_property(self, name): # type: (str) -> object """ Retrieves a framework or system property. As framework properties don't change while it's running, this method doesn't need to be protected. :param name: The property name """ with self.__properties_lock: return self.__properties.get(name, os.getenv(name))
[ "def", "get_property", "(", "self", ",", "name", ")", ":", "# type: (str) -> object", "with", "self", ".", "__properties_lock", ":", "return", "self", ".", "__properties", ".", "get", "(", "name", ",", "os", ".", "getenv", "(", "name", ")", ")" ]
Retrieves a framework or system property. As framework properties don't change while it's running, this method doesn't need to be protected. :param name: The property name
[ "Retrieves", "a", "framework", "or", "system", "property", ".", "As", "framework", "properties", "don", "t", "change", "while", "it", "s", "running", "this", "method", "doesn", "t", "need", "to", "be", "protected", "." ]
python
train
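The fallback chain is just dict.get with an environment-variable default; standalone:
import os

props = {'pelix.debug': 'true'}                # hypothetical framework properties
value = props.get('HOME', os.getenv('HOME'))   # property first, then the env var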
RudolfCardinal/pythonlib
cardinal_pythonlib/interval.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/interval.py#L528-L545
def saturdays_of_weekends(self) -> Set[datetime.date]: """ Returns the dates of all Saturdays that are part of weekends that this interval covers (each Saturday representing a unique identifier for that weekend). The Saturday itself isn't necessarily the part of the weekend that the interval covers! """ startdate = self.start.date() enddate = self.end.date() ndays = (enddate - startdate).days + 1 saturdays = set() for i in range(ndays): date = startdate + datetime.timedelta(days=i) if is_saturday(date): saturdays.add(date) elif is_sunday(date): saturdays.add(date - datetime.timedelta(days=1)) return saturdays
[ "def", "saturdays_of_weekends", "(", "self", ")", "->", "Set", "[", "datetime", ".", "date", "]", ":", "startdate", "=", "self", ".", "start", ".", "date", "(", ")", "enddate", "=", "self", ".", "end", ".", "date", "(", ")", "ndays", "=", "(", "enddate", "-", "startdate", ")", ".", "days", "+", "1", "saturdays", "=", "set", "(", ")", "for", "i", "in", "range", "(", "ndays", ")", ":", "date", "=", "startdate", "+", "datetime", ".", "timedelta", "(", "days", "=", "i", ")", "if", "is_saturday", "(", "date", ")", ":", "saturdays", ".", "add", "(", "date", ")", "elif", "is_sunday", "(", "date", ")", ":", "saturdays", ".", "add", "(", "date", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", ")", "return", "saturdays" ]
Returns the dates of all Saturdays that are part of weekends that this interval covers (each Saturday representing a unique identifier for that weekend). The Saturday itself isn't necessarily the part of the weekend that the interval covers!
[ "Returns", "the", "dates", "of", "all", "Saturdays", "that", "are", "part", "of", "weekends", "that", "this", "interval", "covers", "(", "each", "Saturday", "representing", "a", "unique", "identifier", "for", "that", "weekend", ")", ".", "The", "Saturday", "itself", "isn", "t", "necessarily", "the", "part", "of", "the", "weekend", "that", "the", "interval", "covers!" ]
python
train
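The weekend-to-Saturday mapping in isolation, with weekday() checks standing in for the is_saturday/is_sunday helpers:
import datetime

start, end = datetime.date(2023, 1, 6), datetime.date(2023, 1, 16)
saturdays = set()
for i in range((end - start).days + 1):
    d = start + datetime.timedelta(days=i)
    if d.weekday() == 5:                        # Saturday
        saturdays.add(d)
    elif d.weekday() == 6:                      # Sunday maps to its Saturday
        saturdays.add(d - datetime.timedelta(days=1))
assert saturdays == {datetime.date(2023, 1, 7), datetime.date(2023, 1, 14)}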
Erotemic/utool
utool/util_list.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L923-L929
def ziptake(items_list, indexes_list): """ SeeAlso: vt.ziptake """ return [take(list_, index_list) for list_, index_list in zip(items_list, indexes_list)]
[ "def", "ziptake", "(", "items_list", ",", "indexes_list", ")", ":", "return", "[", "take", "(", "list_", ",", "index_list", ")", "for", "list_", ",", "index_list", "in", "zip", "(", "items_list", ",", "indexes_list", ")", "]" ]
SeeAlso: vt.ziptake
[ "SeeAlso", ":", "vt", ".", "ziptake" ]
python
train
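Semantics check with a local take() (plain list indexing), since the vt.take it mirrors is not shown here:
def take(list_, idxs):
    return [list_[i] for i in idxs]

items = [['a', 'b', 'c'], ['d', 'e']]
indexes = [[0, 2], [1]]
assert [take(l, ix) for l, ix in zip(items, indexes)] == [['a', 'c'], ['e']]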
sentinel-hub/sentinelhub-py
sentinelhub/geopedia.py
https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geopedia.py#L30-L49
def _parse_layer(layer, return_wms_name=False): """ Helper function for parsing Geopedia layer name. If WMS name is required and wrong form is given it will return a string with 'ttl' at the beginning. (WMS name can also start with something else, e.g. only 't' instead 'ttl', therefore anything else is also allowed.) Otherwise it will parse it into a number. """ if not isinstance(layer, (int, str)): raise ValueError("Parameter 'layer' should be an integer or a string, but {} found".format(type(layer))) if return_wms_name: if isinstance(layer, int) or layer.isdigit(): return 'ttl{}'.format(layer) return layer if isinstance(layer, str): stripped_layer = layer.lstrip('tl') if not stripped_layer.isdigit(): raise ValueError("Parameter 'layer' has unsupported value {}, expected an integer".format(layer)) layer = stripped_layer return int(layer)
[ "def", "_parse_layer", "(", "layer", ",", "return_wms_name", "=", "False", ")", ":", "if", "not", "isinstance", "(", "layer", ",", "(", "int", ",", "str", ")", ")", ":", "raise", "ValueError", "(", "\"Parameter 'layer' should be an integer or a string, but {} found\"", ".", "format", "(", "type", "(", "layer", ")", ")", ")", "if", "return_wms_name", ":", "if", "isinstance", "(", "layer", ",", "int", ")", "or", "layer", ".", "isdigit", "(", ")", ":", "return", "'ttl{}'", ".", "format", "(", "layer", ")", "return", "layer", "if", "isinstance", "(", "layer", ",", "str", ")", ":", "stripped_layer", "=", "layer", ".", "lstrip", "(", "'tl'", ")", "if", "not", "stripped_layer", ".", "isdigit", "(", ")", ":", "raise", "ValueError", "(", "\"Parameter 'layer' has unsupported value {}, expected an integer\"", ".", "format", "(", "layer", ")", ")", "layer", "=", "stripped_layer", "return", "int", "(", "layer", ")" ]
Helper function for parsing Geopedia layer name. If WMS name is required and wrong form is given it will return a string with 'ttl' at the beginning. (WMS name can also start with something else, e.g. only 't' instead 'ttl', therefore anything else is also allowed.) Otherwise it will parse it into a number.
[ "Helper", "function", "for", "parsing", "Geopedia", "layer", "name", ".", "If", "WMS", "name", "is", "required", "and", "wrong", "form", "is", "given", "it", "will", "return", "a", "string", "with", "ttl", "at", "the", "beginning", ".", "(", "WMS", "name", "can", "also", "start", "with", "something", "else", "e", ".", "g", ".", "only", "t", "instead", "ttl", "therefore", "anything", "else", "is", "also", "allowed", ".", ")", "Otherwise", "it", "will", "parse", "it", "into", "a", "number", "." ]
python
train
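The lstrip-based parsing in isolation; note that str.lstrip('tl') strips any leading run of 't' and 'l' characters, which is why both 'ttl1749' and 't1749' are accepted:
for layer in ('ttl1749', 't1749', '1749'):
    assert int(layer.lstrip('tl')) == 1749
assert 'ttl{}'.format(1749) == 'ttl1749'   # the reverse, WMS-name direction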
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L2883-L2888
def isTrackedDeviceConnected(self, unDeviceIndex): """Returns true if there is a device connected in this slot.""" fn = self.function_table.isTrackedDeviceConnected result = fn(unDeviceIndex) return result
[ "def", "isTrackedDeviceConnected", "(", "self", ",", "unDeviceIndex", ")", ":", "fn", "=", "self", ".", "function_table", ".", "isTrackedDeviceConnected", "result", "=", "fn", "(", "unDeviceIndex", ")", "return", "result" ]
Returns true if there is a device connected in this slot.
[ "Returns", "true", "if", "there", "is", "a", "device", "connected", "in", "this", "slot", "." ]
python
train
inveniosoftware/invenio-communities
invenio_communities/models.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/models.py#L257-L284
def filter_communities(cls, p, so, with_deleted=False): """Search for communities. Helper function which takes from database only those communities which match search criteria. Uses parameter 'so' to set communities in the correct order. Parameter 'page' is introduced to restrict results and return only slice of them for the current page. If page == 0 function will return all communities that match the pattern. """ query = cls.query if with_deleted else \ cls.query.filter(cls.deleted_at.is_(None)) if p: p = p.replace(' ', '%') query = query.filter(db.or_( cls.id.ilike('%' + p + '%'), cls.title.ilike('%' + p + '%'), cls.description.ilike('%' + p + '%'), )) if so in current_app.config['COMMUNITIES_SORTING_OPTIONS']: order = so == 'title' and db.asc or db.desc query = query.order_by(order(getattr(cls, so))) else: query = query.order_by(db.desc(cls.ranking)) return query
[ "def", "filter_communities", "(", "cls", ",", "p", ",", "so", ",", "with_deleted", "=", "False", ")", ":", "query", "=", "cls", ".", "query", "if", "with_deleted", "else", "cls", ".", "query", ".", "filter", "(", "cls", ".", "deleted_at", ".", "is_", "(", "None", ")", ")", "if", "p", ":", "p", "=", "p", ".", "replace", "(", "' '", ",", "'%'", ")", "query", "=", "query", ".", "filter", "(", "db", ".", "or_", "(", "cls", ".", "id", ".", "ilike", "(", "'%'", "+", "p", "+", "'%'", ")", ",", "cls", ".", "title", ".", "ilike", "(", "'%'", "+", "p", "+", "'%'", ")", ",", "cls", ".", "description", ".", "ilike", "(", "'%'", "+", "p", "+", "'%'", ")", ",", ")", ")", "if", "so", "in", "current_app", ".", "config", "[", "'COMMUNITIES_SORTING_OPTIONS'", "]", ":", "order", "=", "so", "==", "'title'", "and", "db", ".", "asc", "or", "db", ".", "desc", "query", "=", "query", ".", "order_by", "(", "order", "(", "getattr", "(", "cls", ",", "so", ")", ")", ")", "else", ":", "query", "=", "query", ".", "order_by", "(", "db", ".", "desc", "(", "cls", ".", "ranking", ")", ")", "return", "query" ]
Search for communities. Helper function which takes from database only those communities which match search criteria. Uses parameter 'so' to set communities in the correct order. Parameter 'page' is introduced to restrict results and return only slice of them for the current page. If page == 0 function will return all communities that match the pattern.
[ "Search", "for", "communities", "." ]
python
train
vertexproject/synapse
synapse/common.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/common.py#L196-L214
def genfile(*paths): ''' Create or open ( for read/write ) a file path join. Args: *paths: A list of paths to join together to make the file. Notes: If the file already exists, the fd returned is opened in ``r+b`` mode. Otherwise, the fd is opened in ``w+b`` mode. Returns: io.BufferedRandom: A file-object which can be read/written to. ''' path = genpath(*paths) gendir(os.path.dirname(path)) if not os.path.isfile(path): return io.open(path, 'w+b') return io.open(path, 'r+b')
[ "def", "genfile", "(", "*", "paths", ")", ":", "path", "=", "genpath", "(", "*", "paths", ")", "gendir", "(", "os", ".", "path", ".", "dirname", "(", "path", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "return", "io", ".", "open", "(", "path", ",", "'w+b'", ")", "return", "io", ".", "open", "(", "path", ",", "'r+b'", ")" ]
Create or open ( for read/write ) a file path join. Args: *paths: A list of paths to join together to make the file. Notes: If the file already exists, the fd returned is opened in ``r+b`` mode. Otherwise, the fd is opened in ``w+b`` mode. Returns: io.BufferedRandom: A file-object which can be read/written to.
[ "Create", "or", "open", "(", "for", "read", "/", "write", ")", "a", "file", "path", "join", "." ]
python
train
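The mode selection demonstrated on a temp path (same rule: w+b creates, r+b preserves existing bytes):
import io
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'data.bin')
mode = 'r+b' if os.path.isfile(path) else 'w+b'     # first call: 'w+b'
with io.open(path, mode) as fd:
    fd.write(b'payload')
assert ('r+b' if os.path.isfile(path) else 'w+b') == 'r+b'   # later calls reuse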
klahnakoski/pyLibrary
pyLibrary/env/emailer.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/pyLibrary/env/emailer.py#L63-L111
def send_email(self, from_address=None, to_address=None, subject=None, text_data=None, html_data=None ): """Sends an email. from_addr is an email address; to_addrs is a list of email addresses. Addresses can be plain (e.g. "[email protected]") or with real names (e.g. "John Smith <[email protected]>"). text_data and html_data are both strings. You can specify one or both. If you specify both, the email will be sent as a MIME multipart alternative, i.e., the recipient will see the HTML content if his viewer supports it; otherwise he'll see the text content. """ settings = self.settings from_address = coalesce(from_address, settings["from"], settings.from_address) to_address = listwrap(coalesce(to_address, settings.to_address, settings.to_addrs)) if not from_address or not to_address: raise Exception("Both from_addr and to_addrs must be specified") if not text_data and not html_data: raise Exception("Must specify either text_data or html_data") if not html_data: msg = MIMEText(text_data) elif not text_data: msg = MIMEText(html_data, 'html') else: msg = MIMEMultipart('alternative') msg.attach(MIMEText(text_data, 'plain')) msg.attach(MIMEText(html_data, 'html')) msg['Subject'] = coalesce(subject, settings.subject) msg['From'] = from_address msg['To'] = ', '.join(to_address) if self.server: # CALL AS PART OF A SMTP SESSION self.server.sendmail(from_address, to_address, msg.as_string()) else: # CALL AS STAND-ALONE with self: self.server.sendmail(from_address, to_address, msg.as_string())
[ "def", "send_email", "(", "self", ",", "from_address", "=", "None", ",", "to_address", "=", "None", ",", "subject", "=", "None", ",", "text_data", "=", "None", ",", "html_data", "=", "None", ")", ":", "settings", "=", "self", ".", "settings", "from_address", "=", "coalesce", "(", "from_address", ",", "settings", "[", "\"from\"", "]", ",", "settings", ".", "from_address", ")", "to_address", "=", "listwrap", "(", "coalesce", "(", "to_address", ",", "settings", ".", "to_address", ",", "settings", ".", "to_addrs", ")", ")", "if", "not", "from_address", "or", "not", "to_address", ":", "raise", "Exception", "(", "\"Both from_addr and to_addrs must be specified\"", ")", "if", "not", "text_data", "and", "not", "html_data", ":", "raise", "Exception", "(", "\"Must specify either text_data or html_data\"", ")", "if", "not", "html_data", ":", "msg", "=", "MIMEText", "(", "text_data", ")", "elif", "not", "text_data", ":", "msg", "=", "MIMEText", "(", "html_data", ",", "'html'", ")", "else", ":", "msg", "=", "MIMEMultipart", "(", "'alternative'", ")", "msg", ".", "attach", "(", "MIMEText", "(", "text_data", ",", "'plain'", ")", ")", "msg", ".", "attach", "(", "MIMEText", "(", "html_data", ",", "'html'", ")", ")", "msg", "[", "'Subject'", "]", "=", "coalesce", "(", "subject", ",", "settings", ".", "subject", ")", "msg", "[", "'From'", "]", "=", "from_address", "msg", "[", "'To'", "]", "=", "', '", ".", "join", "(", "to_address", ")", "if", "self", ".", "server", ":", "# CALL AS PART OF A SMTP SESSION", "self", ".", "server", ".", "sendmail", "(", "from_address", ",", "to_address", ",", "msg", ".", "as_string", "(", ")", ")", "else", ":", "# CALL AS STAND-ALONE", "with", "self", ":", "self", ".", "server", ".", "sendmail", "(", "from_address", ",", "to_address", ",", "msg", ".", "as_string", "(", ")", ")" ]
Sends an email. from_addr is an email address; to_addrs is a list of email addresses. Addresses can be plain (e.g. "[email protected]") or with real names (e.g. "John Smith <[email protected]>"). text_data and html_data are both strings. You can specify one or both. If you specify both, the email will be sent as a MIME multipart alternative, i.e., the recipient will see the HTML content if his viewer supports it; otherwise he'll see the text content.
[ "Sends", "an", "email", "." ]
python
train
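Building the multipart/alternative message is plain stdlib and runs without an SMTP server:
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart

msg = MIMEMultipart('alternative')
msg.attach(MIMEText('plain body', 'plain'))
msg.attach(MIMEText('<b>html body</b>', 'html'))
msg['Subject'], msg['From'], msg['To'] = 'Hi', '[email protected]', '[email protected]'
print(msg.as_string()[:80])   # capable viewers render the HTML part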
tgbugs/pyontutils
pyontutils/sheets.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/sheets.py#L32-L59
def update_sheet_values(spreadsheet_name, sheet_name, values, spreadsheet_service=None): SPREADSHEET_ID = devconfig.secrets(spreadsheet_name) if spreadsheet_service is None: service = get_oauth_service(readonly=False) ss = service.spreadsheets() else: ss = spreadsheet_service """ requests = [ {'updateCells': { 'start': {'sheetId': TODO, 'rowIndex': 0, 'columnIndex': 0} 'rows': {'values'} } }] response = ss.batchUpdate( spreadsheetId=SPREADSHEET_ID, range=sheet_name, body=body).execute() """ body = {'values': values} response = ss.values().update( spreadsheetId=SPREADSHEET_ID, range=sheet_name, valueInputOption='USER_ENTERED', body=body).execute() return response
[ "def", "update_sheet_values", "(", "spreadsheet_name", ",", "sheet_name", ",", "values", ",", "spreadsheet_service", "=", "None", ")", ":", "SPREADSHEET_ID", "=", "devconfig", ".", "secrets", "(", "spreadsheet_name", ")", "if", "spreadsheet_service", "is", "None", ":", "service", "=", "get_oauth_service", "(", "readonly", "=", "False", ")", "ss", "=", "service", ".", "spreadsheets", "(", ")", "else", ":", "ss", "=", "spreadsheet_service", "body", "=", "{", "'values'", ":", "values", "}", "response", "=", "ss", ".", "values", "(", ")", ".", "update", "(", "spreadsheetId", "=", "SPREADSHEET_ID", ",", "range", "=", "sheet_name", ",", "valueInputOption", "=", "'USER_ENTERED'", ",", "body", "=", "body", ")", ".", "execute", "(", ")", "return", "response" ]
requests = [ {'updateCells': { 'start': {'sheetId': TODO, 'rowIndex': 0, 'columnIndex': 0} 'rows': {'values'} } }] response = ss.batchUpdate( spreadsheetId=SPREADSHEET_ID, range=sheet_name, body=body).execute()
[ "requests", "=", "[", "{", "updateCells", ":", "{", "start", ":", "{", "sheetId", ":", "TODO", "rowIndex", ":", "0", "columnIndex", ":", "0", "}", "rows", ":", "{", "values", "}", "}", "}", "]", "response", "=", "ss", ".", "batchUpdate", "(", "spreadsheetId", "=", "SPREADSHEET_ID", "range", "=", "sheet_name", "body", "=", "body", ")", ".", "execute", "()" ]
python
train
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L1438-L1553
def template(self, data=None, settings=None): """ Python micro-templating, similar to John Resig's implementation. Underscore templating handles arbitrary delimiters, preserves whitespace, and correctly escapes quotes within interpolated code. """ if settings is None: settings = {} ts = _.templateSettings _.defaults(ts, self.templateSettings) _.extend(settings, ts) # settings = { # "interpolate": self.templateSettings.get('interpolate'), # "evaluate": self.templateSettings.get('evaluate'), # "escape": self.templateSettings.get('escape') # } _.extend(settings, { "escaper": r"\\|'|\r|\n|\t|\u2028|\u2029", "unescaper": r"\\(\\|'|r|n|t|u2028|u2029)" }) src = self.obj #src = re.sub('"', r'\"', src) #src = re.sub(r'\\', r"\\", src) ns = self.Namespace() ns.indent_level = 1 def unescape(code): def unescapes(matchobj): a = re.sub("^[\'\"]|[\'\"]$", "", ("%r" % matchobj.group(1))) # Python doesn't accept \n as a key if a == '\n': a = "bn" if a == '\r': a = "br" if a == '\t': a = "bt" if a == '\u2028': a = 'bu2028' if a == '\u2029': a = 'bu2029' return self.escapes[a] return re.sub(settings.get('unescaper'), unescapes, code) def escapes(matchobj): a = matchobj.group(0) # Python doesn't accept \n as a key if a == '\n': a = "bn" if a == '\r': a = "br" if a == '\t': a = "bt" if a == '\u2028': a = 'bu2028' if a == '\u2029': a = 'bu2029' return '\\' + self.escapes[a] def indent(n=None): if n is not None: ns.indent_level += n return " " * ns.indent_level def interpolate(matchobj): if getattr(str, 'decode', False): key = (matchobj.group(1).decode('string-escape')).strip() else: key = (bytes(matchobj.group(1), "utf-8").decode()).strip() return "' + str(" + unescape(key) + " or '') + '" def evaluate(matchobj): if getattr(str, 'decode', False): code = (matchobj.group(1).decode('string-escape')).strip() else: code = (bytes(matchobj.group(1), "utf-8").decode()).strip() if code.startswith("end"): return "')\n" + indent(-1) + "ns.__p += ('" elif code.endswith(':'): return "')\n" + indent() + unescape(code) + \ "\n" + indent(+1) + "ns.__p += ('" else: return "')\n" + indent() + unescape(code) + \ "\n" + indent() + "ns.__p += ('" def escape(matchobj): if getattr(str, 'decode', False): key = (matchobj.group(1).decode('string-escape')).strip() else: key = (bytes(matchobj.group(1), "utf-8").decode()).strip() return "' + _.escape(str(" + unescape(key) + " or '')) + '" source = indent() + 'class closure(object):\n pass' + \ ' # for full closure support\n' source += indent() + 'ns = closure()\n' source += indent() + "ns.__p = ''\n" #src = re.sub("^[\'\"]|[\'\"]$", "", ("%r" % src)) src = re.sub(settings.get("escaper"), escapes, src) source += indent() + "ns.__p += ('" + \ re.sub(settings.get('escape'), escape, src) + "')\n" source = re.sub(settings.get('interpolate'), interpolate, source) source = re.sub(settings.get('evaluate'), evaluate, source) if getattr(str, 'decode', False): source += indent() + 'return ns.__p.decode("string_escape")\n' else: source += indent() + 'return bytes(ns.__p, "utf-8").decode()\n' f = self.create_function(settings.get("variable") or "obj=None", source) if data is not None: return f(data) return f
[ "def", "template", "(", "self", ",", "data", "=", "None", ",", "settings", "=", "None", ")", ":", "if", "settings", "is", "None", ":", "settings", "=", "{", "}", "ts", "=", "_", ".", "templateSettings", "_", ".", "defaults", "(", "ts", ",", "self", ".", "templateSettings", ")", "_", ".", "extend", "(", "settings", ",", "ts", ")", "# settings = {", "# \"interpolate\": self.templateSettings.get('interpolate'),", "# \"evaluate\": self.templateSettings.get('evaluate'),", "# \"escape\": self.templateSettings.get('escape')", "# }", "_", ".", "extend", "(", "settings", ",", "{", "\"escaper\"", ":", "r\"\\\\|'|\\r|\\n|\\t|\\u2028|\\u2029\"", ",", "\"unescaper\"", ":", "r\"\\\\(\\\\|'|r|n|t|u2028|u2029)\"", "}", ")", "src", "=", "self", ".", "obj", "#src = re.sub('\"', r'\\\"', src)", "#src = re.sub(r'\\\\', r\"\\\\\", src)", "ns", "=", "self", ".", "Namespace", "(", ")", "ns", ".", "indent_level", "=", "1", "def", "unescape", "(", "code", ")", ":", "def", "unescapes", "(", "matchobj", ")", ":", "a", "=", "re", ".", "sub", "(", "\"^[\\'\\\"]|[\\'\\\"]$\"", ",", "\"\"", ",", "(", "\"%r\"", "%", "matchobj", ".", "group", "(", "1", ")", ")", ")", "# Python doesn't accept \\n as a key", "if", "a", "==", "'\\n'", ":", "a", "=", "\"bn\"", "if", "a", "==", "'\\r'", ":", "a", "=", "\"br\"", "if", "a", "==", "'\\t'", ":", "a", "=", "\"bt\"", "if", "a", "==", "'\\u2028'", ":", "a", "=", "'bu2028'", "if", "a", "==", "'\\u2029'", ":", "a", "=", "'bu2029'", "return", "self", ".", "escapes", "[", "a", "]", "return", "re", ".", "sub", "(", "settings", ".", "get", "(", "'unescaper'", ")", ",", "unescapes", ",", "code", ")", "def", "escapes", "(", "matchobj", ")", ":", "a", "=", "matchobj", ".", "group", "(", "0", ")", "# Python doesn't accept \\n as a key", "if", "a", "==", "'\\n'", ":", "a", "=", "\"bn\"", "if", "a", "==", "'\\r'", ":", "a", "=", "\"br\"", "if", "a", "==", "'\\t'", ":", "a", "=", "\"bt\"", "if", "a", "==", "'\\u2028'", ":", "a", "=", "'bu2028'", "if", "a", "==", "'\\u2029'", ":", "a", "=", "'bu2029'", "return", "'\\\\'", "+", "self", ".", "escapes", "[", "a", "]", "def", "indent", "(", "n", "=", "None", ")", ":", "if", "n", "is", "not", "None", ":", "ns", ".", "indent_level", "+=", "n", "return", "\" \"", "*", "ns", ".", "indent_level", "def", "interpolate", "(", "matchobj", ")", ":", "if", "getattr", "(", "str", ",", "'decode'", ",", "False", ")", ":", "key", "=", "(", "matchobj", ".", "group", "(", "1", ")", ".", "decode", "(", "'string-escape'", ")", ")", ".", "strip", "(", ")", "else", ":", "key", "=", "(", "bytes", "(", "matchobj", ".", "group", "(", "1", ")", ",", "\"utf-8\"", ")", ".", "decode", "(", ")", ")", ".", "strip", "(", ")", "return", "\"' + str(\"", "+", "unescape", "(", "key", ")", "+", "\" or '') + '\"", "def", "evaluate", "(", "matchobj", ")", ":", "if", "getattr", "(", "str", ",", "'decode'", ",", "False", ")", ":", "code", "=", "(", "matchobj", ".", "group", "(", "1", ")", ".", "decode", "(", "'string-escape'", ")", ")", ".", "strip", "(", ")", "else", ":", "code", "=", "(", "bytes", "(", "matchobj", ".", "group", "(", "1", ")", ",", "\"utf-8\"", ")", ".", "decode", "(", ")", ")", ".", "strip", "(", ")", "if", "code", ".", "startswith", "(", "\"end\"", ")", ":", "return", "\"')\\n\"", "+", "indent", "(", "-", "1", ")", "+", "\"ns.__p += ('\"", "elif", "code", ".", "endswith", "(", "':'", ")", ":", "return", "\"')\\n\"", "+", "indent", "(", ")", "+", "unescape", "(", "code", ")", "+", "\"\\n\"", "+", "indent", "(", "+", "1", ")", "+", "\"ns.__p += ('\"", "else", ":", 
"return", "\"')\\n\"", "+", "indent", "(", ")", "+", "unescape", "(", "code", ")", "+", "\"\\n\"", "+", "indent", "(", ")", "+", "\"ns.__p += ('\"", "def", "escape", "(", "matchobj", ")", ":", "if", "getattr", "(", "str", ",", "'decode'", ",", "False", ")", ":", "key", "=", "(", "matchobj", ".", "group", "(", "1", ")", ".", "decode", "(", "'string-escape'", ")", ")", ".", "strip", "(", ")", "else", ":", "key", "=", "(", "bytes", "(", "matchobj", ".", "group", "(", "1", ")", ",", "\"utf-8\"", ")", ".", "decode", "(", ")", ")", ".", "strip", "(", ")", "return", "\"' + _.escape(str(\"", "+", "unescape", "(", "key", ")", "+", "\" or '')) + '\"", "source", "=", "indent", "(", ")", "+", "'class closure(object):\\n pass'", "+", "' # for full closure support\\n'", "source", "+=", "indent", "(", ")", "+", "'ns = closure()\\n'", "source", "+=", "indent", "(", ")", "+", "\"ns.__p = ''\\n\"", "#src = re.sub(\"^[\\'\\\"]|[\\'\\\"]$\", \"\", (\"%r\" % src))", "src", "=", "re", ".", "sub", "(", "settings", ".", "get", "(", "\"escaper\"", ")", ",", "escapes", ",", "src", ")", "source", "+=", "indent", "(", ")", "+", "\"ns.__p += ('\"", "+", "re", ".", "sub", "(", "settings", ".", "get", "(", "'escape'", ")", ",", "escape", ",", "src", ")", "+", "\"')\\n\"", "source", "=", "re", ".", "sub", "(", "settings", ".", "get", "(", "'interpolate'", ")", ",", "interpolate", ",", "source", ")", "source", "=", "re", ".", "sub", "(", "settings", ".", "get", "(", "'evaluate'", ")", ",", "evaluate", ",", "source", ")", "if", "getattr", "(", "str", ",", "'decode'", ",", "False", ")", ":", "source", "+=", "indent", "(", ")", "+", "'return ns.__p.decode(\"string_escape\")\\n'", "else", ":", "source", "+=", "indent", "(", ")", "+", "'return bytes(ns.__p, \"utf-8\").decode()\\n'", "f", "=", "self", ".", "create_function", "(", "settings", ".", "get", "(", "\"variable\"", ")", "or", "\"obj=None\"", ",", "source", ")", "if", "data", "is", "not", "None", ":", "return", "f", "(", "data", ")", "return", "f" ]
Python micro-templating, similar to John Resig's implementation. Underscore templating handles arbitrary delimiters, preserves whitespace, and correctly escapes quotes within interpolated code.
[ "Python", "micro", "-", "templating", "similar", "to", "John", "Resig", "s", "implementation", ".", "Underscore", "templating", "handles", "arbitrary", "delimiters", "preserves", "whitespace", "and", "correctly", "escapes", "quotes", "within", "interpolated", "code", "." ]
python
train
Shapeways/coyote_framework
coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/webdriver/webdriverwrapper/WebDriverWrapper.py#L201-L224
def visit(self, url=''): """ Driver gets the provided url in the browser, returns True if successful url -- An absolute or relative url stored as a string """ def _visit(url): if len(url) > 0 and url[0] == '/': # url's first character is a forward slash; treat as relative path path = url full_url = self.driver.current_url parsed_url = urlparse(full_url) base_url = str(parsed_url.scheme) + '://' + str(parsed_url.netloc) url = urljoin(base_url, path) try: return self.driver.get(url) except TimeoutException: if self.ignore_page_load_timeouts: pass else: raise PageTimeoutException.PageTimeoutException(self, url) return self.execute_and_handle_webdriver_exceptions(lambda: _visit(url))
[ "def", "visit", "(", "self", ",", "url", "=", "''", ")", ":", "def", "_visit", "(", "url", ")", ":", "if", "len", "(", "url", ")", ">", "0", "and", "url", "[", "0", "]", "==", "'/'", ":", "# url's first character is a forward slash; treat as relative path", "path", "=", "url", "full_url", "=", "self", ".", "driver", ".", "current_url", "parsed_url", "=", "urlparse", "(", "full_url", ")", "base_url", "=", "str", "(", "parsed_url", ".", "scheme", ")", "+", "'://'", "+", "str", "(", "parsed_url", ".", "netloc", ")", "url", "=", "urljoin", "(", "base_url", ",", "path", ")", "try", ":", "return", "self", ".", "driver", ".", "get", "(", "url", ")", "except", "TimeoutException", ":", "if", "self", ".", "ignore_page_load_timeouts", ":", "pass", "else", ":", "raise", "PageTimeoutException", ".", "PageTimeoutException", "(", "self", ",", "url", ")", "return", "self", ".", "execute_and_handle_webdriver_exceptions", "(", "lambda", ":", "_visit", "(", "url", ")", ")" ]
Driver gets the provided url in the browser, returns True if successful url -- An absolute or relative url stored as a string
[ "Driver", "gets", "the", "provided", "url", "in", "the", "browser", "returns", "True", "if", "successful" ]
python
train
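The relative-path branch in isolation; this uses the Python 3 spelling of the imports, where the code above imports urlparse/urljoin at module level:
from urllib.parse import urlparse, urljoin

current = 'https://example.com/cart/items?page=2'
parsed = urlparse(current)
base = '{}://{}'.format(parsed.scheme, parsed.netloc)
assert urljoin(base, '/checkout') == 'https://example.com/checkout'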
Opentrons/opentrons
api/src/opentrons/protocol_api/contexts.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/contexts.py#L443-L448
def home(self): """ Homes the robot. """ self._log.debug("home") self._location_cache = None self._hw_manager.hardware.home()
[ "def", "home", "(", "self", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"home\"", ")", "self", ".", "_location_cache", "=", "None", "self", ".", "_hw_manager", ".", "hardware", ".", "home", "(", ")" ]
Homes the robot.
[ "Homes", "the", "robot", "." ]
python
train
pvlib/pvlib-python
pvlib/clearsky.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/clearsky.py#L563-L570
def _calc_d(aod700, p): """Calculate the d coefficient.""" p0 = 101325. dp = 1/(18 + 152*aod700) d = -0.337*aod700**2 + 0.63*aod700 + 0.116 + dp*np.log(p/p0) return d
[ "def", "_calc_d", "(", "aod700", ",", "p", ")", ":", "p0", "=", "101325.", "dp", "=", "1", "/", "(", "18", "+", "152", "*", "aod700", ")", "d", "=", "-", "0.337", "*", "aod700", "**", "2", "+", "0.63", "*", "aod700", "+", "0.116", "+", "dp", "*", "np", ".", "log", "(", "p", "/", "p0", ")", "return", "d" ]
Calculate the d coefficient.
[ "Calculate", "the", "d", "coefficient", "." ]
python
train
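Numeric sanity check of the coefficient; the inputs are illustrative, not validated against the Simplified Solis reference:
import numpy as np

def calc_d(aod700, p, p0=101325.0):
    dp = 1 / (18 + 152 * aod700)
    return -0.337 * aod700**2 + 0.63 * aod700 + 0.116 + dp * np.log(p / p0)

print(calc_d(0.1, 101325.0))   # ~0.1756; the log term vanishes at p == p0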
bmuller/kademlia
kademlia/network.py
https://github.com/bmuller/kademlia/blob/4a8d445c9ee8f3ca10f56107e4445daed4933c8a/kademlia/network.py#L195-L211
def save_state(self, fname): """ Save the state of this node (the alpha/ksize/id/immediate neighbors) to a cache file with the given fname. """ log.info("Saving state to %s", fname) data = { 'ksize': self.ksize, 'alpha': self.alpha, 'id': self.node.id, 'neighbors': self.bootstrappable_neighbors() } if not data['neighbors']: log.warning("No known neighbors, so not writing to cache.") return with open(fname, 'wb') as file: pickle.dump(data, file)
[ "def", "save_state", "(", "self", ",", "fname", ")", ":", "log", ".", "info", "(", "\"Saving state to %s\"", ",", "fname", ")", "data", "=", "{", "'ksize'", ":", "self", ".", "ksize", ",", "'alpha'", ":", "self", ".", "alpha", ",", "'id'", ":", "self", ".", "node", ".", "id", ",", "'neighbors'", ":", "self", ".", "bootstrappable_neighbors", "(", ")", "}", "if", "not", "data", "[", "'neighbors'", "]", ":", "log", ".", "warning", "(", "\"No known neighbors, so not writing to cache.\"", ")", "return", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "file", ":", "pickle", ".", "dump", "(", "data", ",", "file", ")" ]
Save the state of this node (the alpha/ksize/id/immediate neighbors) to a cache file with the given fname.
[ "Save", "the", "state", "of", "this", "node", "(", "the", "alpha", "/", "ksize", "/", "id", "/", "immediate", "neighbors", ")", "to", "a", "cache", "file", "with", "the", "given", "fname", "." ]
python
train
bslatkin/dpxdt
dpxdt/server/auth.py
https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L345-L359
def superuser_api_key_required(f): """Decorator ensures only superuser API keys can request this function.""" @functools.wraps(f) def wrapped(*args, **kwargs): api_key = current_api_key() g.api_key = api_key utils.jsonify_assert( api_key.superuser, 'API key=%r must be a super user' % api_key.id, 403) return f(*args, **kwargs) return wrapped
[ "def", "superuser_api_key_required", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "api_key", "=", "current_api_key", "(", ")", "g", ".", "api_key", "=", "api_key", "utils", ".", "jsonify_assert", "(", "api_key", ".", "superuser", ",", "'API key=%r must be a super user'", "%", "api_key", ".", "id", ",", "403", ")", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped" ]
Decorator ensures only superuser API keys can request this function.
[ "Decorator", "ensures", "only", "superuser", "API", "keys", "can", "request", "this", "function", "." ]
python
train
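The same decorator shape without the Flask/dpxdt plumbing; g, current_api_key and jsonify_assert are replaced by stand-ins here:
import functools

def superuser_required(f):
    @functools.wraps(f)                       # keep f's name and docstring
    def wrapped(api_key, *args, **kwargs):
        if not getattr(api_key, 'superuser', False):
            raise PermissionError('API key must be a super user')
        return f(api_key, *args, **kwargs)
    return wrapped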
zhanglab/psamm
psamm/importers/sbml.py
https://github.com/zhanglab/psamm/blob/dc427848c4f9d109ca590f0afa024c63b685b3f4/psamm/importers/sbml.py#L44-L54
def _resolve_source(self, source): """Resolve source to filepath if it is a directory.""" if os.path.isdir(source): sources = glob.glob(os.path.join(source, '*.sbml')) if len(sources) == 0: raise ModelLoadError('No .sbml file found in source directory') elif len(sources) > 1: raise ModelLoadError( 'More than one .sbml file found in source directory') return sources[0] return source
[ "def", "_resolve_source", "(", "self", ",", "source", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", ":", "sources", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "source", ",", "'*.sbml'", ")", ")", "if", "len", "(", "sources", ")", "==", "0", ":", "raise", "ModelLoadError", "(", "'No .sbml file found in source directory'", ")", "elif", "len", "(", "sources", ")", ">", "1", ":", "raise", "ModelLoadError", "(", "'More than one .sbml file found in source directory'", ")", "return", "sources", "[", "0", "]", "return", "source" ]
Resolve source to filepath if it is a directory.
[ "Resolve", "source", "to", "filepath", "if", "it", "is", "a", "directory", "." ]
python
train
archman/beamline
beamline/element.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/element.py#L323-L338
def _printCtrlConf(self): """ get PV value and print out """ if self.ctrlinfo: print("Control configs:") for k, v in sorted(self.ctrlinfo.items(), reverse=True): pv = v['pv'] rval = epics.caget(pv) if rval is None: val = '' else: val = self.unitTrans(rval, direction='+') print(" {k:6s} = {pv:6s}, raw: {rval:>6s}, real: {val:>6s}".format(k=str(k), pv=str(pv), rval=str(rval), val=str(val)))
[ "def", "_printCtrlConf", "(", "self", ")", ":", "if", "self", ".", "ctrlinfo", ":", "print", "(", "\"Control configs:\"", ")", "for", "k", ",", "v", "in", "sorted", "(", "self", ".", "ctrlinfo", ".", "items", "(", ")", ",", "reverse", "=", "True", ")", ":", "pv", "=", "v", "[", "'pv'", "]", "rval", "=", "epics", ".", "caget", "(", "pv", ")", "if", "rval", "is", "None", ":", "val", "=", "''", "else", ":", "val", "=", "self", ".", "unitTrans", "(", "rval", ",", "direction", "=", "'+'", ")", "print", "(", "\" {k:6s} = {pv:6s}, raw: {rval:>6s}, real: {val:>6s}\"", ".", "format", "(", "k", "=", "str", "(", "k", ")", ",", "pv", "=", "str", "(", "pv", ")", ",", "rval", "=", "str", "(", "rval", ")", ",", "val", "=", "str", "(", "val", ")", ")", ")" ]
get PV value and print out
[ "get", "PV", "value", "and", "print", "out" ]
python
train
mayfield/shellish
shellish/layout/table.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/layout/table.py#L472-L478
def render_filter(self, next_filter): """ Produce formatted output from the raw data stream. """ next(next_filter) while True: data = (yield) res = [self.cell_format(access(data)) for access in self.accessors] next_filter.send(res)
[ "def", "render_filter", "(", "self", ",", "next_filter", ")", ":", "next", "(", "next_filter", ")", "while", "True", ":", "data", "=", "(", "yield", ")", "res", "=", "[", "self", ".", "cell_format", "(", "access", "(", "data", ")", ")", "for", "access", "in", "self", ".", "accessors", "]", "next_filter", ".", "send", "(", "res", ")" ]
Produce formatted output from the raw data stream.
[ "Produce", "formatted", "output", "from", "the", "raw", "data", "stream", "." ]
python
train
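The push-style coroutine pipeline it participates in, reduced to two stages; priming with next() before send(), exactly as above:
def sink():
    while True:
        row = (yield)
        print(row)

def upper_stage(next_filter):
    next(next_filter)              # prime the downstream coroutine
    while True:
        data = (yield)
        next_filter.send([c.upper() for c in data])

pipe = upper_stage(sink())
next(pipe)                         # prime the head of the pipeline
pipe.send(['a', 'b'])              # prints ['A', 'B']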
tradenity/python-sdk
tradenity/resources/option_value.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/option_value.py#L644-L665
def replace_option_value_by_id(cls, option_value_id, option_value, **kwargs): """Replace OptionValue Replace all attributes of OptionValue This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_option_value_by_id(option_value_id, option_value, async=True) >>> result = thread.get() :param async bool :param str option_value_id: ID of optionValue to replace (required) :param OptionValue option_value: Attributes of optionValue to replace (required) :return: OptionValue If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._replace_option_value_by_id_with_http_info(option_value_id, option_value, **kwargs) else: (data) = cls._replace_option_value_by_id_with_http_info(option_value_id, option_value, **kwargs) return data
[ "def", "replace_option_value_by_id", "(", "cls", ",", "option_value_id", ",", "option_value", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_replace_option_value_by_id_with_http_info", "(", "option_value_id", ",", "option_value", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_replace_option_value_by_id_with_http_info", "(", "option_value_id", ",", "option_value", ",", "*", "*", "kwargs", ")", "return", "data" ]
Replace OptionValue Replace all attributes of OptionValue This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_option_value_by_id(option_value_id, option_value, async=True) >>> result = thread.get() :param async bool :param str option_value_id: ID of optionValue to replace (required) :param OptionValue option_value: Attributes of optionValue to replace (required) :return: OptionValue If the method is called asynchronously, returns the request thread.
[ "Replace", "OptionValue" ]
python
train
vladcalin/gemstone
gemstone/core/handlers.py
https://github.com/vladcalin/gemstone/blob/325a49d17621b9d45ffd2b5eca6f0de284de8ba4/gemstone/core/handlers.py#L119-L204
def handle_single_request(self, request_object): """ Handles a single request object and returns the correct result as follows: - A valid response object if it is a regular request (with ID) - ``None`` if it was a notification (if None is returned, a response object with "received" body was already sent to the client. :param request_object: A :py:class:`gemstone.core.structs.JsonRpcRequest` object representing a Request object :return: A :py:class:`gemstone.core.structs.JsonRpcResponse` object representing a Response object or None if no response is expected (it was a notification) """ # don't handle responses? if isinstance(request_object, JsonRpcResponse): return request_object error = None result = None id_ = request_object.id # validate method name if request_object.method not in self.methods: resp = GenericResponse.METHOD_NOT_FOUND resp.id = id_ return resp # check for private access method = self.methods[request_object.method] if isinstance(request_object.params, (list, tuple)): self.call_method_from_all_plugins("on_method_call", request_object) else: self.call_method_from_all_plugins("on_method_call", request_object) if self._method_is_private(method): if not self.get_current_user(): resp = GenericResponse.ACCESS_DENIED resp.id = id_ return resp method = self.prepare_method_call(method, request_object.params) # before request hook _method_duration = time.time() try: result = yield self.call_method(method) except Exception as e: # catch all exceptions generated by method # and handle in a special manner only the TypeError if isinstance(e, TypeError): # TODO: find a proper way to check that the function got the wrong # parameters (with **kwargs) if "got an unexpected keyword argument" in e.args[0]: resp = GenericResponse.INVALID_PARAMS resp.id = id_ return resp # TODO: find a proper way to check that the function got the wrong # parameters (with *args) elif "takes" in e.args[0] and "positional argument" in e.args[0] and "were given" in \ e.args[0]: resp = GenericResponse.INVALID_PARAMS resp.id = id_ return resp elif "missing" in e.args[0] and "required positional argument" in e.args[0]: resp = GenericResponse.INVALID_PARAMS resp.id = id_ return resp # generic handling for any exception (even TypeError) that # is not generated because of bad parameters self.call_method_from_all_plugins("on_internal_error", e) err = GenericResponse.INTERNAL_ERROR err.id = id_ err.error["data"] = { "class": type(e).__name__, "info": str(e) } return err to_return_resp = JsonRpcResponse(result=result, error=error, id=id_) return to_return_resp
[ "def", "handle_single_request", "(", "self", ",", "request_object", ")", ":", "# don't handle responses?", "if", "isinstance", "(", "request_object", ",", "JsonRpcResponse", ")", ":", "return", "request_object", "error", "=", "None", "result", "=", "None", "id_", "=", "request_object", ".", "id", "# validate method name", "if", "request_object", ".", "method", "not", "in", "self", ".", "methods", ":", "resp", "=", "GenericResponse", ".", "METHOD_NOT_FOUND", "resp", ".", "id", "=", "id_", "return", "resp", "# check for private access", "method", "=", "self", ".", "methods", "[", "request_object", ".", "method", "]", "if", "isinstance", "(", "request_object", ".", "params", ",", "(", "list", ",", "tuple", ")", ")", ":", "self", ".", "call_method_from_all_plugins", "(", "\"on_method_call\"", ",", "request_object", ")", "else", ":", "self", ".", "call_method_from_all_plugins", "(", "\"on_method_call\"", ",", "request_object", ")", "if", "self", ".", "_method_is_private", "(", "method", ")", ":", "if", "not", "self", ".", "get_current_user", "(", ")", ":", "resp", "=", "GenericResponse", ".", "ACCESS_DENIED", "resp", ".", "id", "=", "id_", "return", "resp", "method", "=", "self", ".", "prepare_method_call", "(", "method", ",", "request_object", ".", "params", ")", "# before request hook", "_method_duration", "=", "time", ".", "time", "(", ")", "try", ":", "result", "=", "yield", "self", ".", "call_method", "(", "method", ")", "except", "Exception", "as", "e", ":", "# catch all exceptions generated by method", "# and handle in a special manner only the TypeError", "if", "isinstance", "(", "e", ",", "TypeError", ")", ":", "# TODO: find a proper way to check that the function got the wrong", "# parameters (with **kwargs)", "if", "\"got an unexpected keyword argument\"", "in", "e", ".", "args", "[", "0", "]", ":", "resp", "=", "GenericResponse", ".", "INVALID_PARAMS", "resp", ".", "id", "=", "id_", "return", "resp", "# TODO: find a proper way to check that the function got the wrong", "# parameters (with *args)", "elif", "\"takes\"", "in", "e", ".", "args", "[", "0", "]", "and", "\"positional argument\"", "in", "e", ".", "args", "[", "0", "]", "and", "\"were given\"", "in", "e", ".", "args", "[", "0", "]", ":", "resp", "=", "GenericResponse", ".", "INVALID_PARAMS", "resp", ".", "id", "=", "id_", "return", "resp", "elif", "\"missing\"", "in", "e", ".", "args", "[", "0", "]", "and", "\"required positional argument\"", "in", "e", ".", "args", "[", "0", "]", ":", "resp", "=", "GenericResponse", ".", "INVALID_PARAMS", "resp", ".", "id", "=", "id_", "return", "resp", "# generic handling for any exception (even TypeError) that", "# is not generated because of bad parameters", "self", ".", "call_method_from_all_plugins", "(", "\"on_internal_error\"", ",", "e", ")", "err", "=", "GenericResponse", ".", "INTERNAL_ERROR", "err", ".", "id", "=", "id_", "err", ".", "error", "[", "\"data\"", "]", "=", "{", "\"class\"", ":", "type", "(", "e", ")", ".", "__name__", ",", "\"info\"", ":", "str", "(", "e", ")", "}", "return", "err", "to_return_resp", "=", "JsonRpcResponse", "(", "result", "=", "result", ",", "error", "=", "error", ",", "id", "=", "id_", ")", "return", "to_return_resp" ]
Handles a single request object and returns the correct result as follows: - A valid response object if it is a regular request (with ID) - ``None`` if it was a notification (if None is returned, a response object with "received" body was already sent to the client. :param request_object: A :py:class:`gemstone.core.structs.JsonRpcRequest` object representing a Request object :return: A :py:class:`gemstone.core.structs.JsonRpcResponse` object representing a Response object or None if no response is expected (it was a notification)
[ "Handles", "a", "single", "request", "object", "and", "returns", "the", "correct", "result", "as", "follows", ":" ]
python
train
pandas-dev/pandas
pandas/core/indexes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L1123-L1148
def to_series(self, index=None, name=None):
    """
    Create a Series with both index and values equal to the index keys,
    useful with map for returning an indexer based on an index.

    Parameters
    ----------
    index : Index, optional
        index of resulting Series. If None, defaults to original index
    name : string, optional
        name of resulting Series. If None, defaults to name of original
        index

    Returns
    -------
    Series : dtype will be based on the type of the Index values.
    """

    from pandas import Series

    if index is None:
        index = self._shallow_copy()
    if name is None:
        name = self.name

    return Series(self.values.copy(), index=index, name=name)
[ "def", "to_series", "(", "self", ",", "index", "=", "None", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "Series", "if", "index", "is", "None", ":", "index", "=", "self", ".", "_shallow_copy", "(", ")", "if", "name", "is", "None", ":", "name", "=", "self", ".", "name", "return", "Series", "(", "self", ".", "values", ".", "copy", "(", ")", ",", "index", "=", "index", ",", "name", "=", "name", ")" ]
Create a Series with both index and values equal to the index keys,
useful with map for returning an indexer based on an index.

Parameters
----------
index : Index, optional
    index of resulting Series. If None, defaults to original index
name : string, optional
    name of resulting Series. If None, defaults to name of original
    index

Returns
-------
Series : dtype will be based on the type of the Index values.
[ "Create", "a", "Series", "with", "both", "index", "and", "values", "equal", "to", "the", "index", "keys", "useful", "with", "map", "for", "returning", "an", "indexer", "based", "on", "an", "index", "." ]
python
train
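A minimal usage sketch for the `to_series` method above (illustrative values; assumes a plain pandas Index):

import pandas as pd

idx = pd.Index(['a', 'b', 'c'], name='letters')
s = idx.to_series()                        # index and values are both the index keys
indexer = s.map({'a': 0, 'b': 1, 'c': 2})  # "indexer based on an index", per the docstring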
barrust/mediawiki
mediawiki/utilities.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/utilities.py#L86-L93
def is_relative_url(url): """ simple method to determine if a url is relative or absolute """ if url.startswith("#"): return None if url.find("://") > 0 or url.startswith("//"): # either 'http(s)://...' or '//cdn...' and therefore absolute return False return True
[ "def", "is_relative_url", "(", "url", ")", ":", "if", "url", ".", "startswith", "(", "\"#\"", ")", ":", "return", "None", "if", "url", ".", "find", "(", "\"://\"", ")", ">", "0", "or", "url", ".", "startswith", "(", "\"//\"", ")", ":", "# either 'http(s)://...' or '//cdn...' and therefore absolute", "return", "False", "return", "True" ]
simple method to determine if a url is relative or absolute
[ "simple", "method", "to", "determine", "if", "a", "url", "is", "relative", "or", "absolute" ]
python
train
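A quick sketch of the three possible return values of `is_relative_url` (hypothetical URLs):

is_relative_url('https://example.com/a')    # False: absolute
is_relative_url('//cdn.example.com/x.png')  # False: protocol-relative counts as absolute
is_relative_url('/wiki/Some_page')          # True: relative
is_relative_url('#section')                 # None: fragment-only links are neither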
saltstack/salt
salt/modules/status.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/status.py#L268-L293
def loadavg():
    '''
    Return the load averages for this minion

    .. versionchanged:: 2016.11.4
        Added support for AIX

    CLI Example:

    .. code-block:: bash

        salt '*' status.loadavg

    :raises CommandExecutionError: If the system cannot report load averages to Python
    '''
    if __grains__['kernel'] == 'AIX':
        return _aix_loadavg()

    try:
        load_avg = os.getloadavg()
    except AttributeError:
        # Some UNIX-based operating systems do not have os.getloadavg()
        raise salt.exceptions.CommandExecutionError('status.loadavg is not available on your platform')
    return {'1-min': load_avg[0],
            '5-min': load_avg[1],
            '15-min': load_avg[2]}
[ "def", "loadavg", "(", ")", ":", "if", "__grains__", "[", "'kernel'", "]", "==", "'AIX'", ":", "return", "_aix_loadavg", "(", ")", "try", ":", "load_avg", "=", "os", ".", "getloadavg", "(", ")", "except", "AttributeError", ":", "# Some UNIX-based operating systems do not have os.getloadavg()", "raise", "salt", ".", "exceptions", ".", "CommandExecutionError", "(", "'status.loadavag is not available on your platform'", ")", "return", "{", "'1-min'", ":", "load_avg", "[", "0", "]", ",", "'5-min'", ":", "load_avg", "[", "1", "]", ",", "'15-min'", ":", "load_avg", "[", "2", "]", "}" ]
Return the load averages for this minion

.. versionchanged:: 2016.11.4
    Added support for AIX

CLI Example:

.. code-block:: bash

    salt '*' status.loadavg

:raises CommandExecutionError: If the system cannot report load averages to Python
[ "Return", "the", "load", "averages", "for", "this", "minion" ]
python
train
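Outside of Salt, the same mapping can be sketched directly on top of `os.getloadavg()` (Unix-only; the values are whatever the host reports):

import os

one, five, fifteen = os.getloadavg()
load = {'1-min': one, '5-min': five, '15-min': fifteen}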
twisted/epsilon
epsilon/amprouter.py
https://github.com/twisted/epsilon/blob/e85fa985a41983ef06e1d3bb26639181e1f78b24/epsilon/amprouter.py#L171-L183
def startReceivingBoxes(self, sender): """ Initialize route tracking objects. """ self._sender = sender for routeName, route in self._unstarted.iteritems(): # Any route which has been bound but which does not yet have a # remote route name should not yet be started. These will be # started in Route.connectTo. if route.remoteRouteName is not _unspecified: route.start() self._routes = self._unstarted self._unstarted = None
[ "def", "startReceivingBoxes", "(", "self", ",", "sender", ")", ":", "self", ".", "_sender", "=", "sender", "for", "routeName", ",", "route", "in", "self", ".", "_unstarted", ".", "iteritems", "(", ")", ":", "# Any route which has been bound but which does not yet have a", "# remote route name should not yet be started. These will be", "# started in Route.connectTo.", "if", "route", ".", "remoteRouteName", "is", "not", "_unspecified", ":", "route", ".", "start", "(", ")", "self", ".", "_routes", "=", "self", ".", "_unstarted", "self", ".", "_unstarted", "=", "None" ]
Initialize route tracking objects.
[ "Initialize", "route", "tracking", "objects", "." ]
python
train
mitsei/dlkit
dlkit/json_/learning/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/objects.py#L343-L353
def get_cognitive_process_metadata(self): """Gets the metadata for a cognitive process. return: (osid.Metadata) - metadata for the cognitive process *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template metadata = dict(self._mdata['cognitive_process']) metadata.update({'existing_id_values': self._my_map['cognitiveProcessId']}) return Metadata(**metadata)
[ "def", "get_cognitive_process_metadata", "(", "self", ")", ":", "# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template", "metadata", "=", "dict", "(", "self", ".", "_mdata", "[", "'cognitive_process'", "]", ")", "metadata", ".", "update", "(", "{", "'existing_id_values'", ":", "self", ".", "_my_map", "[", "'cognitiveProcessId'", "]", "}", ")", "return", "Metadata", "(", "*", "*", "metadata", ")" ]
Gets the metadata for a cognitive process. return: (osid.Metadata) - metadata for the cognitive process *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "metadata", "for", "a", "cognitive", "process", "." ]
python
train
michaeljohnbarr/django-timezone-utils
timezone_utils/fields.py
https://github.com/michaeljohnbarr/django-timezone-utils/blob/61c8b50c59049cb7eccd4e3892f332f88b890f00/timezone_utils/fields.py#L77-L85
def get_prep_value(self, value): """Converts timezone instances to strings for db storage.""" # pylint: disable=newstyle value = super(TimeZoneField, self).get_prep_value(value) if isinstance(value, tzinfo): return value.zone return value
[ "def", "get_prep_value", "(", "self", ",", "value", ")", ":", "# pylint: disable=newstyle", "value", "=", "super", "(", "TimeZoneField", ",", "self", ")", ".", "get_prep_value", "(", "value", ")", "if", "isinstance", "(", "value", ",", "tzinfo", ")", ":", "return", "value", ".", "zone", "return", "value" ]
Converts timezone instances to strings for db storage.
[ "Converts", "timezone", "instances", "to", "strings", "for", "db", "storage", "." ]
python
train
bkg/greenwich
greenwich/raster.py
https://github.com/bkg/greenwich/blob/57ec644dadfe43ce0ecf2cfd32a2de71e0c8c141/greenwich/raster.py#L634-L637
def shape(self): """Returns a tuple of row, column, (band count if multidimensional).""" shp = (self.ds.RasterYSize, self.ds.RasterXSize, self.ds.RasterCount) return shp[:2] if shp[2] <= 1 else shp
[ "def", "shape", "(", "self", ")", ":", "shp", "=", "(", "self", ".", "ds", ".", "RasterYSize", ",", "self", ".", "ds", ".", "RasterXSize", ",", "self", ".", "ds", ".", "RasterCount", ")", "return", "shp", "[", ":", "2", "]", "if", "shp", "[", "2", "]", "<=", "1", "else", "shp" ]
Returns a tuple of row, column, (band count if multidimensional).
[ "Returns", "a", "tuple", "of", "row", "column", "(", "band", "count", "if", "multidimensional", ")", "." ]
python
test
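A hedged sketch of the same row/column/band logic against a bare GDAL dataset (the wrapper's `self.ds`); the filename is hypothetical:

from osgeo import gdal

ds = gdal.Open('example.tif')  # hypothetical single- or multi-band raster
shp = (ds.RasterYSize, ds.RasterXSize, ds.RasterCount)
shape = shp[:2] if shp[2] <= 1 else shp  # (rows, cols) when single-band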
Autodesk/aomi
aomi/helpers.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/helpers.py#L56-L68
def is_tagged(required_tags, has_tags): """Checks if tags match""" if not required_tags and not has_tags: return True elif not required_tags: return False found_tags = [] for tag in required_tags: if tag in has_tags: found_tags.append(tag) return len(found_tags) == len(required_tags)
[ "def", "is_tagged", "(", "required_tags", ",", "has_tags", ")", ":", "if", "not", "required_tags", "and", "not", "has_tags", ":", "return", "True", "elif", "not", "required_tags", ":", "return", "False", "found_tags", "=", "[", "]", "for", "tag", "in", "required_tags", ":", "if", "tag", "in", "has_tags", ":", "found_tags", ".", "append", "(", "tag", ")", "return", "len", "(", "found_tags", ")", "==", "len", "(", "required_tags", ")" ]
Checks if tags match
[ "Checks", "if", "tags", "match" ]
python
train
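A few illustrative calls showing the matching rules of `is_tagged`, including the asymmetric empty cases:

is_tagged(['prod', 'web'], ['prod', 'web', 'db'])  # True: every required tag is present
is_tagged(['prod', 'web'], ['prod'])               # False: 'web' is missing
is_tagged([], [])                                  # True: nothing required, nothing present
is_tagged([], ['prod'])                            # False: tags present but none required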
aiortc/aiortc
aiortc/rtcsctptransport.py
https://github.com/aiortc/aiortc/blob/60ed036abf4575bd63985724b4493d569e6da29b/aiortc/rtcsctptransport.py#L1485-L1493
def _data_channel_close(self, channel, transmit=True): """ Request closing the datachannel by sending an Outgoing Stream Reset Request. """ if channel.readyState not in ['closing', 'closed']: channel._setReadyState('closing') self._reconfig_queue.append(channel.id) if len(self._reconfig_queue) == 1: asyncio.ensure_future(self._transmit_reconfig())
[ "def", "_data_channel_close", "(", "self", ",", "channel", ",", "transmit", "=", "True", ")", ":", "if", "channel", ".", "readyState", "not", "in", "[", "'closing'", ",", "'closed'", "]", ":", "channel", ".", "_setReadyState", "(", "'closing'", ")", "self", ".", "_reconfig_queue", ".", "append", "(", "channel", ".", "id", ")", "if", "len", "(", "self", ".", "_reconfig_queue", ")", "==", "1", ":", "asyncio", ".", "ensure_future", "(", "self", ".", "_transmit_reconfig", "(", ")", ")" ]
Request closing the datachannel by sending an Outgoing Stream Reset Request.
[ "Request", "closing", "the", "datachannel", "by", "sending", "an", "Outgoing", "Stream", "Reset", "Request", "." ]
python
train
marshmallow-code/apispec
src/apispec/ext/marshmallow/openapi.py
https://github.com/marshmallow-code/apispec/blob/e92ceffd12b2e392b8d199ed314bd2a7e6512dff/src/apispec/ext/marshmallow/openapi.py#L696-L706
def get_ref_dict(self, schema): """Method to create a dictionary containing a JSON reference to the schema in the spec """ schema_key = make_schema_key(schema) ref_schema = build_reference( "schema", self.openapi_version.major, self.refs[schema_key] ) if getattr(schema, "many", False): return {"type": "array", "items": ref_schema} return ref_schema
[ "def", "get_ref_dict", "(", "self", ",", "schema", ")", ":", "schema_key", "=", "make_schema_key", "(", "schema", ")", "ref_schema", "=", "build_reference", "(", "\"schema\"", ",", "self", ".", "openapi_version", ".", "major", ",", "self", ".", "refs", "[", "schema_key", "]", ")", "if", "getattr", "(", "schema", ",", "\"many\"", ",", "False", ")", ":", "return", "{", "\"type\"", ":", "\"array\"", ",", "\"items\"", ":", "ref_schema", "}", "return", "ref_schema" ]
Method to create a dictionary containing a JSON reference to the schema in the spec
[ "Method", "to", "create", "a", "dictionary", "containing", "a", "JSON", "reference", "to", "the", "schema", "in", "the", "spec" ]
python
train
Yelp/kafka-utils
kafka_utils/kafka_corruption_check/main.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_corruption_check/main.py#L206-L226
def check_corrupted_files_cmd(java_home, files): """Check the file corruption of the specified files. :param java_home: the JAVA_HOME :type java_home: string :param files: list of files to be checked :type files: list of string """ files_str = ",".join(files) check_command = CHECK_COMMAND.format( ionice=IONICE, java_home=java_home, files=files_str, ) # One line per message can generate several MB/s of data # Use pre-filtering on the server side to reduce it command = "{check_command} | {reduce_output}".format( check_command=check_command, reduce_output=REDUCE_OUTPUT, ) return command
[ "def", "check_corrupted_files_cmd", "(", "java_home", ",", "files", ")", ":", "files_str", "=", "\",\"", ".", "join", "(", "files", ")", "check_command", "=", "CHECK_COMMAND", ".", "format", "(", "ionice", "=", "IONICE", ",", "java_home", "=", "java_home", ",", "files", "=", "files_str", ",", ")", "# One line per message can generate several MB/s of data", "# Use pre-filtering on the server side to reduce it", "command", "=", "\"{check_command} | {reduce_output}\"", ".", "format", "(", "check_command", "=", "check_command", ",", "reduce_output", "=", "REDUCE_OUTPUT", ",", ")", "return", "command" ]
Check the file corruption of the specified files. :param java_home: the JAVA_HOME :type java_home: string :param files: list of files to be checked :type files: list of string
[ "Check", "the", "file", "corruption", "of", "the", "specified", "files", "." ]
python
train
TissueMAPS/TmClient
src/python/tmclient/auth.py
https://github.com/TissueMAPS/TmClient/blob/6fb40622af19142cb5169a64b8c2965993a25ab1/src/python/tmclient/auth.py#L23-L69
def load_credentials_from_file(username):
    '''Loads password for `username` from a file.

    The file must be called ``.tm_pass`` and stored in
    the home directory. It must provide a YAML mapping where
    keys are usernames and values the corresponding passwords.

    Parameters
    ----------
    username: str
        name of the TissueMAPS user

    Returns
    -------
    str
        password for the given user

    Raises
    ------
    OSError
        when the file does not exist
    SyntaxError
        when the file content cannot be parsed
    KeyError
        when the file does not contain a password for `username`

    Warning
    -------
    This is not safe! Passwords are stored in plain text.
    '''
    filename = os.path.expandvars(os.path.join('$HOME', '.tm_pass'))
    try:
        with open(filename) as f:
            credentials = yaml.load(f.read())
    except OSError as err:
        raise OSError(
            'No credentials file:\n{0}'.format(filename)
        )
    except Exception as err:
        raise SyntaxError(
            'Could not read credentials from file:\n{0}'.format(str(err))
        )
    if username not in credentials:
        raise KeyError(
            'No credentials provided for user "{0}"'.format(username)
        )
    return credentials[username]
[ "def", "load_credentials_from_file", "(", "username", ")", ":", "filename", "=", "os", ".", "path", ".", "expandvars", "(", "os", ".", "path", ".", "join", "(", "'$HOME'", ",", "'.tm_pass'", ")", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "credentials", "=", "yaml", ".", "load", "(", "f", ".", "read", "(", ")", ")", "except", "OSError", "as", "err", ":", "raise", "OSError", "(", "'No credentials file:\\n{0}'", ".", "format", "(", "filename", ")", ")", "except", "Exception", "as", "err", ":", "raise", "SyntaxError", "(", "'Could not be read credentials from file:\\n{0}'", ".", "format", "(", "str", "(", "err", ")", ")", ")", "if", "username", "not", "in", "credentials", ":", "raise", "KeyError", "(", "'No credentials provided for user \"{0}\"'", ".", "format", "(", "username", ")", ")", "return", "credentials", "[", "username", "]" ]
Loads password for `username` from a file.

The file must be called ``.tm_pass`` and stored in
the home directory. It must provide a YAML mapping where
keys are usernames and values the corresponding passwords.

Parameters
----------
username: str
    name of the TissueMAPS user

Returns
-------
str
    password for the given user

Raises
------
OSError
    when the file does not exist
SyntaxError
    when the file content cannot be parsed
KeyError
    when the file does not contain a password for `username`

Warning
-------
This is not safe! Passwords are stored in plain text.
[ "Loads", "password", "for", "username", "from", "a", "file", "." ]
python
train
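An illustrative `~/.tm_pass` file and call (hypothetical user and password; note the plain-text warning in the docstring):

# ~/.tm_pass
#   alice: s3cret
#   bob: hunter2

password = load_credentials_from_file('alice')  # -> 's3cret'; KeyError for unknown users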
sorgerlab/indra
indra/assemblers/english/assembler.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/english/assembler.py#L306-L314
def _assemble_translocation(stmt): """Assemble Translocation statements into text.""" agent_str = _assemble_agent_str(stmt.agent) stmt_str = agent_str + ' translocates' if stmt.from_location is not None: stmt_str += ' from the ' + stmt.from_location if stmt.to_location is not None: stmt_str += ' to the ' + stmt.to_location return _make_sentence(stmt_str)
[ "def", "_assemble_translocation", "(", "stmt", ")", ":", "agent_str", "=", "_assemble_agent_str", "(", "stmt", ".", "agent", ")", "stmt_str", "=", "agent_str", "+", "' translocates'", "if", "stmt", ".", "from_location", "is", "not", "None", ":", "stmt_str", "+=", "' from the '", "+", "stmt", ".", "from_location", "if", "stmt", ".", "to_location", "is", "not", "None", ":", "stmt_str", "+=", "' to the '", "+", "stmt", ".", "to_location", "return", "_make_sentence", "(", "stmt_str", ")" ]
Assemble Translocation statements into text.
[ "Assemble", "Translocation", "statements", "into", "text", "." ]
python
train
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L770-L801
def is_affected(hazard, classification, hazard_class, qsettings=None):
    """Get affected flag for hazard in classification in hazard class.

    :param hazard: The hazard key.
    :type hazard: basestring

    :param classification: The classification key.
    :type classification: basestring

    :param hazard_class: The hazard class key.
    :type hazard_class: basestring

    :param qsettings: A custom QSettings to use. If it's not defined, it will
        use the default one.
    :type qsettings: qgis.PyQt.QtCore.QSettings

    :returns: True if it's affected, else False. Defaults to False.
    :rtype: bool
    """
    preference_data = setting(
        'population_preference',
        default=generate_default_profile(),
        qsettings=qsettings)

    # Use default from the default profile
    default_profile = generate_default_profile()
    default_affected_value = default_profile.get(hazard, {}).get(
        classification, {}).get(hazard_class, {}).get(
        'affected', not_exposed_class['key'])

    # noinspection PyUnresolvedReferences
    return preference_data.get(hazard, {}).get(classification, {}).get(
        hazard_class, {}).get('affected', default_affected_value)
[ "def", "is_affected", "(", "hazard", ",", "classification", ",", "hazard_class", ",", "qsettings", "=", "None", ")", ":", "preference_data", "=", "setting", "(", "'population_preference'", ",", "default", "=", "generate_default_profile", "(", ")", ",", "qsettings", "=", "qsettings", ")", "# Use default from the default profile", "default_profile", "=", "generate_default_profile", "(", ")", "default_affected_value", "=", "default_profile", ".", "get", "(", "hazard", ",", "{", "}", ")", ".", "get", "(", "classification", ",", "{", "}", ")", ".", "get", "(", "hazard_class", ",", "{", "}", ")", ".", "get", "(", "'affected'", ",", "not_exposed_class", "[", "'key'", "]", ")", "# noinspection PyUnresolvedReferences", "return", "preference_data", ".", "get", "(", "hazard", ",", "{", "}", ")", ".", "get", "(", "classification", ",", "{", "}", ")", ".", "get", "(", "hazard_class", ",", "{", "}", ")", ".", "get", "(", "'affected'", ",", "default_affected_value", ")" ]
Get affected flag for hazard in classification in hazard class.

:param hazard: The hazard key.
:type hazard: basestring

:param classification: The classification key.
:type classification: basestring

:param hazard_class: The hazard class key.
:type hazard_class: basestring

:param qsettings: A custom QSettings to use. If it's not defined, it will
    use the default one.
:type qsettings: qgis.PyQt.QtCore.QSettings

:returns: True if it's affected, else False. Defaults to False.
:rtype: bool
[ "Get", "affected", "flag", "for", "hazard", "in", "classification", "in", "hazard", "class", "." ]
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsAggShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsAggShockModel.py#L1343-L1362
def update(self): ''' Use primitive parameters (and perfect foresight calibrations) to make interest factor and wage rate functions (of capital to labor ratio), as well as discrete approximations to the aggregate shock distributions. Parameters ---------- None Returns ------- None ''' CobbDouglasEconomy.update(self) StateCount = self.MrkvArray.shape[0] AFunc_all = [] for i in range(StateCount): AFunc_all.append(AggregateSavingRule(self.intercept_prev[i],self.slope_prev[i])) self.AFunc = AFunc_all
[ "def", "update", "(", "self", ")", ":", "CobbDouglasEconomy", ".", "update", "(", "self", ")", "StateCount", "=", "self", ".", "MrkvArray", ".", "shape", "[", "0", "]", "AFunc_all", "=", "[", "]", "for", "i", "in", "range", "(", "StateCount", ")", ":", "AFunc_all", ".", "append", "(", "AggregateSavingRule", "(", "self", ".", "intercept_prev", "[", "i", "]", ",", "self", ".", "slope_prev", "[", "i", "]", ")", ")", "self", ".", "AFunc", "=", "AFunc_all" ]
Use primitive parameters (and perfect foresight calibrations) to make interest factor and wage rate functions (of capital to labor ratio), as well as discrete approximations to the aggregate shock distributions. Parameters ---------- None Returns ------- None
[ "Use", "primitive", "parameters", "(", "and", "perfect", "foresight", "calibrations", ")", "to", "make", "interest", "factor", "and", "wage", "rate", "functions", "(", "of", "capital", "to", "labor", "ratio", ")", "as", "well", "as", "discrete", "approximations", "to", "the", "aggregate", "shock", "distributions", "." ]
python
train
tensorflow/hub
examples/image_retraining/retrain.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/examples/image_retraining/retrain.py#L317-L340
def run_bottleneck_on_image(sess, image_data, image_data_tensor, decoded_image_tensor, resized_input_tensor, bottleneck_tensor): """Runs inference on an image to extract the 'bottleneck' summary layer. Args: sess: Current active TensorFlow Session. image_data: String of raw JPEG data. image_data_tensor: Input data layer in the graph. decoded_image_tensor: Output of initial image resizing and preprocessing. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: Layer before the final softmax. Returns: Numpy array of bottleneck values. """ # First decode the JPEG image, resize it, and rescale the pixel values. resized_input_values = sess.run(decoded_image_tensor, {image_data_tensor: image_data}) # Then run it through the recognition network. bottleneck_values = sess.run(bottleneck_tensor, {resized_input_tensor: resized_input_values}) bottleneck_values = np.squeeze(bottleneck_values) return bottleneck_values
[ "def", "run_bottleneck_on_image", "(", "sess", ",", "image_data", ",", "image_data_tensor", ",", "decoded_image_tensor", ",", "resized_input_tensor", ",", "bottleneck_tensor", ")", ":", "# First decode the JPEG image, resize it, and rescale the pixel values.", "resized_input_values", "=", "sess", ".", "run", "(", "decoded_image_tensor", ",", "{", "image_data_tensor", ":", "image_data", "}", ")", "# Then run it through the recognition network.", "bottleneck_values", "=", "sess", ".", "run", "(", "bottleneck_tensor", ",", "{", "resized_input_tensor", ":", "resized_input_values", "}", ")", "bottleneck_values", "=", "np", ".", "squeeze", "(", "bottleneck_values", ")", "return", "bottleneck_values" ]
Runs inference on an image to extract the 'bottleneck' summary layer. Args: sess: Current active TensorFlow Session. image_data: String of raw JPEG data. image_data_tensor: Input data layer in the graph. decoded_image_tensor: Output of initial image resizing and preprocessing. resized_input_tensor: The input node of the recognition graph. bottleneck_tensor: Layer before the final softmax. Returns: Numpy array of bottleneck values.
[ "Runs", "inference", "on", "an", "image", "to", "extract", "the", "bottleneck", "summary", "layer", "." ]
python
train
pywbem/pywbem
pywbem/tupleparse.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupleparse.py#L1888-L1914
def parse_returnvalue(self, tup_tree): """ Parse the RETURNVALUE element. Returns name, attributes, and one child as a tuple. :: <!ELEMENT RETURNVALUE (VALUE | VALUE.REFERENCE)?> <!ATTLIST RETURNVALUE %EmbeddedObject; %ParamType; #IMPLIED> """ # Version 2.1.1 of the DTD lacks the %ParamType attribute but it # is present in version 2.2. Make it optional to be backwards # compatible. self.check_node(tup_tree, 'RETURNVALUE', (), ('PARAMTYPE', 'EmbeddedObject', 'EMBEDDEDOBJECT')) child = self.optional_child(tup_tree, ('VALUE', 'VALUE.REFERENCE')) attrl = attrs(tup_tree) if 'EmbeddedObject' in attrl or 'EMBEDDEDOBJECT' in attrl: child = self.parse_embeddedObject(child) return name(tup_tree), attrl, child
[ "def", "parse_returnvalue", "(", "self", ",", "tup_tree", ")", ":", "# Version 2.1.1 of the DTD lacks the %ParamType attribute but it", "# is present in version 2.2. Make it optional to be backwards", "# compatible.", "self", ".", "check_node", "(", "tup_tree", ",", "'RETURNVALUE'", ",", "(", ")", ",", "(", "'PARAMTYPE'", ",", "'EmbeddedObject'", ",", "'EMBEDDEDOBJECT'", ")", ")", "child", "=", "self", ".", "optional_child", "(", "tup_tree", ",", "(", "'VALUE'", ",", "'VALUE.REFERENCE'", ")", ")", "attrl", "=", "attrs", "(", "tup_tree", ")", "if", "'EmbeddedObject'", "in", "attrl", "or", "'EMBEDDEDOBJECT'", "in", "attrl", ":", "child", "=", "self", ".", "parse_embeddedObject", "(", "child", ")", "return", "name", "(", "tup_tree", ")", ",", "attrl", ",", "child" ]
Parse the RETURNVALUE element. Returns name, attributes, and one child as a tuple. :: <!ELEMENT RETURNVALUE (VALUE | VALUE.REFERENCE)?> <!ATTLIST RETURNVALUE %EmbeddedObject; %ParamType; #IMPLIED>
[ "Parse", "the", "RETURNVALUE", "element", ".", "Returns", "name", "attributes", "and", "one", "child", "as", "a", "tuple", "." ]
python
train
pyBookshelf/bookshelf
bookshelf/api_v1.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v1.py#L81-L91
def add_firewalld_service(service, permanent=True): """ adds a firewall rule """ yum_install(packages=['firewalld']) with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True, capture=True): p = '' if permanent: p = '--permanent' sudo('firewall-cmd --add-service %s %s' % (service, p)) sudo('systemctl reload firewalld')
[ "def", "add_firewalld_service", "(", "service", ",", "permanent", "=", "True", ")", ":", "yum_install", "(", "packages", "=", "[", "'firewalld'", "]", ")", "with", "settings", "(", "hide", "(", "'warnings'", ",", "'running'", ",", "'stdout'", ",", "'stderr'", ")", ",", "warn_only", "=", "True", ",", "capture", "=", "True", ")", ":", "p", "=", "''", "if", "permanent", ":", "p", "=", "'--permanent'", "sudo", "(", "'firewall-cmd --add-service %s %s'", "%", "(", "service", ",", "p", ")", ")", "sudo", "(", "'systemctl reload firewalld'", ")" ]
adds a firewall rule
[ "adds", "a", "firewall", "rule" ]
python
train
baruwa-enterprise/BaruwaAPI
BaruwaAPI/resource.py
https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L464-L469
def delete_fallbackserver(self, serverid, data): """Delete Fallback server""" return self.api_call( ENDPOINTS['fallbackservers']['delete'], dict(serverid=serverid), body=data)
[ "def", "delete_fallbackserver", "(", "self", ",", "serverid", ",", "data", ")", ":", "return", "self", ".", "api_call", "(", "ENDPOINTS", "[", "'fallbackservers'", "]", "[", "'delete'", "]", ",", "dict", "(", "serverid", "=", "serverid", ")", ",", "body", "=", "data", ")" ]
Delete Fallback server
[ "Delete", "Fallback", "server" ]
python
train
Sean1708/HipPy
hippy/compiler.py
https://github.com/Sean1708/HipPy/blob/d0ea8fb1e417f1fedaa8e215e3d420b90c4de691/hippy/compiler.py#L82-L103
def _compile_key_val(self, data, indent_level): """Compile a dictionary.""" buffer = '' for (key, val) in data.items(): buffer += self._indent * indent_level # TODO: assumes key is a string buffer += key + ':' if isinstance(val, dict): buffer += '\n' buffer += self._compile_key_val(val, indent_level+1) elif ( isinstance(val, list) and any(isinstance(i, (dict, list)) for i in val) ): buffer += self._compile_list(val, indent_level+1) else: buffer += ' ' buffer += self._compile_value(val, indent_level) buffer += '\n' return buffer
[ "def", "_compile_key_val", "(", "self", ",", "data", ",", "indent_level", ")", ":", "buffer", "=", "''", "for", "(", "key", ",", "val", ")", "in", "data", ".", "items", "(", ")", ":", "buffer", "+=", "self", ".", "_indent", "*", "indent_level", "# TODO: assumes key is a string", "buffer", "+=", "key", "+", "':'", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "buffer", "+=", "'\\n'", "buffer", "+=", "self", ".", "_compile_key_val", "(", "val", ",", "indent_level", "+", "1", ")", "elif", "(", "isinstance", "(", "val", ",", "list", ")", "and", "any", "(", "isinstance", "(", "i", ",", "(", "dict", ",", "list", ")", ")", "for", "i", "in", "val", ")", ")", ":", "buffer", "+=", "self", ".", "_compile_list", "(", "val", ",", "indent_level", "+", "1", ")", "else", ":", "buffer", "+=", "' '", "buffer", "+=", "self", ".", "_compile_value", "(", "val", ",", "indent_level", ")", "buffer", "+=", "'\\n'", "return", "buffer" ]
Compile a dictionary.
[ "Compile", "a", "dictionary", "." ]
python
train
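Roughly what `_compile_key_val` emits for a nested dict, assuming `self._indent` is four spaces and `_compile_value` renders scalars plainly; the exact output depends on those unseen helpers:

# _compile_key_val({'a': 1, 'b': {'c': 2}}, 0) would yield approximately:
#
# a: 1
# b:
#     c: 2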
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L629-L645
def get_ip_interface_output_interface_vrf(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_ip_interface = ET.Element("get_ip_interface") config = get_ip_interface output = ET.SubElement(get_ip_interface, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') vrf = ET.SubElement(interface, "vrf") vrf.text = kwargs.pop('vrf') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_ip_interface_output_interface_vrf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_ip_interface", "=", "ET", ".", "Element", "(", "\"get_ip_interface\"", ")", "config", "=", "get_ip_interface", "output", "=", "ET", ".", "SubElement", "(", "get_ip_interface", ",", "\"output\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"interface\"", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-name\"", ")", "interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "vrf", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"vrf\"", ")", "vrf", ".", "text", "=", "kwargs", ".", "pop", "(", "'vrf'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
portfoliome/foil
foil/iteration.py
https://github.com/portfoliome/foil/blob/b66d8cf4ab048a387d8c7a033b47e922ed6917d6/foil/iteration.py#L5-L12
def chunks(items, chunksize): """Turn generator sequence into sequence of chunks.""" items = iter(items) for first in items: chunk = chain((first,), islice(items, chunksize - 1)) yield chunk deque(chunk, 0)
[ "def", "chunks", "(", "items", ",", "chunksize", ")", ":", "items", "=", "iter", "(", "items", ")", "for", "first", "in", "items", ":", "chunk", "=", "chain", "(", "(", "first", ",", ")", ",", "islice", "(", "items", ",", "chunksize", "-", "1", ")", ")", "yield", "chunk", "deque", "(", "chunk", ",", "0", ")" ]
Turn generator sequence into sequence of chunks.
[ "Turn", "generator", "sequence", "into", "sequence", "of", "chunks", "." ]
python
train
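Usage sketch: each yielded chunk is a lazy iterator over the underlying sequence, and the trailing `deque(chunk, 0)` drains whatever the caller did not consume before the next chunk starts:

for chunk in chunks(range(7), 3):
    print(list(chunk))
# [0, 1, 2]
# [3, 4, 5]
# [6]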
quantmind/dynts
dynts/dsl/__init__.py
https://github.com/quantmind/dynts/blob/21ac57c648bfec402fa6b1fe569496cf098fb5e8/dynts/dsl/__init__.py#L18-L43
def parse(timeseries_expression, method=None, functions=None, debug=False):
    '''Function for parsing :ref:`timeseries expressions <dsl-script>`.
If successful, it returns an instance of :class:`dynts.dsl.Expr` which
can be used to populate timeseries or scatters once data is available.
Parsing is implemented using the ply_ module, an implementation
of lex and yacc parsing tools for Python.

:parameter expression: A :ref:`timeseries expressions <dsl-script>` string.
:parameter method: Not yet used.
:parameter functions: dictionary of functions to use when parsing.
    If not provided the :data:`dynts.function_registry`
    will be used.

    Default ``None``.
:parameter debug: debug flag for ply_. Default ``False``.

For examples and usage check the :ref:`dsl documentation <dsl>`.

.. _ply: http://www.dabeaz.com/ply/
'''
    if not parsefunc:
        raise ExpressionError('Could not parse. No parser installed.')
    functions = functions if functions is not None else function_registry
    expr_str = str(timeseries_expression).lower()
    return parsefunc(expr_str, functions, method, debug)
[ "def", "parse", "(", "timeseries_expression", ",", "method", "=", "None", ",", "functions", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "not", "parsefunc", ":", "raise", "ExpressionError", "(", "'Could not parse. No parser installed.'", ")", "functions", "=", "functions", "if", "functions", "is", "not", "None", "else", "function_registry", "expr_str", "=", "str", "(", "timeseries_expression", ")", ".", "lower", "(", ")", "return", "parsefunc", "(", "expr_str", ",", "functions", ",", "method", ",", "debug", ")" ]
Function for parsing :ref:`timeseries expressions <dsl-script>`.
If successful, it returns an instance of :class:`dynts.dsl.Expr` which
can be used to populate timeseries or scatters once data is available.
Parsing is implemented using the ply_ module, an implementation
of lex and yacc parsing tools for Python.

:parameter expression: A :ref:`timeseries expressions <dsl-script>` string.
:parameter method: Not yet used.
:parameter functions: dictionary of functions to use when parsing.
    If not provided the :data:`dynts.function_registry`
    will be used.

    Default ``None``.
:parameter debug: debug flag for ply_. Default ``False``.

For examples and usage check the :ref:`dsl documentation <dsl>`.

.. _ply: http://www.dabeaz.com/ply/
[ "Function", "for", "parsing", ":", "ref", ":", "timeseries", "expressions", "<dsl", "-", "script", ">", ".", "If", "succesful", "it", "returns", "an", "instance", "of", ":", "class", ":", "dynts", ".", "dsl", ".", "Expr", "which", "can", "be", "used", "to", "to", "populate", "timeseries", "or", "scatters", "once", "data", "is", "available", ".", "Parsing", "is", "implemented", "using", "the", "ply_", "module", "an", "implementation", "of", "lex", "and", "yacc", "parsing", "tools", "for", "Python", ".", ":", "parameter", "expression", ":", "A", ":", "ref", ":", "timeseries", "expressions", "<dsl", "-", "script", ">", "string", ".", ":", "parameter", "method", ":", "Not", "yet", "used", ".", ":", "parameter", "functions", ":", "dictionary", "of", "functions", "to", "use", "when", "parsing", ".", "If", "not", "provided", "the", ":", "data", ":", "dynts", ".", "function_registry", "will", "be", "used", ".", "Default", "None", ".", ":", "parameter", "debug", ":", "debug", "flag", "for", "ply_", ".", "Default", "False", ".", "For", "examples", "and", "usage", "check", "the", ":", "ref", ":", "dsl", "documentation", "<dsl", ">", ".", "..", "_ply", ":", "http", ":", "//", "www", ".", "dabeaz", ".", "com", "/", "ply", "/" ]
python
train
Capitains/MyCapytain
MyCapytain/resolvers/dts/api_v1.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/dts/api_v1.py#L147-L172
def getTextualNode( self, textId: str, subreference: Union[str, BaseReference]=None, prevnext: bool=False, metadata: bool=False ) -> DtsResolverDocument: """ Retrieve a text node from the API :param textId: CtsTextMetadata Identifier :type textId: str :param subreference: CapitainsCtsPassage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: CapitainsCtsPassage :rtype: CapitainsCtsPassage """ return DtsResolverDocument.parse( identifier=textId, reference=subreference, resolver=self, response=self.endpoint.get_document(collection_id=textId, ref=subreference) )
[ "def", "getTextualNode", "(", "self", ",", "textId", ":", "str", ",", "subreference", ":", "Union", "[", "str", ",", "BaseReference", "]", "=", "None", ",", "prevnext", ":", "bool", "=", "False", ",", "metadata", ":", "bool", "=", "False", ")", "->", "DtsResolverDocument", ":", "return", "DtsResolverDocument", ".", "parse", "(", "identifier", "=", "textId", ",", "reference", "=", "subreference", ",", "resolver", "=", "self", ",", "response", "=", "self", ".", "endpoint", ".", "get_document", "(", "collection_id", "=", "textId", ",", "ref", "=", "subreference", ")", ")" ]
Retrieve a text node from the API :param textId: CtsTextMetadata Identifier :type textId: str :param subreference: CapitainsCtsPassage Reference :type subreference: str :param prevnext: Retrieve graph representing previous and next passage :type prevnext: boolean :param metadata: Retrieve metadata about the passage and the text :type metadata: boolean :return: CapitainsCtsPassage :rtype: CapitainsCtsPassage
[ "Retrieve", "a", "text", "node", "from", "the", "API" ]
python
train
googleapis/google-cloud-python
datalabeling/google/cloud/datalabeling_v1beta1/gapic/data_labeling_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datalabeling/google/cloud/datalabeling_v1beta1/gapic/data_labeling_service_client.py#L109-L116
def annotated_dataset_path(cls, project, dataset, annotated_dataset): """Return a fully-qualified annotated_dataset string.""" return google.api_core.path_template.expand( "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}", project=project, dataset=dataset, annotated_dataset=annotated_dataset, )
[ "def", "annotated_dataset_path", "(", "cls", ",", "project", ",", "dataset", ",", "annotated_dataset", ")", ":", "return", "google", ".", "api_core", ".", "path_template", ".", "expand", "(", "\"projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}\"", ",", "project", "=", "project", ",", "dataset", "=", "dataset", ",", "annotated_dataset", "=", "annotated_dataset", ",", ")" ]
Return a fully-qualified annotated_dataset string.
[ "Return", "a", "fully", "-", "qualified", "annotated_dataset", "string", "." ]
python
train
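The classmethod just expands a path template, so it can be called without a live client; the project/dataset IDs below are hypothetical:

from google.cloud import datalabeling_v1beta1

name = datalabeling_v1beta1.DataLabelingServiceClient.annotated_dataset_path(
    'my-project', 'my-dataset', 'my-annotated-dataset')
# -> 'projects/my-project/datasets/my-dataset/annotatedDatasets/my-annotated-dataset'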
MichaelAquilina/S4
s4/clients/local.py
https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/clients/local.py#L77-L86
def unlock(self): """ Unlock the active advisory lock. """ logger.debug("Releasing lock %s", self.lock_file) self._lock.release() try: os.unlink(self.lock_file) except FileNotFoundError: pass
[ "def", "unlock", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Releasing lock %s\"", ",", "self", ".", "lock_file", ")", "self", ".", "_lock", ".", "release", "(", ")", "try", ":", "os", ".", "unlink", "(", "self", ".", "lock_file", ")", "except", "FileNotFoundError", ":", "pass" ]
Unlock the active advisory lock.
[ "Unlock", "the", "active", "advisory", "lock", "." ]
python
train
GoogleCloudPlatform/python-repo-tools
gcp_devrel/tools/pylint.py
https://github.com/GoogleCloudPlatform/python-repo-tools/blob/87422ba91814529848a2b8bf8be4294283a3e041/gcp_devrel/tools/pylint.py#L148-L167
def determine_final_config(config_module): """Determines the final additions and replacements. Combines the config module with the defaults. Args: config_module: The loaded local configuration module. Returns: Config: the final configuration. """ config = Config( DEFAULT_LIBRARY_RC_ADDITIONS, DEFAULT_LIBRARY_RC_REPLACEMENTS, DEFAULT_TEST_RC_ADDITIONS, DEFAULT_TEST_RC_REPLACEMENTS) for field in config._fields: if hasattr(config_module, field): config = config._replace(**{field: getattr(config_module, field)}) return config
[ "def", "determine_final_config", "(", "config_module", ")", ":", "config", "=", "Config", "(", "DEFAULT_LIBRARY_RC_ADDITIONS", ",", "DEFAULT_LIBRARY_RC_REPLACEMENTS", ",", "DEFAULT_TEST_RC_ADDITIONS", ",", "DEFAULT_TEST_RC_REPLACEMENTS", ")", "for", "field", "in", "config", ".", "_fields", ":", "if", "hasattr", "(", "config_module", ",", "field", ")", ":", "config", "=", "config", ".", "_replace", "(", "*", "*", "{", "field", ":", "getattr", "(", "config_module", ",", "field", ")", "}", ")", "return", "config" ]
Determines the final additions and replacements. Combines the config module with the defaults. Args: config_module: The loaded local configuration module. Returns: Config: the final configuration.
[ "Determines", "the", "final", "additions", "and", "replacements", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L25291-L25320
def get_screen_resolution(self, screen_id): """Queries certain attributes such as display width, height, color depth and the X and Y origin for a given guest screen. The parameters @a xOrigin and @a yOrigin return the X and Y coordinates of the framebuffer's origin. All return parameters are optional. in screen_id of type int out width of type int out height of type int out bits_per_pixel of type int out x_origin of type int out y_origin of type int out guest_monitor_status of type :class:`GuestMonitorStatus` """ if not isinstance(screen_id, baseinteger): raise TypeError("screen_id can only be an instance of type baseinteger") (width, height, bits_per_pixel, x_origin, y_origin, guest_monitor_status) = self._call("getScreenResolution", in_p=[screen_id]) guest_monitor_status = GuestMonitorStatus(guest_monitor_status) return (width, height, bits_per_pixel, x_origin, y_origin, guest_monitor_status)
[ "def", "get_screen_resolution", "(", "self", ",", "screen_id", ")", ":", "if", "not", "isinstance", "(", "screen_id", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"screen_id can only be an instance of type baseinteger\"", ")", "(", "width", ",", "height", ",", "bits_per_pixel", ",", "x_origin", ",", "y_origin", ",", "guest_monitor_status", ")", "=", "self", ".", "_call", "(", "\"getScreenResolution\"", ",", "in_p", "=", "[", "screen_id", "]", ")", "guest_monitor_status", "=", "GuestMonitorStatus", "(", "guest_monitor_status", ")", "return", "(", "width", ",", "height", ",", "bits_per_pixel", ",", "x_origin", ",", "y_origin", ",", "guest_monitor_status", ")" ]
Queries certain attributes such as display width, height, color depth and the X and Y origin for a given guest screen. The parameters @a xOrigin and @a yOrigin return the X and Y coordinates of the framebuffer's origin. All return parameters are optional. in screen_id of type int out width of type int out height of type int out bits_per_pixel of type int out x_origin of type int out y_origin of type int out guest_monitor_status of type :class:`GuestMonitorStatus`
[ "Queries", "certain", "attributes", "such", "as", "display", "width", "height", "color", "depth", "and", "the", "X", "and", "Y", "origin", "for", "a", "given", "guest", "screen", ".", "The", "parameters", "@a", "xOrigin", "and", "@a", "yOrigin", "return", "the", "X", "and", "Y", "coordinates", "of", "the", "framebuffer", "s", "origin", ".", "All", "return", "parameters", "are", "optional", "." ]
python
train
wakatime/wakatime
wakatime/packages/configparser/__init__.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L1235-L1239
def set(self, section, option, value=None): """Set an option. Extends RawConfigParser.set by validating type and interpolation syntax on the value.""" _, option, value = self._validate_value_types(option=option, value=value) super(ConfigParser, self).set(section, option, value)
[ "def", "set", "(", "self", ",", "section", ",", "option", ",", "value", "=", "None", ")", ":", "_", ",", "option", ",", "value", "=", "self", ".", "_validate_value_types", "(", "option", "=", "option", ",", "value", "=", "value", ")", "super", "(", "ConfigParser", ",", "self", ")", ".", "set", "(", "section", ",", "option", ",", "value", ")" ]
Set an option. Extends RawConfigParser.set by validating type and interpolation syntax on the value.
[ "Set", "an", "option", ".", "Extends", "RawConfigParser", ".", "set", "by", "validating", "type", "and", "interpolation", "syntax", "on", "the", "value", "." ]
python
train
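The vendored `ConfigParser.set` mirrors the stdlib behaviour, so a stdlib sketch shows the added type validation:

from configparser import ConfigParser

cp = ConfigParser()
cp.add_section('settings')
cp.set('settings', 'debug', 'true')  # fine: value is a string
# cp.set('settings', 'retries', 3)   # raises TypeError: non-string values are rejected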
xtuml/pyxtuml
bridgepoint/prebuild.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/bridgepoint/prebuild.py#L46-L61
def get_defining_component(pe_pe): ''' get the C_C in which pe_pe is defined ''' if pe_pe is None: return None if pe_pe.__class__.__name__ != 'PE_PE': pe_pe = xtuml.navigate_one(pe_pe).PE_PE[8001]() ep_pkg = xtuml.navigate_one(pe_pe).EP_PKG[8000]() if ep_pkg: return get_defining_component(ep_pkg) return xtuml.navigate_one(pe_pe).C_C[8003]()
[ "def", "get_defining_component", "(", "pe_pe", ")", ":", "if", "pe_pe", "is", "None", ":", "return", "None", "if", "pe_pe", ".", "__class__", ".", "__name__", "!=", "'PE_PE'", ":", "pe_pe", "=", "xtuml", ".", "navigate_one", "(", "pe_pe", ")", ".", "PE_PE", "[", "8001", "]", "(", ")", "ep_pkg", "=", "xtuml", ".", "navigate_one", "(", "pe_pe", ")", ".", "EP_PKG", "[", "8000", "]", "(", ")", "if", "ep_pkg", ":", "return", "get_defining_component", "(", "ep_pkg", ")", "return", "xtuml", ".", "navigate_one", "(", "pe_pe", ")", ".", "C_C", "[", "8003", "]", "(", ")" ]
get the C_C in which pe_pe is defined
[ "get", "the", "C_C", "in", "which", "pe_pe", "is", "defined" ]
python
test
kiliankoe/dvbpy
dvb/dvb.py
https://github.com/kiliankoe/dvbpy/blob/d499706ae56386d680f78975d3512d56f848e9dc/dvb/dvb.py#L14-L52
def monitor(stop, offset=0, limit=10, city='Dresden', *, raw=False):
    """
    VVO Online Monitor (GET http://widgets.vvo-online.de/abfahrtsmonitor/Abfahrten.do)

    :param stop: Name of Stop
    :param offset: Minimum time of arrival
    :param limit: Count of returned results
    :param city: Name of City
    :param raw: Return raw response
    :return: List of departures (line, direction, arrival)
    """
    try:
        r = requests.get(
            url='http://widgets.vvo-online.de/abfahrtsmonitor/Abfahrten.do',
            params={
                'ort': city,
                'hst': stop,
                'vz': offset,
                'lim': limit,
            },
        )
        if r.status_code == 200:
            response = json.loads(r.content.decode('utf-8'))
        else:
            raise requests.HTTPError('HTTP Status: {}'.format(r.status_code))
    except requests.RequestException as e:
        print('Failed to access VVO monitor. Request Exception', e)
        response = None

    if response is None:
        return None
    return response if raw else [
        {
            'line': line,
            'direction': direction,
            'arrival': 0 if arrival == '' else int(arrival)
        } for line, direction, arrival in response
    ]
[ "def", "monitor", "(", "stop", ",", "offset", "=", "0", ",", "limit", "=", "10", ",", "city", "=", "'Dresden'", ",", "*", ",", "raw", "=", "False", ")", ":", "try", ":", "r", "=", "requests", ".", "get", "(", "url", "=", "'http://widgets.vvo-online.de/abfahrtsmonitor/Abfahrten.do'", ",", "params", "=", "{", "'ort'", ":", "city", ",", "'hst'", ":", "stop", ",", "'vz'", ":", "offset", ",", "'lim'", ":", "limit", ",", "}", ",", ")", "if", "r", ".", "status_code", "==", "200", ":", "response", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "'utf-8'", ")", ")", "else", ":", "raise", "requests", ".", "HTTPError", "(", "'HTTP Status: {}'", ".", "format", "(", "r", ".", "status_code", ")", ")", "except", "requests", ".", "RequestException", "as", "e", ":", "print", "(", "'Failed to access VVO monitor. Request Exception'", ",", "e", ")", "response", "=", "None", "if", "response", "is", "None", ":", "return", "None", "return", "response", "if", "raw", "else", "[", "{", "'line'", ":", "line", ",", "'direction'", ":", "direction", ",", "'arrival'", ":", "0", "if", "arrival", "==", "''", "else", "int", "(", "arrival", ")", "}", "for", "line", ",", "direction", ",", "arrival", "in", "response", "]" ]
VVO Online Monitor (GET http://widgets.vvo-online.de/abfahrtsmonitor/Abfahrten.do)

:param stop: Name of Stop
:param offset: Minimum time of arrival
:param limit: Count of returned results
:param city: Name of City
:param raw: Return raw response
:return: List of departures (line, direction, arrival)
[ "VVO", "Online", "Monitor", "(", "GET", "http", ":", "//", "widgets", ".", "vvo", "-", "online", ".", "de", "/", "abfahrtsmonitor", "/", "Abfahrten", ".", "do", ")" ]
python
train
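A usage sketch for `monitor` (performs a live HTTP GET; the stop name and return values are illustrative, and `None` comes back on request failure):

departures = monitor('Albertplatz', limit=2)
# e.g. [{'line': '3', 'direction': 'Wilder Mann', 'arrival': 4},
#       {'line': '7', 'direction': 'Weixdorf', 'arrival': 9}]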
Yelp/py_zipkin
py_zipkin/thrift/__init__.py
https://github.com/Yelp/py_zipkin/blob/0944d9a3fb1f1798dbb276694aeed99f2b4283ba/py_zipkin/thrift/__init__.py#L24-L34
def create_annotation(timestamp, value, host):
    """
    Create a zipkin annotation object

    :param timestamp: timestamp of when the annotation occurred in microseconds
    :param value: name of the annotation, such as 'sr'
    :param host: zipkin endpoint object

    :returns: zipkin annotation object
    """
    return zipkin_core.Annotation(timestamp=timestamp, value=value, host=host)
[ "def", "create_annotation", "(", "timestamp", ",", "value", ",", "host", ")", ":", "return", "zipkin_core", ".", "Annotation", "(", "timestamp", "=", "timestamp", ",", "value", "=", "value", ",", "host", "=", "host", ")" ]
Create a zipkin annotation object

:param timestamp: timestamp of when the annotation occurred in microseconds
:param value: name of the annotation, such as 'sr'
:param host: zipkin endpoint object

:returns: zipkin annotation object
[ "Create", "a", "zipkin", "annotation", "object" ]
python
test
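A hedged sketch of building an annotation; it assumes the module's sibling endpoint helper keeps its usual signature, and the timestamp is illustrative:

from py_zipkin.thrift import create_endpoint  # assumed sibling helper in this module

endpoint = create_endpoint(port=8080, service_name='my-service')
ann = create_annotation(
    timestamp=1461822000 * 1000000,  # illustrative: microseconds since epoch
    value='sr',                      # 'sr' = server receive
    host=endpoint,
)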
fabioz/PyDev.Debugger
third_party/isort_container/isort/natural.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/isort_container/isort/natural.py#L39-L47
def nsorted(to_sort, key=None): """Returns a naturally sorted list""" if key is None: key_callback = _natural_keys else: def key_callback(item): return _natural_keys(key(item)) return sorted(to_sort, key=key_callback)
[ "def", "nsorted", "(", "to_sort", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "key_callback", "=", "_natural_keys", "else", ":", "def", "key_callback", "(", "item", ")", ":", "return", "_natural_keys", "(", "key", "(", "item", ")", ")", "return", "sorted", "(", "to_sort", ",", "key", "=", "key_callback", ")" ]
Returns a naturally sorted list
[ "Returns", "a", "naturally", "sorted", "list" ]
python
train
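Illustrative calls showing natural ordering, with and without a key callback:

nsorted(['file10', 'file2', 'file1'])
# -> ['file1', 'file2', 'file10']  (numeric runs compared as numbers)
nsorted([{'name': 'a10'}, {'name': 'a2'}], key=lambda d: d['name'])
# -> [{'name': 'a2'}, {'name': 'a10'}]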
shalabhms/reliable-collections-cli
rcctl/rcctl/config.py
https://github.com/shalabhms/reliable-collections-cli/blob/195d69816fb5a6e1e9ab0ab66b606b1248b4780d/rcctl/rcctl/config.py#L27-L31
def get_config_bool(name): """Checks if a config value is set to a valid bool value.""" cli_config = CLIConfig(SF_CLI_CONFIG_DIR, SF_CLI_ENV_VAR_PREFIX) return cli_config.getboolean('servicefabric', name, False)
[ "def", "get_config_bool", "(", "name", ")", ":", "cli_config", "=", "CLIConfig", "(", "SF_CLI_CONFIG_DIR", ",", "SF_CLI_ENV_VAR_PREFIX", ")", "return", "cli_config", ".", "getboolean", "(", "'servicefabric'", ",", "name", ",", "False", ")" ]
Checks if a config value is set to a valid bool value.
[ "Checks", "if", "a", "config", "value", "is", "set", "to", "a", "valid", "bool", "value", "." ]
python
valid
sdispater/orator
orator/dbal/index.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/dbal/index.py#L65-L84
def get_quoted_columns(self, platform):
    """
    Returns the quoted representation of the column names
    the constraint is associated with.

    But only if they were defined with one, or if a column name
    is a keyword reserved by the platform.
    Otherwise the plain unquoted value as inserted is returned.

    :param platform: The platform to use for quotation.
    :type platform: Platform

    :rtype: list
    """
    columns = []

    for column in self._columns.values():
        columns.append(column.get_quoted_name(platform))

    return columns
[ "def", "get_quoted_columns", "(", "self", ",", "platform", ")", ":", "columns", "=", "[", "]", "for", "column", "in", "self", ".", "_columns", ".", "values", "(", ")", ":", "columns", ".", "append", "(", "column", ".", "get_quoted_name", "(", "platform", ")", ")", "return", "columns" ]
Returns the quoted representation of the column names
the constraint is associated with.

But only if they were defined with one, or if a column name
is a keyword reserved by the platform.
Otherwise the plain unquoted value as inserted is returned.

:param platform: The platform to use for quotation.
:type platform: Platform

:rtype: list
[ "Returns", "the", "quoted", "representation", "of", "the", "column", "names", "the", "constraint", "is", "associated", "with", "." ]
python
train
OpenAgInitiative/openag_python
openag/couch.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/couch.py#L54-L79
def create_user(self, username, password): """ Creates a user in the CouchDB instance with the username `username` and password `password` """ user_id = "org.couchdb.user:" + username res = self["_users"].resource.put( user_id, body=json.dumps({ "_id": user_id, "name": username, "roles": [], "type": "user", "password": password, "farms": [] }) ) if res[0] == 409: raise RuntimeError( 'The username "{}" is already taken'.format(username) ) elif res[0] != 201: raise RuntimeError( "Failed to create user ({}): {}".format( res.status_code, res.content ) )
[ "def", "create_user", "(", "self", ",", "username", ",", "password", ")", ":", "user_id", "=", "\"org.couchdb.user:\"", "+", "username", "res", "=", "self", "[", "\"_users\"", "]", ".", "resource", ".", "put", "(", "user_id", ",", "body", "=", "json", ".", "dumps", "(", "{", "\"_id\"", ":", "user_id", ",", "\"name\"", ":", "username", ",", "\"roles\"", ":", "[", "]", ",", "\"type\"", ":", "\"user\"", ",", "\"password\"", ":", "password", ",", "\"farms\"", ":", "[", "]", "}", ")", ")", "if", "res", "[", "0", "]", "==", "409", ":", "raise", "RuntimeError", "(", "'The username \"{}\" is already taken'", ".", "format", "(", "username", ")", ")", "elif", "res", "[", "0", "]", "!=", "201", ":", "raise", "RuntimeError", "(", "\"Failed to create user ({}): {}\"", ".", "format", "(", "res", ".", "status_code", ",", "res", ".", "content", ")", ")" ]
Creates a user in the CouchDB instance with the username `username` and password `password`
[ "Creates", "a", "user", "in", "the", "CouchDB", "instance", "with", "the", "username", "username", "and", "password", "password" ]
python
train
iterative/dvc
dvc/config.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/config.py#L567-L579
def set(config, section, opt, value): """Sets specified option in the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): option name. value: value to set option to. """ if section not in config.keys(): config[section] = {} config[section][opt] = value
[ "def", "set", "(", "config", ",", "section", ",", "opt", ",", "value", ")", ":", "if", "section", "not", "in", "config", ".", "keys", "(", ")", ":", "config", "[", "section", "]", "=", "{", "}", "config", "[", "section", "]", "[", "opt", "]", "=", "value" ]
Sets specified option in the config. Args: config (configobj.ConfigObj): config to work on. section (str): section name. opt (str): option name. value: value to set option to.
[ "Sets", "specified", "option", "in", "the", "config", "." ]
python
train
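Called directly as the plain function defined above (dvc itself passes a `configobj.ConfigObj`; any dict-like mapping works for this sketch, and the section/option names are hypothetical):

config = {}
set(config, 'core', 'remote', 'myremote')  # the helper above, not the builtin set()
# config is now {'core': {'remote': 'myremote'}}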
OLC-Bioinformatics/sipprverse
pointfinder/PointFinder.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/pointfinder/PointFinder.py#L643-L696
def name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, start_offset): """ This function serves to name the individual mutations dependently on the type of the mutation. """ # Get the subject and query sequences without gaps sbjct_nucs = sbjct_rf_indel.replace("-", "") qry_nucs = qry_rf_indel.replace("-", "") # Translate nucleotides to amino acids aa_ref = "" aa_alt = "" for i in range(0, len(sbjct_nucs), 3): aa_ref += aa(sbjct_nucs[i:i+3]) for i in range(0, len(qry_nucs), 3): aa_alt += aa(qry_nucs[i:i+3]) # Identify the gapped sequence if mut == "ins": gapped_seq = sbjct_rf_indel else: gapped_seq = qry_rf_indel gap_size = gapped_seq.count("-") # Write mutation names if gap_size < 3 and len(sbjct_nucs) ==3 and len(qry_nucs) == 3: # Write mutation name for substitution mutation mut_name = "p.%s%d%s"%(aa(sbjct_nucs), codon_no, aa(qry_nucs)) elif len(gapped_seq) == gap_size: if mut == "ins": # Write mutation name for insertion mutation mut_name = name_insertion(sbjct_seq, codon_no, sbjct_nucs, aa_alt, start_offset) aa_ref = mut else: # Write mutation name for deletion mutation mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "del") aa_alt = mut # Check for delins - mix of insertion and deletion else: # Write mutation name for a mixed insertion and deletion mutation mut_name = name_deletion(sbjct_seq, sbjct_rf_indel, sbjct_nucs, codon_no, aa_alt, start_offset, mutation = "delins") # Check for frameshift if gapped_seq.count("-")%3 != 0: # Add the frameshift tag to mutation name mut_name += " - Frameshift" return mut_name, aa_ref, aa_alt
[ "def", "name_indel_mutation", "(", "sbjct_seq", ",", "indel", ",", "sbjct_rf_indel", ",", "qry_rf_indel", ",", "codon_no", ",", "mut", ",", "start_offset", ")", ":", "# Get the subject and query sequences without gaps", "sbjct_nucs", "=", "sbjct_rf_indel", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "qry_nucs", "=", "qry_rf_indel", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "# Translate nucleotides to amino acids", "aa_ref", "=", "\"\"", "aa_alt", "=", "\"\"", "for", "i", "in", "range", "(", "0", ",", "len", "(", "sbjct_nucs", ")", ",", "3", ")", ":", "aa_ref", "+=", "aa", "(", "sbjct_nucs", "[", "i", ":", "i", "+", "3", "]", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "qry_nucs", ")", ",", "3", ")", ":", "aa_alt", "+=", "aa", "(", "qry_nucs", "[", "i", ":", "i", "+", "3", "]", ")", "# Identify the gapped sequence ", "if", "mut", "==", "\"ins\"", ":", "gapped_seq", "=", "sbjct_rf_indel", "else", ":", "gapped_seq", "=", "qry_rf_indel", "gap_size", "=", "gapped_seq", ".", "count", "(", "\"-\"", ")", "# Write mutation names", "if", "gap_size", "<", "3", "and", "len", "(", "sbjct_nucs", ")", "==", "3", "and", "len", "(", "qry_nucs", ")", "==", "3", ":", "# Write mutation name for substitution mutation", "mut_name", "=", "\"p.%s%d%s\"", "%", "(", "aa", "(", "sbjct_nucs", ")", ",", "codon_no", ",", "aa", "(", "qry_nucs", ")", ")", "elif", "len", "(", "gapped_seq", ")", "==", "gap_size", ":", "if", "mut", "==", "\"ins\"", ":", "# Write mutation name for insertion mutation", "mut_name", "=", "name_insertion", "(", "sbjct_seq", ",", "codon_no", ",", "sbjct_nucs", ",", "aa_alt", ",", "start_offset", ")", "aa_ref", "=", "mut", "else", ":", "# Write mutation name for deletion mutation", "mut_name", "=", "name_deletion", "(", "sbjct_seq", ",", "sbjct_rf_indel", ",", "sbjct_nucs", ",", "codon_no", ",", "aa_alt", ",", "start_offset", ",", "mutation", "=", "\"del\"", ")", "aa_alt", "=", "mut", "# Check for delins - mix of insertion and deletion", "else", ":", "# Write mutation name for a mixed insertion and deletion mutation", "mut_name", "=", "name_deletion", "(", "sbjct_seq", ",", "sbjct_rf_indel", ",", "sbjct_nucs", ",", "codon_no", ",", "aa_alt", ",", "start_offset", ",", "mutation", "=", "\"delins\"", ")", "# Check for frameshift", "if", "gapped_seq", ".", "count", "(", "\"-\"", ")", "%", "3", "!=", "0", ":", "# Add the frameshift tag to mutation name", "mut_name", "+=", "\" - Frameshift\"", "return", "mut_name", ",", "aa_ref", ",", "aa_alt" ]
This function serves to name the individual mutations depending on the type of the mutation.
[ "This", "function", "serves", "to", "name", "the", "individual", "mutations", "dependently", "on", "the", "type", "of", "the", "mutation", "." ]
python
train
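To illustrate the substitution branch above: when both windows are a single ungapped codon, the name reduces to the p.<ref><pos><alt> pattern. A sketch with a stub codon table (the real aa() helper is defined elsewhere in PointFinder.py and is not shown):

# Stub translation table covering only the codons used in this example.
CODON_TO_AA = {"TCG": "S", "TTG": "L"}


def aa(codon):
    return CODON_TO_AA[codon]


sbjct_nucs, qry_nucs, codon_no = "TCG", "TTG", 83
# Matches the substitution branch: gap_size < 3 and both windows are one codon.
mut_name = "p.%s%d%s" % (aa(sbjct_nucs), codon_no, aa(qry_nucs))
print(mut_name)  # p.S83L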
Alignak-monitoring/alignak
alignak/objects/hostgroup.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/hostgroup.py#L380-L405
def explode(self): """ Fill members with hostgroup_members :return: None """ # We do not want a same hostgroup to be exploded again and again # so we tag it for tmp_hg in list(self.items.values()): tmp_hg.already_exploded = False for hostgroup in list(self.items.values()): if hostgroup.already_exploded: continue # get_hosts_by_explosion is a recursive # function, so we must tag hg so we do not loop for tmp_hg in list(self.items.values()): tmp_hg.rec_tag = False hostgroup.get_hosts_by_explosion(self) # We clean the tags for tmp_hg in list(self.items.values()): if hasattr(tmp_hg, 'rec_tag'): del tmp_hg.rec_tag del tmp_hg.already_exploded
[ "def", "explode", "(", "self", ")", ":", "# We do not want a same hostgroup to be exploded again and again", "# so we tag it", "for", "tmp_hg", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "tmp_hg", ".", "already_exploded", "=", "False", "for", "hostgroup", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "if", "hostgroup", ".", "already_exploded", ":", "continue", "# get_hosts_by_explosion is a recursive", "# function, so we must tag hg so we do not loop", "for", "tmp_hg", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "tmp_hg", ".", "rec_tag", "=", "False", "hostgroup", ".", "get_hosts_by_explosion", "(", "self", ")", "# We clean the tags", "for", "tmp_hg", "in", "list", "(", "self", ".", "items", ".", "values", "(", ")", ")", ":", "if", "hasattr", "(", "tmp_hg", ",", "'rec_tag'", ")", ":", "del", "tmp_hg", ".", "rec_tag", "del", "tmp_hg", ".", "already_exploded" ]
Fill members with hostgroup_members :return: None
[ "Fill", "members", "with", "hostgroup_members" ]
python
train
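The tagging idiom above (already_exploded plus rec_tag) is a general guard against re-visiting nodes in a cyclic membership graph. A minimal sketch of the same pattern on plain dicts (names are illustrative, not Alignak's API):

def expand_members(groups):
    # groups maps name -> {"members": set, "group_members": [names]}
    def expand(name, seen):
        group = groups[name]
        for sub in group["group_members"]:
            if sub in seen:  # rec_tag equivalent: break membership cycles
                continue
            seen.add(sub)
            expand(sub, seen)
            group["members"] |= groups[sub]["members"]

    for name in groups:
        expand(name, {name})


groups = {
    "web": {"members": {"h1"}, "group_members": ["db"]},
    "db": {"members": {"h2"}, "group_members": ["web"]},  # deliberate cycle
}
expand_members(groups)
print(sorted(groups["web"]["members"]))  # ['h1', 'h2']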
topic2k/pygcgen
pygcgen/reader.py
https://github.com/topic2k/pygcgen/blob/c41701815df2c8c3a57fd5f7b8babe702127c8a1/pygcgen/reader.py#L36-L60
def parse_heading(heading): """ Parse a single heading and return a Hash The following heading structures are currently valid: - ## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1) (2015-03-24) - ## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1) - ## v1.0.2 (2015-03-24) - ## v1.0.2 @param [String] heading Heading from the ChangeLog File @return [Hash] Returns a structured Hash with version, url and date """ heading_structures = [ r"^## \[(?P<version>.+?)\]\((?P<url>.+?)\)( \((?P<date>.+?)\))?$", r"^## (?P<version>.+?)( \((?P<date>.+?)\))?$", ] captures = {"version": None, "url": None, "date": None} for regexp in heading_structures: matches = re.match(regexp, heading) if matches: captures.update(matches.groupdict()) break return captures
[ "def", "parse_heading", "(", "heading", ")", ":", "heading_structures", "=", "[", "r\"^## \\[(?P<version>.+?)\\]\\((?P<url>.+?)\\)( \\((?P<date>.+?)\\))?$\"", ",", "r\"^## (?P<version>.+?)( \\((?P<date>.+?)\\))?$\"", ",", "]", "captures", "=", "{", "\"version\"", ":", "None", ",", "\"url\"", ":", "None", ",", "\"date\"", ":", "None", "}", "for", "regexp", "in", "heading_structures", ":", "matches", "=", "re", ".", "match", "(", "regexp", ",", "heading", ")", "if", "matches", ":", "captures", ".", "update", "(", "matches", ".", "groupdict", "(", ")", ")", "break", "return", "captures" ]
Parse a single heading and return a Hash The following heading structures are currently valid: - ## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1) (2015-03-24) - ## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1) - ## v1.0.2 (2015-03-24) - ## v1.0.2 @param [String] heading Heading from the ChangeLog File @return [Hash] Returns a structured Hash with version, url and date
[ "Parse", "a", "single", "heading", "and", "return", "a", "Hash", "The", "following", "heading", "structures", "are", "currently", "valid", ":", "-", "##", "[", "v1", ".", "0", ".", "2", "]", "(", "https", ":", "//", "github", ".", "com", "/", "zanui", "/", "chef", "-", "thumbor", "/", "tree", "/", "v1", ".", "0", ".", "1", ")", "(", "2015", "-", "03", "-", "24", ")", "-", "##", "[", "v1", ".", "0", ".", "2", "]", "(", "https", ":", "//", "github", ".", "com", "/", "zanui", "/", "chef", "-", "thumbor", "/", "tree", "/", "v1", ".", "0", ".", "1", ")", "-", "##", "v1", ".", "0", ".", "2", "(", "2015", "-", "03", "-", "24", ")", "-", "##", "v1", ".", "0", ".", "2" ]
python
valid
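A usage sketch, assuming pygcgen is importable; the expected dicts follow directly from the two regexes above:

from pygcgen.reader import parse_heading  # assumes pygcgen is installed

print(parse_heading(
    "## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1) (2015-03-24)"))
# {'version': 'v1.0.2',
#  'url': 'https://github.com/zanui/chef-thumbor/tree/v1.0.1',
#  'date': '2015-03-24'}
print(parse_heading("## v1.0.2"))
# {'version': 'v1.0.2', 'url': None, 'date': None}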
pycontribs/pyrax
pyrax/utils.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/utils.py#L565-L576
def get_id(id_or_obj): """ Returns the 'id' attribute of 'id_or_obj' if present; if not, returns 'id_or_obj'. """ if isinstance(id_or_obj, six.string_types + (int,)): # It's an ID return id_or_obj try: return id_or_obj.id except AttributeError: return id_or_obj
[ "def", "get_id", "(", "id_or_obj", ")", ":", "if", "isinstance", "(", "id_or_obj", ",", "six", ".", "string_types", "+", "(", "int", ",", ")", ")", ":", "# It's an ID", "return", "id_or_obj", "try", ":", "return", "id_or_obj", ".", "id", "except", "AttributeError", ":", "return", "id_or_obj" ]
Returns the 'id' attribute of 'id_or_obj' if present; if not, returns 'id_or_obj'.
[ "Returns", "the", "id", "attribute", "of", "id_or_obj", "if", "present", ";", "if", "not", "returns", "id_or_obj", "." ]
python
train
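Usage is straightforward; a quick sketch against pyrax.utils:

from pyrax import utils


class Server(object):
    id = "abc123"


print(utils.get_id(Server()))   # 'abc123' (reads the .id attribute)
print(utils.get_id("abc123"))   # 'abc123' (strings pass through)
print(utils.get_id(42))         # 42 (ints pass through)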
viralogic/py-enumerable
py_linq/py_linq3.py
https://github.com/viralogic/py-enumerable/blob/63363649bccef223379e1e87056747240c83aa9d/py_linq/py_linq3.py#L447-L462
def union(self, enumerable, key=lambda x: x): """ Returns enumerable that is a union of elements between self and given enumerable :param enumerable: enumerable to union self to :param key: key selector used to determine uniqueness :return: new Enumerable object """ if not isinstance(enumerable, Enumerable3): raise TypeError( u"enumerable parameter must be an instance of Enumerable") if self.count() == 0: return enumerable if enumerable.count() == 0: return self return self.concat(enumerable).distinct(key)
[ "def", "union", "(", "self", ",", "enumerable", ",", "key", "=", "lambda", "x", ":", "x", ")", ":", "if", "not", "isinstance", "(", "enumerable", ",", "Enumerable3", ")", ":", "raise", "TypeError", "(", "u\"enumerable parameter must be an instance of Enumerable\"", ")", "if", "self", ".", "count", "(", ")", "==", "0", ":", "return", "enumerable", "if", "enumerable", ".", "count", "(", ")", "==", "0", ":", "return", "self", "return", "self", ".", "concat", "(", "enumerable", ")", ".", "distinct", "(", "key", ")" ]
Returns enumerable that is a union of elements between self and given enumerable :param enumerable: enumerable to union self to :param key: key selector used to determine uniqueness :return: new Enumerable object
[ "Returns", "enumerable", "that", "is", "a", "union", "of", "elements", "between", "self", "and", "given", "enumerable", ":", "param", "enumerable", ":", "enumerable", "to", "union", "self", "to", ":", "param", "key", ":", "key", "selector", "used", "to", "determine", "uniqueness", ":", "return", ":", "new", "Enumerable", "object" ]
python
train
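A hedged usage sketch; the import follows the py_linq package name in the record's path, though the exact public constructor is an assumption:

from py_linq import Enumerable  # assumption: the package exports Enumerable

a = Enumerable([1, 2, 3])
b = Enumerable([3, 4])
# Duplicates across both sequences are removed by the trailing distinct().
print(a.union(b).to_list())  # [1, 2, 3, 4]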
hammerlab/cohorts
cohorts/cohort.py
https://github.com/hammerlab/cohorts/blob/278b05e609e6c4d4a77c57d49446460be53ea33e/cohorts/cohort.py#L790-L822
def load_effects(self, patients=None, only_nonsynonymous=False, all_effects=False, filter_fn=None, **kwargs): """Load a dictionary of patient_id to varcode.EffectCollection Note that this only loads one effect per variant. Parameters ---------- patients : str, optional Filter to a subset of patients only_nonsynonymous : bool, optional If true, load only nonsynonymous effects, default False all_effects : bool, optional If true, return all effects rather than only the top-priority effect per variant filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- effects Dictionary of patient_id to varcode.EffectCollection """ filter_fn = first_not_none_param([filter_fn, self.filter_fn], no_filter) filter_fn_name = self._get_function_name(filter_fn) logger.debug("loading effects with filter_fn {}".format(filter_fn_name)) patient_effects = {} for patient in self.iter_patients(patients): effects = self._load_single_patient_effects( patient, only_nonsynonymous, all_effects, filter_fn, **kwargs) if effects is not None: patient_effects[patient.id] = effects return patient_effects
[ "def", "load_effects", "(", "self", ",", "patients", "=", "None", ",", "only_nonsynonymous", "=", "False", ",", "all_effects", "=", "False", ",", "filter_fn", "=", "None", ",", "*", "*", "kwargs", ")", ":", "filter_fn", "=", "first_not_none_param", "(", "[", "filter_fn", ",", "self", ".", "filter_fn", "]", ",", "no_filter", ")", "filter_fn_name", "=", "self", ".", "_get_function_name", "(", "filter_fn", ")", "logger", ".", "debug", "(", "\"loading effects with filter_fn {}\"", ".", "format", "(", "filter_fn_name", ")", ")", "patient_effects", "=", "{", "}", "for", "patient", "in", "self", ".", "iter_patients", "(", "patients", ")", ":", "effects", "=", "self", ".", "_load_single_patient_effects", "(", "patient", ",", "only_nonsynonymous", ",", "all_effects", ",", "filter_fn", ",", "*", "*", "kwargs", ")", "if", "effects", "is", "not", "None", ":", "patient_effects", "[", "patient", ".", "id", "]", "=", "effects", "return", "patient_effects" ]
Load a dictionary of patient_id to varcode.EffectCollection Note that this only loads one effect per variant. Parameters ---------- patients : str, optional Filter to a subset of patients only_nonsynonymous : bool, optional If true, load only nonsynonymous effects, default False all_effects : bool, optional If true, return all effects rather than only the top-priority effect per variant filter_fn : function Takes a FilterableEffect and returns a boolean. Only effects returning True are preserved. Overrides default self.filter_fn. `None` passes through to self.filter_fn. Returns ------- effects Dictionary of patient_id to varcode.EffectCollection
[ "Load", "a", "dictionary", "of", "patient_id", "to", "varcode", ".", "EffectCollection" ]
python
train
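A usage sketch; `cohort` is assumed to be an already-constructed cohorts.Cohort, since building one depends on local patient data, and the filter function is illustrative:

def keep_all(filterable_effect):
    # A FilterableEffect wraps a varcode effect; keep everything here.
    return True


effects_by_patient = cohort.load_effects(
    only_nonsynonymous=True,  # restrict to nonsynonymous effects
    filter_fn=keep_all,       # overrides cohort.filter_fn for this call
)
for patient_id, effects in effects_by_patient.items():
    print(patient_id, len(effects))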
aparo/pyes
performance/utils.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/performance/utils.py#L10-L14
def get_names(): """ Return a list of names. """ return [n.strip() for n in codecs.open(os.path.join("data", "names.txt"),"rb",'utf8').readlines()]
[ "def", "get_names", "(", ")", ":", "return", "[", "n", ".", "strip", "(", ")", "for", "n", "in", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "\"names.txt\"", ")", ",", "\"rb\"", ",", "'utf8'", ")", ".", "readlines", "(", ")", "]" ]
Return a list of names.
[ "Return", "a", "list", "of", "names", "." ]
python
train
cocaine/cocaine-framework-python
cocaine/detail/secadaptor.py
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/secadaptor.py#L54-L65
def fetch_token(self):
    """
    Obtains a token from the secure backend service.

    :return: Token formatted for Cocaine protocol header.
    """
    grant_type = 'client_credentials'

    channel = yield self._tvm.ticket_full(
        self._client_id, self._client_secret, grant_type, {})
    ticket = yield channel.rx.get()
    raise gen.Return(self._make_token(ticket))
[ "def", "fetch_token", "(", "self", ")", ":", "grant_type", "=", "'client_credentials'", "channel", "=", "yield", "self", ".", "_tvm", ".", "ticket_full", "(", "self", ".", "_client_id", ",", "self", ".", "_client_secret", ",", "grant_type", ",", "{", "}", ")", "ticket", "=", "yield", "channel", ".", "rx", ".", "get", "(", ")", "raise", "gen", ".", "Return", "(", "self", ".", "_make_token", "(", "ticket", ")", ")" ]
Obtains a token from the secure backend service.

:return: Token formatted for Cocaine protocol header.
[ "Gains", "token", "from", "secure", "backend", "service", "." ]
python
train
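Because the method is a Tornado-style coroutine (yield plus gen.Return), callers consume it inside another coroutine. A hedged sketch; `adaptor` is assumed to be a constructed secadaptor object, which needs a live TVM service:

from tornado import gen, ioloop


@gen.coroutine
def main():
    token = yield adaptor.fetch_token()  # adaptor construction not shown
    print(token)

# ioloop.IOLoop.current().run_sync(main)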
pycontribs/pyrax
pyrax/manager.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/manager.py#L249-L256
def action(self, item, action_type, body={}): """ Several API calls are lumped under the 'action' API. This is the generic handler for such calls. """ uri = "/%s/%s/action" % (self.uri_base, utils.get_id(item)) action_body = {action_type: body} return self.api.method_post(uri, body=action_body)
[ "def", "action", "(", "self", ",", "item", ",", "action_type", ",", "body", "=", "{", "}", ")", ":", "uri", "=", "\"/%s/%s/action\"", "%", "(", "self", ".", "uri_base", ",", "utils", ".", "get_id", "(", "item", ")", ")", "action_body", "=", "{", "action_type", ":", "body", "}", "return", "self", ".", "api", ".", "method_post", "(", "uri", ",", "body", "=", "action_body", ")" ]
Several API calls are lumped under the 'action' API. This is the generic handler for such calls.
[ "Several", "API", "calls", "are", "lumped", "under", "the", "action", "API", ".", "This", "is", "the", "generic", "handler", "for", "such", "calls", "." ]
python
train
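A usage sketch of the generic action handler; the reboot payload mirrors OpenStack's server-action convention, but treat the exact body as an assumption:

# `mgr` is assumed to be a constructed pyrax manager; `server` may be an
# object with an .id attribute or a plain ID, thanks to utils.get_id().
resp = mgr.action(server, "reboot", body={"type": "SOFT"})
# POSTs to /<uri_base>/<server_id>/action with {"reboot": {"type": "SOFT"}}.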
pandas-dev/pandas
scripts/validate_docstrings.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L314-L328
def source_file_name(self):
    """
    File name where the object is implemented (e.g. pandas/core/frame.py).
    """
    fname = None
    try:
        fname = inspect.getsourcefile(self.code_obj)
    except TypeError:
        # In some cases the object is something complex like a cython
        # object that can't be easily introspected. And it's better to
        # report the source file as None than to crash.
        pass
    else:
        if fname:
            fname = os.path.relpath(fname, BASE_PATH)
    return fname
[ "def", "source_file_name", "(", "self", ")", ":", "try", ":", "fname", "=", "inspect", ".", "getsourcefile", "(", "self", ".", "code_obj", ")", "except", "TypeError", ":", "# In some cases the object is something complex like a cython", "# object that can't be easily introspected. An it's better to", "# return the source code file of the object as None, than crash", "pass", "else", ":", "if", "fname", ":", "fname", "=", "os", ".", "path", ".", "relpath", "(", "fname", ",", "BASE_PATH", ")", "return", "fname" ]
File name where the object is implemented (e.g. pandas/core/frame.py).
[ "File", "name", "where", "the", "object", "is", "implemented", "(", "e", ".", "g", ".", "pandas", "/", "core", "/", "frame", ".", "py", ")", "." ]
python
train
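A short sketch of the property in use; the Docstring class name comes from the same script, and taking a dotted object path is an assumption:

from validate_docstrings import Docstring  # run from pandas' scripts/ directory

doc = Docstring("pandas.DataFrame.head")
# None for objects (e.g. some cython builtins) that inspect cannot introspect.
print(doc.source_file_name)  # e.g. 'pandas/core/generic.py'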
ChrisCummins/labm8
system.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/system.py#L237-L284
def which(program, path=None):
    """
    Returns the full path of shell commands.

    Replicates the functionality of the system which(1) command. Looks
    for the named program in the directories indicated in the $PATH
    environment variable, and returns the full path if found.

    Examples:

        >>> system.which("ls")
        "/bin/ls"

        >>> system.which("/bin/ls")
        "/bin/ls"

        >>> system.which("not-a-real-command")
        None

        >>> system.which("ls", path=("/usr/bin", "/bin"))
        "/bin/ls"

    Arguments:

        program (str): The name of the program to look for. Can
          be an absolute path.
        path (sequence of str, optional): A list of directories to
          look for the program in. Default value is system $PATH.

    Returns:

        str: Full path to program if found, else None.
    """
    # If path is not given, read the $PATH environment variable.
    path = path or os.environ["PATH"].split(os.pathsep)
    abspath = True if os.path.split(program)[0] else False
    if abspath:
        if fs.isexe(program):
            return program
    else:
        for directory in path:
            # De-quote directories.
            directory = directory.strip('"')
            exe_file = os.path.join(directory, program)
            if fs.isexe(exe_file):
                return exe_file
    return None
[ "def", "which", "(", "program", ",", "path", "=", "None", ")", ":", "# If path is not given, read the $PATH environment variable.", "path", "=", "path", "or", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "os", ".", "pathsep", ")", "abspath", "=", "True", "if", "os", ".", "path", ".", "split", "(", "program", ")", "[", "0", "]", "else", "False", "if", "abspath", ":", "if", "fs", ".", "isexe", "(", "program", ")", ":", "return", "program", "else", ":", "for", "directory", "in", "path", ":", "# De-quote directories.", "directory", "=", "directory", ".", "strip", "(", "'\"'", ")", "exe_file", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "program", ")", "if", "fs", ".", "isexe", "(", "exe_file", ")", ":", "return", "exe_file", "return", "None" ]
Returns the full path of shell commands.

Replicates the functionality of the system which(1) command. Looks
for the named program in the directories indicated in the $PATH
environment variable, and returns the full path if found.

Examples:

    >>> system.which("ls")
    "/bin/ls"

    >>> system.which("/bin/ls")
    "/bin/ls"

    >>> system.which("not-a-real-command")
    None

    >>> system.which("ls", path=("/usr/bin", "/bin"))
    "/bin/ls"

Arguments:

    program (str): The name of the program to look for. Can
      be an absolute path.
    path (sequence of str, optional): A list of directories to
      look for the program in. Default value is system $PATH.

Returns:

    str: Full path to program if found, else None.
[ "Returns", "the", "full", "path", "of", "shell", "commands", "." ]
python
train
staticdev/django-sorting-bootstrap
sorting_bootstrap/util.py
https://github.com/staticdev/django-sorting-bootstrap/blob/cfdc6e671b1b57aad04e44b041b9df10ee8288d3/sorting_bootstrap/util.py#L17-L64
def label_for_field(name, model, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with the @property decorator) or the name of an
    object's attribute, as well as a genuine field. If return_attr is True,
    the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = model._meta.get_field_by_name(name)[0]
        if isinstance(field, RelatedObject):
            label = field.opts.verbose_name
        else:
            label = field.verbose_name
    except models.FieldDoesNotExist:
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            if callable(name):
                attr = name
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                raise AttributeError(message)

            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                  hasattr(attr, "fget") and
                  hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
[ "def", "label_for_field", "(", "name", ",", "model", ",", "return_attr", "=", "False", ")", ":", "attr", "=", "None", "try", ":", "field", "=", "model", ".", "_meta", ".", "get_field_by_name", "(", "name", ")", "[", "0", "]", "if", "isinstance", "(", "field", ",", "RelatedObject", ")", ":", "label", "=", "field", ".", "opts", ".", "verbose_name", "else", ":", "label", "=", "field", ".", "verbose_name", "except", "models", ".", "FieldDoesNotExist", ":", "if", "name", "==", "\"__unicode__\"", ":", "label", "=", "force_text", "(", "model", ".", "_meta", ".", "verbose_name", ")", "attr", "=", "six", ".", "text_type", "elif", "name", "==", "\"__str__\"", ":", "label", "=", "force_str", "(", "model", ".", "_meta", ".", "verbose_name", ")", "attr", "=", "bytes", "else", ":", "if", "callable", "(", "name", ")", ":", "attr", "=", "name", "elif", "hasattr", "(", "model", ",", "name", ")", ":", "attr", "=", "getattr", "(", "model", ",", "name", ")", "else", ":", "message", "=", "\"Unable to lookup '%s' on %s\"", "%", "(", "name", ",", "model", ".", "_meta", ".", "object_name", ")", "raise", "AttributeError", "(", "message", ")", "if", "hasattr", "(", "attr", ",", "\"short_description\"", ")", ":", "label", "=", "attr", ".", "short_description", "elif", "(", "isinstance", "(", "attr", ",", "property", ")", "and", "hasattr", "(", "attr", ",", "\"fget\"", ")", "and", "hasattr", "(", "attr", ".", "fget", ",", "\"short_description\"", ")", ")", ":", "label", "=", "attr", ".", "fget", ".", "short_description", "elif", "callable", "(", "attr", ")", ":", "if", "attr", ".", "__name__", "==", "\"<lambda>\"", ":", "label", "=", "\"--\"", "else", ":", "label", "=", "pretty_name", "(", "attr", ".", "__name__", ")", "else", ":", "label", "=", "pretty_name", "(", "name", ")", "if", "return_attr", ":", "return", "(", "label", ",", "attr", ")", "else", ":", "return", "label" ]
Returns a sensible label for a field name. The name can be a callable, property (but not created with the @property decorator) or the name of an object's attribute, as well as a genuine field. If return_attr is True, the resolved attribute (which could be a callable) is also returned. This will be None if (and only if) the name refers to a field.
[ "Returns", "a", "sensible", "label", "for", "a", "field", "name", ".", "The", "name", "can", "be", "a", "callable", "property", "(", "but", "not", "created", "with" ]
python
train
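A hedged usage sketch with a minimal Django model; project setup boilerplate is elided and the Book model is hypothetical:

from sorting_bootstrap.util import label_for_field

# Assuming a configured Django project containing:
#     class Book(models.Model):
#         title = models.CharField("book title", max_length=100)
print(label_for_field("title", Book))  # 'book title'
label, attr = label_for_field("__str__", Book, return_attr=True)
print(label, attr)  # 'book' <class 'bytes'>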
crate/crash
src/crate/crash/command.py
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L321-L333
def _show_tables(self, *args): """ print the existing tables within the 'doc' schema """ v = self.connection.lowest_server_version schema_name = \ "table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name" table_filter = \ " AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else "" self._exec("SELECT format('%s.%s', {schema}, table_name) AS name " "FROM information_schema.tables " "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')" "{table_filter}" .format(schema=schema_name, table_filter=table_filter))
[ "def", "_show_tables", "(", "self", ",", "*", "args", ")", ":", "v", "=", "self", ".", "connection", ".", "lowest_server_version", "schema_name", "=", "\"table_schema\"", "if", "v", ">=", "TABLE_SCHEMA_MIN_VERSION", "else", "\"schema_name\"", "table_filter", "=", "\" AND table_type = 'BASE TABLE'\"", "if", "v", ">=", "TABLE_TYPE_MIN_VERSION", "else", "\"\"", "self", ".", "_exec", "(", "\"SELECT format('%s.%s', {schema}, table_name) AS name \"", "\"FROM information_schema.tables \"", "\"WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')\"", "\"{table_filter}\"", ".", "format", "(", "schema", "=", "schema_name", ",", "table_filter", "=", "table_filter", ")", ")" ]
print the existing tables within the 'doc' schema
[ "print", "the", "existing", "tables", "within", "the", "doc", "schema" ]
python
train
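For reference, the statement the method issues against a recent CrateDB (both version gates satisfied), assembled directly from the format string above:

sql = (
    "SELECT format('%s.%s', table_schema, table_name) AS name "
    "FROM information_schema.tables "
    "WHERE table_schema NOT IN ('sys','information_schema', 'pg_catalog')"
    " AND table_type = 'BASE TABLE'"
)
print(sql)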
ga4gh/ga4gh-server
ga4gh/server/datamodel/continuous.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/continuous.py#L109-L152
def readWiggleLine(self, line): """ Read a wiggle line. If it is a data line, add values to the protocol object. """ if(line.isspace() or line.startswith("#") or line.startswith("browser") or line.startswith("track")): return elif line.startswith("variableStep"): self._mode = self._VARIABLE_STEP self.parseStep(line) return elif line.startswith("fixedStep"): self._mode = self._FIXED_STEP self.parseStep(line) return elif self._mode is None: raise ValueError("Unexpected input line: %s" % line.strip()) if self._queryReference != self._reference: return # read data lines fields = line.split() if self._mode == self._VARIABLE_STEP: start = int(fields[0])-1 # to 0-based val = float(fields[1]) else: start = self._start self._start += self._step val = float(fields[0]) if start < self._queryEnd and start > self._queryStart: if self._position is None: self._position = start self._data.start = start # fill gap while self._position < start: self._data.values.append(float('NaN')) self._position += 1 for _ in xrange(self._span): self._data.values.append(val) self._position += self._span
[ "def", "readWiggleLine", "(", "self", ",", "line", ")", ":", "if", "(", "line", ".", "isspace", "(", ")", "or", "line", ".", "startswith", "(", "\"#\"", ")", "or", "line", ".", "startswith", "(", "\"browser\"", ")", "or", "line", ".", "startswith", "(", "\"track\"", ")", ")", ":", "return", "elif", "line", ".", "startswith", "(", "\"variableStep\"", ")", ":", "self", ".", "_mode", "=", "self", ".", "_VARIABLE_STEP", "self", ".", "parseStep", "(", "line", ")", "return", "elif", "line", ".", "startswith", "(", "\"fixedStep\"", ")", ":", "self", ".", "_mode", "=", "self", ".", "_FIXED_STEP", "self", ".", "parseStep", "(", "line", ")", "return", "elif", "self", ".", "_mode", "is", "None", ":", "raise", "ValueError", "(", "\"Unexpected input line: %s\"", "%", "line", ".", "strip", "(", ")", ")", "if", "self", ".", "_queryReference", "!=", "self", ".", "_reference", ":", "return", "# read data lines", "fields", "=", "line", ".", "split", "(", ")", "if", "self", ".", "_mode", "==", "self", ".", "_VARIABLE_STEP", ":", "start", "=", "int", "(", "fields", "[", "0", "]", ")", "-", "1", "# to 0-based", "val", "=", "float", "(", "fields", "[", "1", "]", ")", "else", ":", "start", "=", "self", ".", "_start", "self", ".", "_start", "+=", "self", ".", "_step", "val", "=", "float", "(", "fields", "[", "0", "]", ")", "if", "start", "<", "self", ".", "_queryEnd", "and", "start", ">", "self", ".", "_queryStart", ":", "if", "self", ".", "_position", "is", "None", ":", "self", ".", "_position", "=", "start", "self", ".", "_data", ".", "start", "=", "start", "# fill gap", "while", "self", ".", "_position", "<", "start", ":", "self", ".", "_data", ".", "values", ".", "append", "(", "float", "(", "'NaN'", ")", ")", "self", ".", "_position", "+=", "1", "for", "_", "in", "xrange", "(", "self", ".", "_span", ")", ":", "self", ".", "_data", ".", "values", ".", "append", "(", "val", ")", "self", ".", "_position", "+=", "self", ".", "_span" ]
Read a wiggle line. If it is a data line, add values to the protocol object.
[ "Read", "a", "wiggle", "line", ".", "If", "it", "is", "a", "data", "line", "add", "values", "to", "the", "protocol", "object", "." ]
python
train
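For context, a sketch of the fixedStep input this parser consumes; building the surrounding reader object is not shown, so only the data format and the per-line call are illustrated:

# One declaration line, then one value per line (wiggle positions are
# 1-based; the parser converts to 0-based, as in the variableStep branch).
wiggle_lines = [
    "track type=wiggle_0",
    "fixedStep chrom=chr1 start=200 step=1 span=1",
    "0.5",   # value at the block start
    "0.75",
    "1.0",
]
# In the class above, each line would be fed to reader.readWiggleLine(line);
# variableStep blocks instead carry "position value" pairs on each data line.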