Dataset schema (one entry per column; string lengths are observed min to max):

repo: string, lengths 7 to 54
path: string, lengths 4 to 192
url: string, lengths 87 to 284
code: string, lengths 78 to 104k
code_tokens: list
docstring: string, lengths 1 to 46.9k
docstring_tokens: list
language: string, 1 distinct value
partition: string, 3 distinct values
tadashi-aikawa/owlmixin
owlmixin/owlcollections.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/owlcollections.py#L50-L61
def emap(self, func):
    """
    :param func:
    :type func: T, int -> U
    :rtype: TList[U]

    Usage:

        >>> TList([10, 20, 30, 40, 50]).emap(lambda x, i: (x+1, i))
        [(11, 0), (21, 1), (31, 2), (41, 3), (51, 4)]
    """
    return TList([func(x, i) for i, x in enumerate(self)])
[ "def", "emap", "(", "self", ",", "func", ")", ":", "return", "TList", "(", "[", "func", "(", "x", ",", "i", ")", "for", "i", ",", "x", "in", "enumerate", "(", "self", ")", "]", ")" ]
:param func: :type func: T, int -> U :rtype: TList[U] Usage: >>> TList([10, 20, 30, 40, 50]).emap(lambda x, i: (x+1, i)) [(11, 0), (21, 1), (31, 2), (41, 3), (51, 4)]
[ ":", "param", "func", ":", ":", "type", "func", ":", "T", "int", "-", ">", "U", ":", "rtype", ":", "TList", "[", "U", "]" ]
python
train
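A library-free sketch of what emap does, for readers without owlmixin: it is an index-aware map over enumerate, and the TList wrapper is the only owlmixin-specific part omitted here.

>>> def emap(xs, func):
...     return [func(x, i) for i, x in enumerate(xs)]
>>> emap([10, 20, 30], lambda x, i: (x + 1, i))
[(11, 0), (21, 1), (31, 2)]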
Clinical-Genomics/scout
scout/utils/date.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/utils/date.py#L19-L50
def get_date(date, date_format=None):
    """Return a datetime object if there is a valid date

    Raise exception if date is not valid
    Return today's date if no date was added

    Args:
        date(str)
        date_format(str)

    Returns:
        date_obj(datetime.datetime)
    """
    date_obj = datetime.datetime.now()
    if date:
        if date_format:
            date_obj = datetime.datetime.strptime(date, date_format)
        else:
            if match_date(date):
                if len(date.split('-')) == 3:
                    date = date.split('-')
                elif len(date.split(' ')) == 3:
                    date = date.split(' ')
                elif len(date.split('.')) == 3:
                    date = date.split('.')
                else:
                    date = date.split('/')
                date_obj = datetime.datetime(*(int(number) for number in date))
            else:
                raise ValueError("Date %s is invalid" % date)
    return date_obj
[ "def", "get_date", "(", "date", ",", "date_format", "=", "None", ")", ":", "date_obj", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "if", "date", ":", "if", "date_format", ":", "date_obj", "=", "datetime", ".", "datetime", ".", "strptime", "(", "date", ",", "date_format", ")", "else", ":", "if", "match_date", "(", "date", ")", ":", "if", "len", "(", "date", ".", "split", "(", "'-'", ")", ")", "==", "3", ":", "date", "=", "date", ".", "split", "(", "'-'", ")", "elif", "len", "(", "date", ".", "split", "(", "' '", ")", ")", "==", "3", ":", "date", "=", "date", ".", "split", "(", "' '", ")", "elif", "len", "(", "date", ".", "split", "(", "'.'", ")", ")", "==", "3", ":", "date", "=", "date", ".", "split", "(", "'.'", ")", "else", ":", "date", "=", "date", ".", "split", "(", "'/'", ")", "date_obj", "=", "datetime", ".", "datetime", "(", "*", "(", "int", "(", "number", ")", "for", "number", "in", "date", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Date %s is invalid\"", "%", "date", ")", "return", "date_obj" ]
Return a datetime object if there is a valid date Raise exception if date is not valid Return today's date if no date was added Args: date(str) date_format(str) Returns: date_obj(datetime.datetime)
[ "Return", "a", "datetime", "object", "if", "there", "is", "a", "valid", "date" ]
python
test
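The separator-detection branch above reduces to splitting on one of '-', ' ', '.', '/' and passing the three integers to the datetime constructor. That step in isolation, using only the standard library (match_date is scout's own validator and is not reproduced here):

>>> import datetime
>>> datetime.datetime(*(int(n) for n in "2015-05-10".split('-')))
datetime.datetime(2015, 5, 10, 0, 0)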
AshleySetter/optoanalysis
optoanalysis/optoanalysis/Saleae/Saleae.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/Saleae/Saleae.py#L4-L7
def get_chunks(Array, Chunksize):
    """Generator that yields chunks of size ChunkSize"""
    for i in range(0, len(Array), Chunksize):
        yield Array[i:i + Chunksize]
[ "def", "get_chunks", "(", "Array", ",", "Chunksize", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "Array", ")", ",", "Chunksize", ")", ":", "yield", "Array", "[", "i", ":", "i", "+", "Chunksize", "]" ]
Generator that yields chunks of size ChunkSize
[ "Generator", "that", "yields", "chunks", "of", "size", "ChunkSize" ]
python
train
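A self-contained doctest for the chunking generator above; note the final chunk is simply shorter when the length is not a multiple of Chunksize.

>>> def get_chunks(Array, Chunksize):
...     for i in range(0, len(Array), Chunksize):
...         yield Array[i:i + Chunksize]
>>> list(get_chunks([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]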
ssato/python-anyconfig
src/anyconfig/backend/yaml/ruamel_yaml.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/yaml/ruamel_yaml.py#L73-L93
def yml_fnc(fname, *args, **options):
    """
    :param fname: "load" or "dump", not checked but it should be OK.
        see also :func:`yml_load` and :func:`yml_dump`
    :param args: [stream] for load or [cnf, stream] for dump
    :param options: keyword args may contain "ac_safe" to load/dump safely
    """
    options = common.filter_from_options("ac_dict", options)
    if "ac_safe" in options:
        options["typ"] = "safe"  # Override it.

    iopts = anyconfig.utils.filter_options(_YAML_INIT_KWARGS, options)
    oopts = anyconfig.utils.filter_options(_YAML_INSTANCE_MEMBERS, options)

    yml = ryaml.YAML(**iopts)
    for attr, val in oopts.items():
        setattr(yml, attr, val)  # e.g. yml.preserve_quotes = True

    return getattr(yml, fname)(*args)
[ "def", "yml_fnc", "(", "fname", ",", "*", "args", ",", "*", "*", "options", ")", ":", "options", "=", "common", ".", "filter_from_options", "(", "\"ac_dict\"", ",", "options", ")", "if", "\"ac_safe\"", "in", "options", ":", "options", "[", "\"typ\"", "]", "=", "\"safe\"", "# Override it.", "iopts", "=", "anyconfig", ".", "utils", ".", "filter_options", "(", "_YAML_INIT_KWARGS", ",", "options", ")", "oopts", "=", "anyconfig", ".", "utils", ".", "filter_options", "(", "_YAML_INSTANCE_MEMBERS", ",", "options", ")", "yml", "=", "ryaml", ".", "YAML", "(", "*", "*", "iopts", ")", "for", "attr", ",", "val", "in", "oopts", ".", "items", "(", ")", ":", "setattr", "(", "yml", ",", "attr", ",", "val", ")", "# e.g. yml.preserve_quotes = True", "return", "getattr", "(", "yml", ",", "fname", ")", "(", "*", "args", ")" ]
:param fname: "load" or "dump", not checked but it should be OK. see also :func:`yml_load` and :func:`yml_dump` :param args: [stream] for load or [cnf, stream] for dump :param options: keyword args may contain "ac_safe" to load/dump safely
[ ":", "param", "fname", ":", "load", "or", "dump", "not", "checked", "but", "it", "should", "be", "OK", ".", "see", "also", ":", "func", ":", "yml_load", "and", ":", "func", ":", "yml_dump", ":", "param", "args", ":", "[", "stream", "]", "for", "load", "or", "[", "cnf", "stream", "]", "for", "dump", ":", "param", "options", ":", "keyword", "args", "may", "contain", "ac_safe", "to", "load", "/", "dump", "safely" ]
python
train
timkpaine/pyEX
pyEX/stocks.py
https://github.com/timkpaine/pyEX/blob/91cf751dafdb208a0c8b5377945e5808b99f94ba/pyEX/stocks.py#L992-L1008
def keyStatsDF(symbol, token='', version=''):
    '''Key Stats about company

    https://iexcloud.io/docs/api/#key-stats
    8am, 9am ET

    Args:
        symbol (string); Ticker to request
        token (string); Access token
        version (string); API version

    Returns:
        DataFrame: result
    '''
    s = keyStats(symbol, token, version)
    df = _statsToDF(s)
    return df
[ "def", "keyStatsDF", "(", "symbol", ",", "token", "=", "''", ",", "version", "=", "''", ")", ":", "s", "=", "keyStats", "(", "symbol", ",", "token", ",", "version", ")", "df", "=", "_statsToDF", "(", "s", ")", "return", "df" ]
Key Stats about company https://iexcloud.io/docs/api/#key-stats 8am, 9am ET Args: symbol (string); Ticker to request token (string); Access token version (string); API version Returns: DataFrame: result
[ "Key", "Stats", "about", "company" ]
python
valid
wbond/ocspbuilder
ocspbuilder/__init__.py
https://github.com/wbond/ocspbuilder/blob/0b853af4ed6bf8bded1ddf235e9e64ea78708456/ocspbuilder/__init__.py#L627-L650
def certificate_issuer(self, value):
    """
    An asn1crypto.x509.Certificate object of the issuer of the certificate.
    This should only be set if the OCSP responder is not the issuer of the
    certificate, but instead a special certificate only for OCSP responses.
    """

    if value is not None:
        is_oscrypto = isinstance(value, asymmetric.Certificate)
        if not is_oscrypto and not isinstance(value, x509.Certificate):
            raise TypeError(_pretty_message(
                '''
                certificate_issuer must be an instance of
                asn1crypto.x509.Certificate or
                oscrypto.asymmetric.Certificate, not %s
                ''',
                _type_name(value)
            ))

        if is_oscrypto:
            value = value.asn1

    self._certificate_issuer = value
[ "def", "certificate_issuer", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "is_oscrypto", "=", "isinstance", "(", "value", ",", "asymmetric", ".", "Certificate", ")", "if", "not", "is_oscrypto", "and", "not", "isinstance", "(", "value", ",", "x509", ".", "Certificate", ")", ":", "raise", "TypeError", "(", "_pretty_message", "(", "'''\n certificate_issuer must be an instance of\n asn1crypto.x509.Certificate or\n oscrypto.asymmetric.Certificate, not %s\n '''", ",", "_type_name", "(", "value", ")", ")", ")", "if", "is_oscrypto", ":", "value", "=", "value", ".", "asn1", "self", ".", "_certificate_issuer", "=", "value" ]
An asn1crypto.x509.Certificate object of the issuer of the certificate. This should only be set if the OCSP responder is not the issuer of the certificate, but instead a special certificate only for OCSP responses.
[ "An", "asn1crypto", ".", "x509", ".", "Certificate", "object", "of", "the", "issuer", "of", "the", "certificate", ".", "This", "should", "only", "be", "set", "if", "the", "OCSP", "responder", "is", "not", "the", "issuer", "of", "the", "certificate", "but", "instead", "a", "special", "certificate", "only", "for", "OCSP", "responses", "." ]
python
train
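The setter above follows a common validate-and-normalize pattern: accept either of two certificate types, reject everything else, and unwrap the oscrypto object to its asn1crypto form before storing. A library-free sketch of the same shape; Plain and Wrapped are placeholder classes, not ocspbuilder types:

class Plain:
    pass

class Wrapped:
    def __init__(self, inner):
        self.asn1 = inner  # unwrap target, mirroring oscrypto's .asn1 attribute

def normalize(value):
    if value is not None:
        is_wrapped = isinstance(value, Wrapped)
        if not is_wrapped and not isinstance(value, Plain):
            raise TypeError("expected Plain or Wrapped, not %s" % type(value).__name__)
        if is_wrapped:
            value = value.asn1  # always store the unwrapped form
    return value

print(normalize(Wrapped(Plain())))  # the inner Plain instance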
NASA-AMMOS/AIT-Core
ait/core/cfg.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/cfg.py#L153-L173
def loadYAML(filename=None, data=None):
    """Loads either the given YAML configuration file or YAML data.

    Returns None if there was an error reading from the configuration
    file and logs an error message via ait.core.log.error().
    """
    config = None

    try:
        if filename:
            data = open(filename, 'rt')

        # note: modern PyYAML expects an explicit Loader here,
        # e.g. yaml.safe_load(data)
        config = yaml.load(data)

        if filename:
            data.close()  # only close the handle we opened ourselves
    # ported to Python 3 syntax; the original source is Python 2
    # ("except IOError, e:" and "if type(data) is file:")
    except IOError as e:
        msg = 'Could not read AIT configuration file "%s": %s'
        log.error(msg, filename, str(e))

    return config
[ "def", "loadYAML", "(", "filename", "=", "None", ",", "data", "=", "None", ")", ":", "config", "=", "None", "try", ":", "if", "filename", ":", "data", "=", "open", "(", "filename", ",", "'rt'", ")", "config", "=", "yaml", ".", "load", "(", "data", ")", "if", "type", "(", "data", ")", "is", "file", ":", "data", ".", "close", "(", ")", "except", "IOError", ",", "e", ":", "msg", "=", "'Could not read AIT configuration file \"%s\": %s'", "log", ".", "error", "(", "msg", ",", "filename", ",", "str", "(", "e", ")", ")", "return", "config" ]
Loads either the given YAML configuration file or YAML data. Returns None if there was an error reading from the configuration file and logs an error message via ait.core.log.error().
[ "Loads", "either", "the", "given", "YAML", "configuration", "file", "or", "YAML", "data", "." ]
python
train
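The function accepts either a filename or already-read YAML data. The data path, illustrated with plain PyYAML (yaml.safe_load is the modern spelling of the load call used above; the keys are illustrative, not an AIT config):

>>> import yaml
>>> yaml.safe_load("host: localhost\nport: 8000\n")
{'host': 'localhost', 'port': 8000}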
vtkiorg/vtki
vtki/filters.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/filters.py#L878-L894
def point_data_to_cell_data(dataset, pass_point_data=False):
    """Transforms point data (i.e., data specified per node) into cell data
    (i.e., data specified within cells).  Optionally, the input point data
    can be passed through to the output.

    See also: :func:`vtki.DataSetFilters.cell_data_to_point_data`

    Parameters
    ----------
    pass_point_data : bool
        If enabled, pass the input point data through to the output

    """
    alg = vtk.vtkPointDataToCellData()
    alg.SetInputDataObject(dataset)
    alg.SetPassPointData(pass_point_data)
    alg.Update()
    return _get_output(alg, active_scalar=dataset.active_scalar_name)
[ "def", "point_data_to_cell_data", "(", "dataset", ",", "pass_point_data", "=", "False", ")", ":", "alg", "=", "vtk", ".", "vtkPointDataToCellData", "(", ")", "alg", ".", "SetInputDataObject", "(", "dataset", ")", "alg", ".", "SetPassPointData", "(", "pass_point_data", ")", "alg", ".", "Update", "(", ")", "return", "_get_output", "(", "alg", ",", "active_scalar", "=", "dataset", ".", "active_scalar_name", ")" ]
Transforms point data (i.e., data specified per node) into cell data (i.e., data specified within cells). Optionally, the input point data can be passed through to the output. See also: :func:`vtki.DataSetFilters.cell_data_to_point_data` Parameters ---------- pass_point_data : bool If enabled, pass the input point data through to the output
[ "Transforms", "point", "data", "(", "i", ".", "e", ".", "data", "specified", "per", "node", ")", "into", "cell", "data", "(", "i", ".", "e", ".", "data", "specified", "within", "cells", ")", ".", "Optionally", "the", "input", "point", "data", "can", "be", "passed", "through", "to", "the", "output", "." ]
python
train
CGATOxford/UMI-tools
umi_tools/network.py
https://github.com/CGATOxford/UMI-tools/blob/c4b5d84aac391d59916d294f8f4f8f5378abcfbe/umi_tools/network.py#L249-L269
def _group_directional(self, clusters, adj_list, counts):
    ''' return groups for directional method'''

    observed = set()
    groups = []
    for cluster in clusters:
        if len(cluster) == 1:
            groups.append(list(cluster))
            observed.update(cluster)
        else:
            cluster = sorted(cluster, key=lambda x: counts[x],
                             reverse=True)
            # need to remove any node which has already been observed
            temp_cluster = []
            for node in cluster:
                if node not in observed:
                    temp_cluster.append(node)
                    observed.add(node)
            groups.append(temp_cluster)

    return groups
[ "def", "_group_directional", "(", "self", ",", "clusters", ",", "adj_list", ",", "counts", ")", ":", "observed", "=", "set", "(", ")", "groups", "=", "[", "]", "for", "cluster", "in", "clusters", ":", "if", "len", "(", "cluster", ")", "==", "1", ":", "groups", ".", "append", "(", "list", "(", "cluster", ")", ")", "observed", ".", "update", "(", "cluster", ")", "else", ":", "cluster", "=", "sorted", "(", "cluster", ",", "key", "=", "lambda", "x", ":", "counts", "[", "x", "]", ",", "reverse", "=", "True", ")", "# need to remove any node which has already been observed", "temp_cluster", "=", "[", "]", "for", "node", "in", "cluster", ":", "if", "node", "not", "in", "observed", ":", "temp_cluster", ".", "append", "(", "node", ")", "observed", ".", "add", "(", "node", ")", "groups", ".", "append", "(", "temp_cluster", ")", "return", "groups" ]
return groups for directional method
[ "return", "groups", "for", "directional", "method" ]
python
train
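The grouping logic depends only on clusters and counts (adj_list is unused in this method body), so it can be exercised standalone: nodes in each multi-node cluster are ranked by count, and any node already claimed by an earlier cluster is dropped. A toy run, with self and adj_list removed and illustrative UMI strings as nodes:

def group_directional(clusters, counts):
    observed = set()
    groups = []
    for cluster in clusters:
        if len(cluster) == 1:
            groups.append(list(cluster))
            observed.update(cluster)
        else:
            temp = []
            for node in sorted(cluster, key=lambda x: counts[x], reverse=True):
                if node not in observed:  # claimed by an earlier cluster?
                    temp.append(node)
                    observed.add(node)
            groups.append(temp)
    return groups

counts = {'AAA': 10, 'AAT': 3, 'ATT': 1}
print(group_directional([{'AAA', 'AAT'}, {'AAT', 'ATT'}], counts))
# [['AAA', 'AAT'], ['ATT']]  (AAT already observed, so it is dropped)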
cggh/scikit-allel
allel/stats/diversity.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/diversity.py#L492-L590
def windowed_divergence(pos, ac1, ac2, size=None, start=None, stop=None,
                        step=None, windows=None, is_accessible=None,
                        fill=np.nan):
    """Estimate nucleotide divergence between two populations in windows
    over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Variant positions, using 1-based coordinates, in ascending order.
    ac1 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the first population.
    ac2 : array_like, int, shape (n_variants, n_alleles)
        Allele counts array for the second population.
    size : int, optional
        The window size (number of bases).
    start : int, optional
        The position at which to start (1-based).
    stop : int, optional
        The position at which to stop (1-based).
    step : int, optional
        The distance between start positions of windows. If not given,
        defaults to the window size, i.e., non-overlapping windows.
    windows : array_like, int, shape (n_windows, 2), optional
        Manually specify the windows to use as a sequence of (window_start,
        window_stop) positions, using 1-based coordinates. Overrides the
        size/start/stop/step parameters.
    is_accessible : array_like, bool, shape (len(contig),), optional
        Boolean array indicating accessibility status for all positions in
        the chromosome/contig.
    fill : object, optional
        The value to use where a window is completely inaccessible.

    Returns
    -------
    Dxy : ndarray, float, shape (n_windows,)
        Nucleotide divergence in each window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used, as an array of (window_start, window_stop)
        positions, using 1-based coordinates.
    n_bases : ndarray, int, shape (n_windows,)
        Number of (accessible) bases in each window.
    counts : ndarray, int, shape (n_windows,)
        Number of variants in each window.

    Examples
    --------
    Simplest case, two haplotypes in each population::

        >>> import allel
        >>> h = allel.HaplotypeArray([[0, 0, 0, 0],
        ...                           [0, 0, 0, 1],
        ...                           [0, 0, 1, 1],
        ...                           [0, 1, 1, 1],
        ...                           [1, 1, 1, 1],
        ...                           [0, 0, 1, 2],
        ...                           [0, 1, 1, 2],
        ...                           [0, 1, -1, -1],
        ...                           [-1, -1, -1, -1]])
        >>> ac1 = h.count_alleles(subpop=[0, 1])
        >>> ac2 = h.count_alleles(subpop=[2, 3])
        >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
        >>> dxy, windows, n_bases, counts = windowed_divergence(
        ...     pos, ac1, ac2, size=10, start=1, stop=31
        ... )
        >>> dxy
        array([0.15 , 0.225, 0.   ])
        >>> windows
        array([[ 1, 10],
               [11, 20],
               [21, 31]])
        >>> n_bases
        array([10, 10, 11])
        >>> counts
        array([3, 4, 2])

    """

    # check inputs
    pos = SortedIndex(pos, copy=False)
    is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

    # calculate mean pairwise divergence
    mpd = mean_pairwise_difference_between(ac1, ac2, fill=0)

    # sum in windows
    mpd_sum, windows, counts = windowed_statistic(
        pos, values=mpd, statistic=np.sum, size=size, start=start,
        stop=stop, step=step, windows=windows, fill=0
    )

    # calculate value per base
    dxy, n_bases = per_base(mpd_sum, windows, is_accessible=is_accessible,
                            fill=fill)

    return dxy, windows, n_bases, counts
[ "def", "windowed_divergence", "(", "pos", ",", "ac1", ",", "ac2", ",", "size", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ",", "windows", "=", "None", ",", "is_accessible", "=", "None", ",", "fill", "=", "np", ".", "nan", ")", ":", "# check inputs", "pos", "=", "SortedIndex", "(", "pos", ",", "copy", "=", "False", ")", "is_accessible", "=", "asarray_ndim", "(", "is_accessible", ",", "1", ",", "allow_none", "=", "True", ")", "# calculate mean pairwise divergence", "mpd", "=", "mean_pairwise_difference_between", "(", "ac1", ",", "ac2", ",", "fill", "=", "0", ")", "# sum in windows", "mpd_sum", ",", "windows", ",", "counts", "=", "windowed_statistic", "(", "pos", ",", "values", "=", "mpd", ",", "statistic", "=", "np", ".", "sum", ",", "size", "=", "size", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "step", "=", "step", ",", "windows", "=", "windows", ",", "fill", "=", "0", ")", "# calculate value per base", "dxy", ",", "n_bases", "=", "per_base", "(", "mpd_sum", ",", "windows", ",", "is_accessible", "=", "is_accessible", ",", "fill", "=", "fill", ")", "return", "dxy", ",", "windows", ",", "n_bases", ",", "counts" ]
Estimate nucleotide divergence between two populations in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional The value to use where a window is completely inaccessible. Returns ------- Dxy : ndarray, float, shape (n_windows,) Nucleotide divergence in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. n_bases : ndarray, int, shape (n_windows,) Number of (accessible) bases in each window. counts : ndarray, int, shape (n_windows,) Number of variants in each window. Examples -------- Simplest case, two haplotypes in each population:: >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1], ... [-1, -1, -1, -1]]) >>> ac1 = h.count_alleles(subpop=[0, 1]) >>> ac2 = h.count_alleles(subpop=[2, 3]) >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> dxy, windows, n_bases, counts = windowed_divergence( ... pos, ac1, ac2, size=10, start=1, stop=31 ... ) >>> dxy array([0.15 , 0.225, 0. ]) >>> windows array([[ 1, 10], [11, 20], [21, 31]]) >>> n_bases array([10, 10, 11]) >>> counts array([3, 4, 2])
[ "Estimate", "nucleotide", "divergence", "between", "two", "populations", "in", "windows", "over", "a", "single", "chromosome", "/", "contig", "." ]
python
train
Gandi/gandi.cli
gandi/cli/core/params.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/core/params.py#L28-L39
def choices(self):
    """ Retrieve choices from API if possible"""
    if not self._choices:
        gandi = self.gandi or GandiContextHelper()
        self._choices = self._get_choices(gandi)
        if not self._choices:
            api = gandi.get_api_connector()
            gandi.echo('Please check that you are connecting to the good '
                       "api '%s' and that it's running." % (api.host))
            sys.exit(1)
    return self._choices
[ "def", "choices", "(", "self", ")", ":", "if", "not", "self", ".", "_choices", ":", "gandi", "=", "self", ".", "gandi", "or", "GandiContextHelper", "(", ")", "self", ".", "_choices", "=", "self", ".", "_get_choices", "(", "gandi", ")", "if", "not", "self", ".", "_choices", ":", "api", "=", "gandi", ".", "get_api_connector", "(", ")", "gandi", ".", "echo", "(", "'Please check that you are connecting to the good '", "\"api '%s' and that it's running.\"", "%", "(", "api", ".", "host", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "self", ".", "_choices" ]
Retrieve choices from API if possible
[ "Retrieve", "choices", "from", "API", "if", "possible" ]
python
train
gc3-uzh-ch/elasticluster
elasticluster/cluster.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/cluster.py#L1301-L1319
def start(self):
    """
    Start the node on the cloud using the given instance properties.

    This method is non-blocking: as soon as the node id is returned from
    the cloud provider, it will return. The `is_alive`:meth: and
    `update_ips`:meth: methods should be used to further gather details
    about the state of the node.
    """
    log.info("Starting node `%s` from image `%s` with flavor %s ...",
             self.name, self.image_id, self.flavor)
    self.instance_id = self._cloud_provider.start_instance(
        self.user_key_name, self.user_key_public, self.user_key_private,
        self.security_group, self.flavor, self.image_id,
        self.image_userdata, username=self.image_user,
        node_name=("%s-%s" % (self.cluster_name, self.name)),
        **self.extra)
    log.debug("Node `%s` has instance ID `%s`", self.name, self.instance_id)
[ "def", "start", "(", "self", ")", ":", "log", ".", "info", "(", "\"Starting node `%s` from image `%s` with flavor %s ...\"", ",", "self", ".", "name", ",", "self", ".", "image_id", ",", "self", ".", "flavor", ")", "self", ".", "instance_id", "=", "self", ".", "_cloud_provider", ".", "start_instance", "(", "self", ".", "user_key_name", ",", "self", ".", "user_key_public", ",", "self", ".", "user_key_private", ",", "self", ".", "security_group", ",", "self", ".", "flavor", ",", "self", ".", "image_id", ",", "self", ".", "image_userdata", ",", "username", "=", "self", ".", "image_user", ",", "node_name", "=", "(", "\"%s-%s\"", "%", "(", "self", ".", "cluster_name", ",", "self", ".", "name", ")", ")", ",", "*", "*", "self", ".", "extra", ")", "log", ".", "debug", "(", "\"Node `%s` has instance ID `%s`\"", ",", "self", ".", "name", ",", "self", ".", "instance_id", ")" ]
Start the node on the cloud using the given instance properties. This method is non-blocking: as soon as the node id is returned from the cloud provider, it will return. The `is_alive`:meth: and `update_ips`:meth: methods should be used to further gather details about the state of the node.
[ "Start", "the", "node", "on", "the", "cloud", "using", "the", "given", "instance", "properties", "." ]
python
train
inveniosoftware-contrib/invenio-groups
examples/app.py
https://github.com/inveniosoftware-contrib/invenio-groups/blob/109481d6b02701db00b72223dd4a65e167c589a6/examples/app.py#L83-L103
def users():
    """Load default users and groups."""
    from invenio_groups.models import Group, Membership, \
        PrivacyPolicy, SubscriptionPolicy

    admin = accounts.datastore.create_user(
        email='[email protected]',
        password=encrypt_password('123456'),
        active=True,
    )
    reader = accounts.datastore.create_user(
        email='[email protected]',
        password=encrypt_password('123456'),
        active=True,
    )

    admins = Group.create(name='admins', admins=[admin])
    for i in range(10):
        Group.create(name='group-{0}'.format(i), admins=[admin])
    Membership.create(admins, reader)
    db.session.commit()
[ "def", "users", "(", ")", ":", "from", "invenio_groups", ".", "models", "import", "Group", ",", "Membership", ",", "PrivacyPolicy", ",", "SubscriptionPolicy", "admin", "=", "accounts", ".", "datastore", ".", "create_user", "(", "email", "=", "'[email protected]'", ",", "password", "=", "encrypt_password", "(", "'123456'", ")", ",", "active", "=", "True", ",", ")", "reader", "=", "accounts", ".", "datastore", ".", "create_user", "(", "email", "=", "'[email protected]'", ",", "password", "=", "encrypt_password", "(", "'123456'", ")", ",", "active", "=", "True", ",", ")", "admins", "=", "Group", ".", "create", "(", "name", "=", "'admins'", ",", "admins", "=", "[", "admin", "]", ")", "for", "i", "in", "range", "(", "10", ")", ":", "Group", ".", "create", "(", "name", "=", "'group-{0}'", ".", "format", "(", "i", ")", ",", "admins", "=", "[", "admin", "]", ")", "Membership", ".", "create", "(", "admins", ",", "reader", ")", "db", ".", "session", ".", "commit", "(", ")" ]
Load default users and groups.
[ "Load", "default", "users", "and", "groups", "." ]
python
valid
ejeschke/ginga
ginga/rv/Control.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/Control.py#L1867-L1889
def make_viewer(self, vname, channel):
    """Make a viewer whose type name is `vname` and add it to `channel`.
    """
    if vname not in self.viewer_db:
        raise ValueError("I don't know how to build a '%s' viewer" % (
            vname))

    stk_w = channel.widget

    bnch = self.viewer_db[vname]
    viewer = bnch.vclass(logger=self.logger,
                         settings=channel.settings)
    stk_w.add_widget(viewer.get_widget(), title=vname)

    # let the GUI respond to this widget addition
    self.update_pending()

    # let the channel object do any necessary initialization
    channel.connect_viewer(viewer)

    # finally, let the viewer do any viewer-side initialization
    viewer.initialize_channel(self, channel)
[ "def", "make_viewer", "(", "self", ",", "vname", ",", "channel", ")", ":", "if", "vname", "not", "in", "self", ".", "viewer_db", ":", "raise", "ValueError", "(", "\"I don't know how to build a '%s' viewer\"", "%", "(", "vname", ")", ")", "stk_w", "=", "channel", ".", "widget", "bnch", "=", "self", ".", "viewer_db", "[", "vname", "]", "viewer", "=", "bnch", ".", "vclass", "(", "logger", "=", "self", ".", "logger", ",", "settings", "=", "channel", ".", "settings", ")", "stk_w", ".", "add_widget", "(", "viewer", ".", "get_widget", "(", ")", ",", "title", "=", "vname", ")", "# let the GUI respond to this widget addition", "self", ".", "update_pending", "(", ")", "# let the channel object do any necessary initialization", "channel", ".", "connect_viewer", "(", "viewer", ")", "# finally, let the viewer do any viewer-side initialization", "viewer", ".", "initialize_channel", "(", "self", ",", "channel", ")" ]
Make a viewer whose type name is `vname` and add it to `channel`.
[ "Make", "a", "viewer", "whose", "type", "name", "is", "vname", "and", "add", "it", "to", "channel", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/data_generators/generator_utils.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/generator_utils.py#L65-L89
def generate_files_distributed(generator,
                               output_name,
                               output_dir,
                               num_shards=1,
                               max_cases=None,
                               task_id=0):
  """generate_files but with a single writer writing to shard task_id."""
  assert task_id < num_shards
  output_filename = sharded_name(output_name, task_id, num_shards)
  output_file = os.path.join(output_dir, output_filename)
  tf.logging.info("Writing to file %s", output_file)
  writer = tf.python_io.TFRecordWriter(output_file)

  counter = 0
  for case in generator:
    if counter % 100000 == 0:
      tf.logging.info("Generating case %d for %s." % (counter, output_name))
    counter += 1
    if max_cases and counter > max_cases:
      break
    example = to_example(case)
    writer.write(example.SerializeToString())

  writer.close()
  return output_file
[ "def", "generate_files_distributed", "(", "generator", ",", "output_name", ",", "output_dir", ",", "num_shards", "=", "1", ",", "max_cases", "=", "None", ",", "task_id", "=", "0", ")", ":", "assert", "task_id", "<", "num_shards", "output_filename", "=", "sharded_name", "(", "output_name", ",", "task_id", ",", "num_shards", ")", "output_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "output_filename", ")", "tf", ".", "logging", ".", "info", "(", "\"Writing to file %s\"", ",", "output_file", ")", "writer", "=", "tf", ".", "python_io", ".", "TFRecordWriter", "(", "output_file", ")", "counter", "=", "0", "for", "case", "in", "generator", ":", "if", "counter", "%", "100000", "==", "0", ":", "tf", ".", "logging", ".", "info", "(", "\"Generating case %d for %s.\"", "%", "(", "counter", ",", "output_name", ")", ")", "counter", "+=", "1", "if", "max_cases", "and", "counter", ">", "max_cases", ":", "break", "example", "=", "to_example", "(", "case", ")", "writer", ".", "write", "(", "example", ".", "SerializeToString", "(", ")", ")", "writer", ".", "close", "(", ")", "return", "output_file" ]
generate_files but with a single writer writing to shard task_id.
[ "generate_files", "but", "with", "a", "single", "writer", "writing", "to", "shard", "task_id", "." ]
python
train
NASA-AMMOS/AIT-Core
ait/core/api.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/api.py#L307-L315
def extendleft(self, iterable):
    """Extend the left side of this GeventDeque by appending
    elements from the iterable argument.  Note, the series of
    left appends results in reversing the order of elements in
    the iterable argument.
    """
    self._deque.extendleft(iterable)
    if len(self._deque) > 0:
        self.notEmpty.set()
[ "def", "extendleft", "(", "self", ",", "iterable", ")", ":", "self", ".", "_deque", ".", "extendleft", "(", "iterable", ")", "if", "len", "(", "self", ".", "_deque", ")", ">", "0", ":", "self", ".", "notEmpty", ".", "set", "(", ")" ]
Extend the left side of this GeventDeque by appending elements from the iterable argument. Note, the series of left appends results in reversing the order of elements in the iterable argument.
[ "Extend", "the", "left", "side", "of", "this", "GeventDeque", "by", "appending", "elements", "from", "the", "iterable", "argument", ".", "Note", "the", "series", "of", "left", "appends", "results", "in", "reversing", "the", "order", "of", "elements", "in", "the", "iterable", "argument", "." ]
python
train
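The order-reversal note comes straight from collections.deque, which this class wraps; a standard-library doctest makes it concrete:

>>> from collections import deque
>>> d = deque([4, 5])
>>> d.extendleft([3, 2, 1])
>>> d
deque([1, 2, 3, 4, 5])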
tradenity/python-sdk
tradenity/resources/return_line_item.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/return_line_item.py#L748-L770
def list_all_return_line_items(cls, **kwargs):
    """List ReturnLineItems

    Return a list of ReturnLineItems
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_return_line_items(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[ReturnLineItem]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_return_line_items_with_http_info(**kwargs)
    else:
        data = cls._list_all_return_line_items_with_http_info(**kwargs)
        return data
[ "def", "list_all_return_line_items", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_return_line_items_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_return_line_items_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
List ReturnLineItems Return a list of ReturnLineItems This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_return_line_items(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[ReturnLineItem] If the method is called asynchronously, returns the request thread.
[ "List", "ReturnLineItems" ]
python
train
python-gitlab/python-gitlab
gitlab/mixins.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/mixins.py#L575-L595
def participants(self, **kwargs):
    """List the participants.

    Args:
        all (bool): If True, return all the items, without pagination
        per_page (int): Number of items to retrieve per request
        page (int): ID of the page to return (starts with page 1)
        as_list (bool): If set to False and no pagination option is
            defined, return a generator instead of a list
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the list could not be retrieved

    Returns:
        RESTObjectList: The list of participants
    """

    path = '%s/%s/participants' % (self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(path, **kwargs)
[ "def", "participants", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'%s/%s/participants'", "%", "(", "self", ".", "manager", ".", "path", ",", "self", ".", "get_id", "(", ")", ")", "return", "self", ".", "manager", ".", "gitlab", ".", "http_get", "(", "path", ",", "*", "*", "kwargs", ")" ]
List the participants. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of participants
[ "List", "the", "participants", "." ]
python
train
aleju/imgaug
imgaug/augmentables/bbs.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/bbs.py#L718-L736
def to_keypoints(self):
    """
    Convert the corners of the bounding box to keypoints (clockwise,
    starting at top left).

    Returns
    -------
    list of imgaug.Keypoint
        Corners of the bounding box as keypoints.

    """
    # TODO get rid of this deferred import
    from imgaug.augmentables.kps import Keypoint
    return [
        Keypoint(x=self.x1, y=self.y1),
        Keypoint(x=self.x2, y=self.y1),
        Keypoint(x=self.x2, y=self.y2),
        Keypoint(x=self.x1, y=self.y2)
    ]
[ "def", "to_keypoints", "(", "self", ")", ":", "# TODO get rid of this deferred import", "from", "imgaug", ".", "augmentables", ".", "kps", "import", "Keypoint", "return", "[", "Keypoint", "(", "x", "=", "self", ".", "x1", ",", "y", "=", "self", ".", "y1", ")", ",", "Keypoint", "(", "x", "=", "self", ".", "x2", ",", "y", "=", "self", ".", "y1", ")", ",", "Keypoint", "(", "x", "=", "self", ".", "x2", ",", "y", "=", "self", ".", "y2", ")", ",", "Keypoint", "(", "x", "=", "self", ".", "x1", ",", "y", "=", "self", ".", "y2", ")", "]" ]
Convert the corners of the bounding box to keypoints (clockwise, starting at top left). Returns ------- list of imgaug.Keypoint Corners of the bounding box as keypoints.
[ "Convert", "the", "corners", "of", "the", "bounding", "box", "to", "keypoints", "(", "clockwise", "starting", "at", "top", "left", ")", "." ]
python
valid
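The corner order is easy to check without imgaug: for a box with top-left (x1, y1) and bottom-right (x2, y2), the method walks top-left, top-right, bottom-right, bottom-left. A sketch with a namedtuple standing in for imgaug.Keypoint:

from collections import namedtuple

Keypoint = namedtuple('Keypoint', ['x', 'y'])  # stand-in for imgaug.Keypoint

x1, y1, x2, y2 = 10, 20, 30, 40  # top-left and bottom-right corners
corners = [Keypoint(x1, y1), Keypoint(x2, y1),
           Keypoint(x2, y2), Keypoint(x1, y2)]
print(corners)
# [Keypoint(x=10, y=20), Keypoint(x=30, y=20), Keypoint(x=30, y=40), Keypoint(x=10, y=40)]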
manns/pyspread
pyspread/src/gui/_grid.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1440-L1458
def OnResizeGridDialog(self, event):
    """Resizes current grid by appending/deleting rows, cols and tables"""

    # Get grid dimensions
    new_shape = self.interfaces.get_dimensions_from_user(no_dim=3)

    if new_shape is None:
        return

    with undo.group(_("Resize grid")):
        self.grid.actions.change_grid_shape(new_shape)

    statustext = _("Grid dimensions changed to {shape}.")
    statustext = statustext.format(shape=new_shape)
    post_command_event(self.grid.main_window, self.grid.StatusBarMsg,
                       text=statustext)

    event.Skip()
[ "def", "OnResizeGridDialog", "(", "self", ",", "event", ")", ":", "# Get grid dimensions", "new_shape", "=", "self", ".", "interfaces", ".", "get_dimensions_from_user", "(", "no_dim", "=", "3", ")", "if", "new_shape", "is", "None", ":", "return", "with", "undo", ".", "group", "(", "_", "(", "\"Resize grid\"", ")", ")", ":", "self", ".", "grid", ".", "actions", ".", "change_grid_shape", "(", "new_shape", ")", "statustext", "=", "_", "(", "\"Grid dimensions changed to {shape}.\"", ")", "statustext", "=", "statustext", ".", "format", "(", "shape", "=", "new_shape", ")", "post_command_event", "(", "self", ".", "grid", ".", "main_window", ",", "self", ".", "grid", ".", "StatusBarMsg", ",", "text", "=", "statustext", ")", "event", ".", "Skip", "(", ")" ]
Resizes current grid by appending/deleting rows, cols and tables
[ "Resizes", "current", "grid", "by", "appending", "/", "deleting", "rows", "cols", "and", "tables" ]
python
train
NLeSC/noodles
noodles/prov/sqlite.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/prov/sqlite.py#L309-L314
def job_exists(self, prov):
    """Check if a job exists in the database."""
    with self.lock:
        self.cur.execute('select * from "jobs" where "prov" = ?;', (prov,))
        rec = self.cur.fetchone()
        return rec is not None
[ "def", "job_exists", "(", "self", ",", "prov", ")", ":", "with", "self", ".", "lock", ":", "self", ".", "cur", ".", "execute", "(", "'select * from \"jobs\" where \"prov\" = ?;'", ",", "(", "prov", ",", ")", ")", "rec", "=", "self", ".", "cur", ".", "fetchone", "(", ")", "return", "rec", "is", "not", "None" ]
Check if a job exists in the database.
[ "Check", "if", "a", "job", "exists", "in", "the", "database", "." ]
python
train
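The existence check above is plain parameterized sqlite3; a self-contained in-memory run of the same query shape (table contents are illustrative):

import sqlite3

con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute('create table "jobs" ("prov" text)')
cur.execute('insert into "jobs" values (?)', ('abc123',))

cur.execute('select * from "jobs" where "prov" = ?;', ('abc123',))
print(cur.fetchone() is not None)  # True
cur.execute('select * from "jobs" where "prov" = ?;', ('missing',))
print(cur.fetchone() is not None)  # False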
codelv/enaml-native
src/enamlnative/android/android_utils.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_utils.py#L56-L78
def toggle_keyboard(cls, flag=HIDE_IMPLICIT_ONLY):
    """ Toggle the keyboard on and off

    Parameters
    ----------
    flag: int
        Flag to send to toggleSoftInput

    Returns
    --------
    result: future
        Resolves when the toggle is complete

    """
    app = AndroidApplication.instance()
    f = app.create_future()

    def on_ready(ims):
        ims.toggleSoftInput(flag, 0)
        f.set_result(True)

    cls.get().then(on_ready)
    return f
[ "def", "toggle_keyboard", "(", "cls", ",", "flag", "=", "HIDE_IMPLICIT_ONLY", ")", ":", "app", "=", "AndroidApplication", ".", "instance", "(", ")", "f", "=", "app", ".", "create_future", "(", ")", "def", "on_ready", "(", "ims", ")", ":", "ims", ".", "toggleSoftInput", "(", "flag", ",", "0", ")", "f", ".", "set_result", "(", "True", ")", "cls", ".", "get", "(", ")", ".", "then", "(", "on_ready", ")", "return", "f" ]
Toggle the keyboard on and off Parameters ---------- flag: int Flag to send to toggleSoftInput Returns -------- result: future Resolves when the toggle is complete
[ "Toggle", "the", "keyboard", "on", "and", "off", "Parameters", "----------", "flag", ":", "int", "Flag", "to", "send", "to", "toggleSoftInput", "Returns", "--------", "result", ":", "future", "Resolves", "when", "the", "toggle", "is", "complete" ]
python
train
barryp/py-amqplib
amqplib/client_0_8/channel.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/channel.py#L675-L844
def exchange_declare(self, exchange, type, passive=False, durable=False,
    auto_delete=True, internal=False, nowait=False,
    arguments=None, ticket=None):
    """
    declare exchange, create if needed

    This method creates an exchange if it does not already exist,
    and if the exchange exists, verifies that it is of the correct
    and expected class.

    RULE:

        The server SHOULD support a minimum of 16 exchanges per
        virtual host and ideally, impose no limit except as
        defined by available resources.

    PARAMETERS:
        exchange: shortstr

            RULE:

                Exchange names starting with "amq." are reserved
                for predeclared and standardised exchanges. If
                the client attempts to create an exchange starting
                with "amq.", the server MUST raise a channel
                exception with reply code 403 (access refused).

        type: shortstr

            exchange type

            Each exchange belongs to one of a set of exchange
            types implemented by the server. The exchange types
            define the functionality of the exchange - i.e. how
            messages are routed through it. It is not valid or
            meaningful to attempt to change the type of an
            existing exchange.

            RULE:

                If the exchange already exists with a different
                type, the server MUST raise a connection exception
                with a reply code 507 (not allowed).

            RULE:

                If the server does not support the requested
                exchange type it MUST raise a connection exception
                with a reply code 503 (command invalid).

        passive: boolean

            do not create exchange

            If set, the server will not create the exchange. The
            client can use this to check whether an exchange
            exists without modifying the server state.

            RULE:

                If set, and the exchange does not already exist,
                the server MUST raise a channel exception with
                reply code 404 (not found).

        durable: boolean

            request a durable exchange

            If set when creating a new exchange, the exchange
            will be marked as durable. Durable exchanges remain
            active when a server restarts. Non-durable exchanges
            (transient exchanges) are purged if/when a server
            restarts.

            RULE:

                The server MUST support both durable and transient
                exchanges.

            RULE:

                The server MUST ignore the durable field if the
                exchange already exists.

        auto_delete: boolean

            auto-delete when unused

            If set, the exchange is deleted when all queues have
            finished using it.

            RULE:

                The server SHOULD allow for a reasonable delay
                between the point when it determines that an
                exchange is not being used (or no longer used),
                and the point when it deletes the exchange. At
                the least it must allow a client to create an
                exchange and then bind a queue to it, with a
                small but non-zero delay between these two
                actions.

            RULE:

                The server MUST ignore the auto-delete field if
                the exchange already exists.

        internal: boolean

            create internal exchange

            If set, the exchange may not be used directly by
            publishers, but only when bound to other exchanges.
            Internal exchanges are used to construct wiring that
            is not visible to applications.

        nowait: boolean

            do not send a reply method

            If set, the server will not respond to the method.
            The client should not wait for a reply method. If
            the server could not complete the method it will
            raise a channel or connection exception.

        arguments: table

            arguments for declaration

            A set of arguments for the declaration. The syntax
            and semantics of these arguments depends on the
            server implementation. This field is ignored if
            passive is True.

        ticket: short

            When a client defines a new exchange, this belongs
            to the access realm of the ticket used. All further
            work done with that exchange must be done with an
            access ticket for the same realm.

            RULE:

                The client MUST provide a valid access ticket
                giving "active" access to the realm in which the
                exchange exists or will be created, or "passive"
                access if the if-exists flag is set.

    """
    if arguments is None:
        arguments = {}

    args = AMQPWriter()
    if ticket is not None:
        args.write_short(ticket)
    else:
        args.write_short(self.default_ticket)
    args.write_shortstr(exchange)
    args.write_shortstr(type)
    args.write_bit(passive)
    args.write_bit(durable)
    args.write_bit(auto_delete)
    args.write_bit(internal)
    args.write_bit(nowait)
    args.write_table(arguments)
    self._send_method((40, 10), args)

    if not nowait:
        return self.wait(allowed_methods=[
            (40, 11),    # Channel.exchange_declare_ok
        ])
[ "def", "exchange_declare", "(", "self", ",", "exchange", ",", "type", ",", "passive", "=", "False", ",", "durable", "=", "False", ",", "auto_delete", "=", "True", ",", "internal", "=", "False", ",", "nowait", "=", "False", ",", "arguments", "=", "None", ",", "ticket", "=", "None", ")", ":", "if", "arguments", "is", "None", ":", "arguments", "=", "{", "}", "args", "=", "AMQPWriter", "(", ")", "if", "ticket", "is", "not", "None", ":", "args", ".", "write_short", "(", "ticket", ")", "else", ":", "args", ".", "write_short", "(", "self", ".", "default_ticket", ")", "args", ".", "write_shortstr", "(", "exchange", ")", "args", ".", "write_shortstr", "(", "type", ")", "args", ".", "write_bit", "(", "passive", ")", "args", ".", "write_bit", "(", "durable", ")", "args", ".", "write_bit", "(", "auto_delete", ")", "args", ".", "write_bit", "(", "internal", ")", "args", ".", "write_bit", "(", "nowait", ")", "args", ".", "write_table", "(", "arguments", ")", "self", ".", "_send_method", "(", "(", "40", ",", "10", ")", ",", "args", ")", "if", "not", "nowait", ":", "return", "self", ".", "wait", "(", "allowed_methods", "=", "[", "(", "40", ",", "11", ")", ",", "# Channel.exchange_declare_ok", "]", ")" ]
declare exchange, create if needed This method creates an exchange if it does not already exist, and if the exchange exists, verifies that it is of the correct and expected class. RULE: The server SHOULD support a minimum of 16 exchanges per virtual host and ideally, impose no limit except as defined by available resources. PARAMETERS: exchange: shortstr RULE: Exchange names starting with "amq." are reserved for predeclared and standardised exchanges. If the client attempts to create an exchange starting with "amq.", the server MUST raise a channel exception with reply code 403 (access refused). type: shortstr exchange type Each exchange belongs to one of a set of exchange types implemented by the server. The exchange types define the functionality of the exchange - i.e. how messages are routed through it. It is not valid or meaningful to attempt to change the type of an existing exchange. RULE: If the exchange already exists with a different type, the server MUST raise a connection exception with a reply code 507 (not allowed). RULE: If the server does not support the requested exchange type it MUST raise a connection exception with a reply code 503 (command invalid). passive: boolean do not create exchange If set, the server will not create the exchange. The client can use this to check whether an exchange exists without modifying the server state. RULE: If set, and the exchange does not already exist, the server MUST raise a channel exception with reply code 404 (not found). durable: boolean request a durable exchange If set when creating a new exchange, the exchange will be marked as durable. Durable exchanges remain active when a server restarts. Non-durable exchanges (transient exchanges) are purged if/when a server restarts. RULE: The server MUST support both durable and transient exchanges. RULE: The server MUST ignore the durable field if the exchange already exists. auto_delete: boolean auto-delete when unused If set, the exchange is deleted when all queues have finished using it. RULE: The server SHOULD allow for a reasonable delay between the point when it determines that an exchange is not being used (or no longer used), and the point when it deletes the exchange. At the least it must allow a client to create an exchange and then bind a queue to it, with a small but non-zero delay between these two actions. RULE: The server MUST ignore the auto-delete field if the exchange already exists. internal: boolean create internal exchange If set, the exchange may not be used directly by publishers, but only when bound to other exchanges. Internal exchanges are used to construct wiring that is not visible to applications. nowait: boolean do not send a reply method If set, the server will not respond to the method. The client should not wait for a reply method. If the server could not complete the method it will raise a channel or connection exception. arguments: table arguments for declaration A set of arguments for the declaration. The syntax and semantics of these arguments depends on the server implementation. This field is ignored if passive is True. ticket: short When a client defines a new exchange, this belongs to the access realm of the ticket used. All further work done with that exchange must be done with an access ticket for the same realm. RULE: The client MUST provide a valid access ticket giving "active" access to the realm in which the exchange exists or will be created, or "passive" access if the if-exists flag is set.
[ "declare", "exchange", "create", "if", "needed" ]
python
train
monarch-initiative/dipper
dipper/utils/DipperUtil.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/utils/DipperUtil.py#L111-L143
def is_omim_disease(gene_id):
    """
    Process omim equivalencies by examining the monarch ontology scigraph
    As an alternative we could examine mondo.owl, since the ontology
    scigraph imports the output of this script which creates an odd
    circular dependency (even though we're querying mondo.owl through
    scigraph)

    :param graph: rdfLib graph object
    :param gene_id: ncbi gene id as curie
    :param omim_id: omim id as curie
    :return: None
    """
    SCIGRAPH_BASE = 'https://scigraph-ontology-dev.monarchinitiative.org/scigraph/graph/'
    session = requests.Session()
    adapter = requests.adapters.HTTPAdapter(max_retries=10)
    session.mount('https://', adapter)
    isOmimDisease = False
    url = SCIGRAPH_BASE + gene_id + '.json'
    response = session.get(url)
    try:
        results = response.json()
        if 'nodes' in results and len(results['nodes']) > 0:
            if 'meta' in results['nodes'][0] \
                    and 'category' in results['nodes'][0]['meta'] \
                    and 'disease' in results['nodes'][0]['meta']['category']:
                LOG.info("%s is a disease, skipping", gene_id)
                isOmimDisease = True
    except ValueError:
        pass

    return isOmimDisease
[ "def", "is_omim_disease", "(", "gene_id", ")", ":", "SCIGRAPH_BASE", "=", "'https://scigraph-ontology-dev.monarchinitiative.org/scigraph/graph/'", "session", "=", "requests", ".", "Session", "(", ")", "adapter", "=", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "10", ")", "session", ".", "mount", "(", "'https://'", ",", "adapter", ")", "isOmimDisease", "=", "False", "url", "=", "SCIGRAPH_BASE", "+", "gene_id", "+", "'.json'", "response", "=", "session", ".", "get", "(", "url", ")", "try", ":", "results", "=", "response", ".", "json", "(", ")", "if", "'nodes'", "in", "results", "and", "len", "(", "results", "[", "'nodes'", "]", ")", ">", "0", ":", "if", "'meta'", "in", "results", "[", "'nodes'", "]", "[", "0", "]", "and", "'category'", "in", "results", "[", "'nodes'", "]", "[", "0", "]", "[", "'meta'", "]", "and", "'disease'", "in", "results", "[", "'nodes'", "]", "[", "0", "]", "[", "'meta'", "]", "[", "'category'", "]", ":", "LOG", ".", "info", "(", "\"%s is a disease, skipping\"", ",", "gene_id", ")", "isOmimDisease", "=", "True", "except", "ValueError", ":", "pass", "return", "isOmimDisease" ]
Process omim equivalencies by examining the monarch ontology scigraph As an alternative we could examine mondo.owl, since the ontology scigraph imports the output of this script which creates an odd circular dependency (even though we're querying mondo.owl through scigraph) :param graph: rdfLib graph object :param gene_id: ncbi gene id as curie :param omim_id: omim id as curie :return: None
[ "Process", "omim", "equivalencies", "by", "examining", "the", "monarch", "ontology", "scigraph", "As", "an", "alternative", "we", "could", "examine", "mondo", ".", "owl", "since", "the", "ontology", "scigraph", "imports", "the", "output", "of", "this", "script", "which", "creates", "an", "odd", "circular", "dependency", "(", "even", "though", "we", "re", "querying", "mondo", ".", "owl", "through", "scigraph", ")" ]
python
train
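The retry setup in the function above is the standard requests pattern: mount an HTTPAdapter with max_retries on a Session so transient connection failures are retried transparently for every matching URL prefix. A minimal sketch; the URL is a placeholder and the network calls are left commented so the snippet runs offline:

import requests

session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
session.mount('https://', adapter)  # applies to all https:// requests on this session

# response = session.get('https://example.org/resource.json')
# data = response.json()  # raises ValueError if the body is not JSON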
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_spinn3r_feed_storage.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_spinn3r_feed_storage.py#L301-L332
def _make_content_item(node, mime_type=None, alternate_data=None):
    """Create a ContentItem from a node in the spinn3r data tree.

    The ContentItem is created with raw data set to ``node.data``,
    decompressed if the node's encoding is 'zlib', and UTF-8
    normalized, with a MIME type from ``node.mime_type``.

    ``node``
      the actual node from the spinn3r protobuf data
    ``mime_type``
      string MIME type to use (defaults to ``node.mime_type``)
    ``alternate_data``
      alternate (compressed) data to use, if ``node.data``
      is missing or can't be decompressed

    """
    raw = node.data
    if getattr(node, 'encoding', None) == 'zlib':
        try:
            raw = zlib.decompress(node.data)
        # ported to Python 3 syntax; original source reads "except Exception, exc:"
        except Exception as exc:
            if alternate_data is not None:
                try:
                    raw = zlib.decompress(alternate_data)
                except Exception:
                    raise exc  # the original exception
            else:
                raise
    if mime_type is None:
        mime_type = node.mime_type
    raw = raw.decode('utf8').encode('utf8')
    return streamcorpus.ContentItem(raw=raw, media_type=mime_type)
[ "def", "_make_content_item", "(", "node", ",", "mime_type", "=", "None", ",", "alternate_data", "=", "None", ")", ":", "raw", "=", "node", ".", "data", "if", "getattr", "(", "node", ",", "'encoding'", ",", "None", ")", "==", "'zlib'", ":", "try", ":", "raw", "=", "zlib", ".", "decompress", "(", "node", ".", "data", ")", "except", "Exception", ",", "exc", ":", "if", "alternate_data", "is", "not", "None", ":", "try", ":", "raw", "=", "zlib", ".", "decompress", "(", "alternate_data", ")", "except", "Exception", ":", "raise", "exc", "# the original exception", "else", ":", "raise", "if", "mime_type", "is", "None", ":", "mime_type", "=", "node", ".", "mime_type", "raw", "=", "raw", ".", "decode", "(", "'utf8'", ")", ".", "encode", "(", "'utf8'", ")", "return", "streamcorpus", ".", "ContentItem", "(", "raw", "=", "raw", ",", "media_type", "=", "mime_type", ")" ]
Create a ContentItem from a node in the spinn3r data tree. The ContentItem is created with raw data set to ``node.data``, decompressed if the node's encoding is 'zlib', and UTF-8 normalized, with a MIME type from ``node.mime_type``. ``node`` the actual node from the spinn3r protobuf data ``mime_type`` string MIME type to use (defaults to ``node.mime_type``) ``alternate_data`` alternate (compressed) data to use, if ``node.data`` is missing or can't be decompressed
[ "Create", "a", "ContentItem", "from", "a", "node", "in", "the", "spinn3r", "data", "tree", "." ]
python
test
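The decompress-with-fallback logic can be demonstrated with zlib alone: try the primary payload, fall back to the alternate, and re-raise the original error if both fail. A self-contained run with toy payloads:

import zlib

primary = b'not-zlib-data'                 # corrupt primary payload
alternate = zlib.compress(b'hello spinn3r')  # valid fallback payload

try:
    raw = zlib.decompress(primary)
except Exception as exc:
    try:
        raw = zlib.decompress(alternate)
    except Exception:
        raise exc  # both failed: surface the original error

print(raw)  # b'hello spinn3r'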
niemasd/TreeSwift
treeswift/Tree.py
https://github.com/niemasd/TreeSwift/blob/7e0cbc770fcc2ee1194ef7c2a0ab9fb82f089917/treeswift/Tree.py#L1199-L1223
def read_tree_dendropy(tree):
    '''Create a TreeSwift tree from a DendroPy tree

    Args:
        ``tree`` (``dendropy.datamodel.treemodel``): A Dendropy ``Tree`` object

    Returns:
        ``Tree``: A TreeSwift tree created from ``tree``
    '''
    out = Tree(); d2t = dict()
    if not hasattr(tree, 'preorder_node_iter') or not hasattr(tree, 'seed_node') or not hasattr(tree, 'is_rooted'):
        raise TypeError("tree must be a DendroPy Tree object")
    if tree.is_rooted != True:
        out.is_rooted = False
    for node in tree.preorder_node_iter():
        if node == tree.seed_node:
            curr = out.root
        else:
            curr = Node(); d2t[node.parent_node].add_child(curr)
        d2t[node] = curr; curr.edge_length = node.edge_length
        if hasattr(node, 'taxon') and node.taxon is not None:
            curr.label = node.taxon.label
        else:
            curr.label = node.label
    return out
[ "def", "read_tree_dendropy", "(", "tree", ")", ":", "out", "=", "Tree", "(", ")", "d2t", "=", "dict", "(", ")", "if", "not", "hasattr", "(", "tree", ",", "'preorder_node_iter'", ")", "or", "not", "hasattr", "(", "tree", ",", "'seed_node'", ")", "or", "not", "hasattr", "(", "tree", ",", "'is_rooted'", ")", ":", "raise", "TypeError", "(", "\"tree must be a DendroPy Tree object\"", ")", "if", "tree", ".", "is_rooted", "!=", "True", ":", "out", ".", "is_rooted", "=", "False", "for", "node", "in", "tree", ".", "preorder_node_iter", "(", ")", ":", "if", "node", "==", "tree", ".", "seed_node", ":", "curr", "=", "out", ".", "root", "else", ":", "curr", "=", "Node", "(", ")", "d2t", "[", "node", ".", "parent_node", "]", ".", "add_child", "(", "curr", ")", "d2t", "[", "node", "]", "=", "curr", "curr", ".", "edge_length", "=", "node", ".", "edge_length", "if", "hasattr", "(", "node", ",", "'taxon'", ")", "and", "node", ".", "taxon", "is", "not", "None", ":", "curr", ".", "label", "=", "node", ".", "taxon", ".", "label", "else", ":", "curr", ".", "label", "=", "node", ".", "label", "return", "out" ]
Create a TreeSwift tree from a DendroPy tree Args: ``tree`` (``dendropy.datamodel.treemodel``): A Dendropy ``Tree`` object Returns: ``Tree``: A TreeSwift tree created from ``tree``
[ "Create", "a", "TreeSwift", "tree", "from", "a", "DendroPy", "tree" ]
python
train
senaite/senaite.core
bika/lims/api/snapshot.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/snapshot.py#L319-L342
def compare_snapshots(snapshot_a, snapshot_b, raw=False):
    """Returns a diff of two given snapshots (dictionaries)

    :param snapshot_a: First snapshot
    :param snapshot_b: Second snapshot
    :param raw: True to compare the raw values, e.g. UIDs
    :returns: Dictionary of field/value pairs that differ
    """
    if not all(map(lambda x: isinstance(x, dict), [snapshot_a, snapshot_b])):
        return {}

    diffs = {}
    for key_a, value_a in snapshot_a.iteritems():
        # skip fields starting with _ or __
        if key_a.startswith("_"):
            continue
        # get the value of the second snapshot
        value_b = snapshot_b.get(key_a)
        # get the diff between the two values
        diff = diff_values(value_a, value_b, raw=raw)
        if diff is not None:
            diffs[key_a] = diff

    return diffs
[ "def", "compare_snapshots", "(", "snapshot_a", ",", "snapshot_b", ",", "raw", "=", "False", ")", ":", "if", "not", "all", "(", "map", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "dict", ")", ",", "[", "snapshot_a", ",", "snapshot_b", "]", ")", ")", ":", "return", "{", "}", "diffs", "=", "{", "}", "for", "key_a", ",", "value_a", "in", "snapshot_a", ".", "iteritems", "(", ")", ":", "# skip fieds starting with _ or __", "if", "key_a", ".", "startswith", "(", "\"_\"", ")", ":", "continue", "# get the value of the second snapshot", "value_b", "=", "snapshot_b", ".", "get", "(", "key_a", ")", "# get the diff between the two values", "diff", "=", "diff_values", "(", "value_a", ",", "value_b", ",", "raw", "=", "raw", ")", "if", "diff", "is", "not", "None", ":", "diffs", "[", "key_a", "]", "=", "diff", "return", "diffs" ]
Returns a diff of two given snapshots (dictionaries) :param snapshot_a: First snapshot :param snapshot_b: Second snapshot :param raw: True to compare the raw values, e.g. UIDs :returns: Dictionary of field/value pairs that differ
[ "Returns", "a", "diff", "of", "two", "given", "snapshots", "(", "dictionaries", ")" ]
python
train
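The comparison above is a shallow key-by-key diff; a Python 3 sketch of the same idea (`items()` instead of the Python 2 `iteritems()`), with `diff_values` reduced to a hypothetical plain inequality check:

def diff_values(value_a, value_b):
    # Hypothetical stand-in: report the differing pair, or None when equal.
    return None if value_a == value_b else (value_a, value_b)

def compare_snapshots(snapshot_a, snapshot_b):
    if not all(isinstance(x, dict) for x in (snapshot_a, snapshot_b)):
        return {}
    diffs = {}
    for key, value_a in snapshot_a.items():
        if key.startswith("_"):  # skip fields starting with _ or __
            continue
        diff = diff_values(value_a, snapshot_b.get(key))
        if diff is not None:
            diffs[key] = diff
    return diffs

print(compare_snapshots({"State": "open", "_uid": 1}, {"State": "closed", "_uid": 2}))
# {'State': ('open', 'closed')}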
noxdafox/clipspy
clips/facts.py
https://github.com/noxdafox/clipspy/blob/b22d71a6da821c1715d8fa00d7d75cabc09ed364/clips/facts.py#L365-L367
def watch(self, flag): """Whether or not the Template is being watched.""" lib.EnvSetDeftemplateWatch(self._env, int(flag), self._tpl)
[ "def", "watch", "(", "self", ",", "flag", ")", ":", "lib", ".", "EnvSetDeftemplateWatch", "(", "self", ".", "_env", ",", "int", "(", "flag", ")", ",", "self", ".", "_tpl", ")" ]
Whether or not the Template is being watched.
[ "Whether", "or", "not", "the", "Template", "is", "being", "watched", "." ]
python
train
kanboard/python-api-client
kanboard/client.py
https://github.com/kanboard/python-api-client/blob/a1e81094bb399a9a3f4f14de67406e1d2bbee393/kanboard/client.py#L106-L135
def execute(self, method, **kwargs): """ Call remote API procedure Args: method: Procedure name kwargs: Procedure named arguments Returns: Procedure result Raises: urllib2.HTTPError: Any HTTP error (Python 2) urllib.error.HTTPError: Any HTTP error (Python 3) """ payload = { 'id': 1, 'jsonrpc': '2.0', 'method': method, 'params': kwargs } credentials = base64.b64encode('{}:{}'.format(self._username, self._password).encode()) auth_header_prefix = 'Basic ' if self._auth_header == DEFAULT_AUTH_HEADER else '' headers = { self._auth_header: auth_header_prefix + credentials.decode(), 'Content-Type': 'application/json', } return self._do_request(headers, payload)
[ "def", "execute", "(", "self", ",", "method", ",", "*", "*", "kwargs", ")", ":", "payload", "=", "{", "'id'", ":", "1", ",", "'jsonrpc'", ":", "'2.0'", ",", "'method'", ":", "method", ",", "'params'", ":", "kwargs", "}", "credentials", "=", "base64", ".", "b64encode", "(", "'{}:{}'", ".", "format", "(", "self", ".", "_username", ",", "self", ".", "_password", ")", ".", "encode", "(", ")", ")", "auth_header_prefix", "=", "'Basic '", "if", "self", ".", "_auth_header", "==", "DEFAULT_AUTH_HEADER", "else", "''", "headers", "=", "{", "self", ".", "_auth_header", ":", "auth_header_prefix", "+", "credentials", ".", "decode", "(", ")", ",", "'Content-Type'", ":", "'application/json'", ",", "}", "return", "self", ".", "_do_request", "(", "headers", ",", "payload", ")" ]
Call remote API procedure Args: method: Procedure name kwargs: Procedure named arguments Returns: Procedure result Raises: urllib2.HTTPError: Any HTTP error (Python 2) urllib.error.HTTPError: Any HTTP error (Python 3)
[ "Call", "remote", "API", "procedure" ]
python
train
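A sketch of just the request construction performed by `execute` (a JSON-RPC 2.0 payload plus a Basic auth header), with dummy credentials and no network call:

import base64
import json

def build_jsonrpc_request(method, username, password, **params):
    payload = {"id": 1, "jsonrpc": "2.0", "method": method, "params": params}
    credentials = base64.b64encode("{}:{}".format(username, password).encode())
    headers = {
        "Authorization": "Basic " + credentials.decode(),
        "Content-Type": "application/json",
    }
    return headers, json.dumps(payload)

headers, body = build_jsonrpc_request("getProjectById", "jsonrpc", "secret-token", project_id=1)
print(headers["Content-Type"])
print(body)  # {"id": 1, "jsonrpc": "2.0", "method": "getProjectById", "params": {"project_id": 1}}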
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L510-L519
def last(self, n=1):
    """
    Get the last element of an array. Passing **n** will return the last N
    values in the array. The **guard** check allows it to work with
    `_.map`.
    """
    res = self.obj[-n:]
    if len(res) == 1:
        res = res[0]
    return self._wrap(res)
[ "def", "last", "(", "self", ",", "n", "=", "1", ")", ":", "res", "=", "self", ".", "obj", "[", "-", "n", ":", "]", "if", "len", "(", "res", ")", "is", "1", ":", "res", "=", "res", "[", "0", "]", "return", "self", ".", "_wrap", "(", "res", ")" ]
Get the last element of an array. Passing **n** will return the last N values in the array. The **guard** check allows it to work with `_.map`.
[ "Get", "the", "last", "element", "of", "an", "array", ".", "Passing", "**", "n", "**", "will", "return", "the", "last", "N", "values", "in", "the", "array", ".", "The", "**", "guard", "**", "check", "allows", "it", "to", "work", "with", "_", ".", "map", "." ]
python
train
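The slicing trick in isolation: `seq[-n:]` takes the last n items, and a one-element result is unwrapped to the bare value:

def last(seq, n=1):
    res = seq[-n:]
    return res[0] if len(res) == 1 else res  # unwrap singletons

print(last([1, 2, 3]))     # 3
print(last([1, 2, 3], 2))  # [2, 3]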
saltstack/salt
salt/modules/kernelpkg_linux_yum.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kernelpkg_linux_yum.py#L64-L81
def list_installed(): ''' Return a list of all installed kernels. CLI Example: .. code-block:: bash salt '*' kernelpkg.list_installed ''' result = __salt__['pkg.version'](_package_name(), versions_as_list=True) if result is None: return [] if six.PY2: return sorted(result, cmp=_cmp_version) else: return sorted(result, key=functools.cmp_to_key(_cmp_version))
[ "def", "list_installed", "(", ")", ":", "result", "=", "__salt__", "[", "'pkg.version'", "]", "(", "_package_name", "(", ")", ",", "versions_as_list", "=", "True", ")", "if", "result", "is", "None", ":", "return", "[", "]", "if", "six", ".", "PY2", ":", "return", "sorted", "(", "result", ",", "cmp", "=", "_cmp_version", ")", "else", ":", "return", "sorted", "(", "result", ",", "key", "=", "functools", ".", "cmp_to_key", "(", "_cmp_version", ")", ")" ]
Return a list of all installed kernels. CLI Example: .. code-block:: bash salt '*' kernelpkg.list_installed
[ "Return", "a", "list", "of", "all", "installed", "kernels", "." ]
python
train
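The Python 2/3 branch above exists because `sorted()` lost its `cmp=` argument in Python 3; `functools.cmp_to_key` wraps an old-style comparator instead. A runnable sketch with a hypothetical dotted-version comparator:

import functools

def _cmp_version(a, b):
    # Hypothetical comparator: numeric, segment-wise version ordering.
    pa = [int(part) for part in a.split(".")]
    pb = [int(part) for part in b.split(".")]
    return (pa > pb) - (pa < pb)

versions = ["4.18.0", "4.4.1", "4.4.10"]
print(sorted(versions, key=functools.cmp_to_key(_cmp_version)))
# ['4.4.1', '4.4.10', '4.18.0']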
Fantomas42/django-blog-zinnia
zinnia/views/mixins/templates.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/views/mixins/templates.py#L159-L165
def get_default_base_template_names(self):
    """
    Return the default base template names,
    built from the entry's detail template and slug.
    """
    return [self.object.detail_template,
            '%s.html' % self.object.slug,
            '%s_%s' % (self.object.slug, self.object.detail_template)]
[ "def", "get_default_base_template_names", "(", "self", ")", ":", "return", "[", "self", ".", "object", ".", "detail_template", ",", "'%s.html'", "%", "self", ".", "object", ".", "slug", ",", "'%s_%s'", "%", "(", "self", ".", "object", ".", "slug", ",", "self", ".", "object", ".", "detail_template", ")", "]" ]
Return the default base template names,
built from the entry's detail template and slug.
[ "Return", "the", "Entry", ".", "template", "value", "." ]
python
train
ultrabug/py3status
py3status/formatter.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/formatter.py#L176-L222
def build_block(self, format_string): """ Parse the format string into blocks containing Literals, Placeholders etc that we can cache and reuse. """ first_block = Block(None, py3_wrapper=self.py3_wrapper) block = first_block # Tokenize the format string and process them for token in self.tokens(format_string): value = token.group(0) if token.group("block_start"): # Create new block block = block.new_block() elif token.group("block_end"): # Close block setting any valid state as needed # and return to parent block to continue if not block.parent: raise Exception("Too many `]`") block = block.parent elif token.group("switch"): # a new option has been created block = block.switch() elif token.group("placeholder"): # Found a {placeholder} key = token.group("key") format = token.group("format") block.add(Placeholder(key, format)) elif token.group("literal"): block.add(Literal(value)) elif token.group("lost_brace"): # due to how parsing happens we can get a lonesome } # eg in format_string '{{something}' this fixes that issue block.add(Literal(value)) elif token.group("command"): # a block command has been found block.set_commands(token.group("command")) elif token.group("escaped"): # escaped characters add unescaped values if value[0] in ["\\", "{", "}"]: value = value[1:] block.add(Literal(value)) if block.parent: raise Exception("Block not closed") # add to the cache self.block_cache[format_string] = first_block
[ "def", "build_block", "(", "self", ",", "format_string", ")", ":", "first_block", "=", "Block", "(", "None", ",", "py3_wrapper", "=", "self", ".", "py3_wrapper", ")", "block", "=", "first_block", "# Tokenize the format string and process them", "for", "token", "in", "self", ".", "tokens", "(", "format_string", ")", ":", "value", "=", "token", ".", "group", "(", "0", ")", "if", "token", ".", "group", "(", "\"block_start\"", ")", ":", "# Create new block", "block", "=", "block", ".", "new_block", "(", ")", "elif", "token", ".", "group", "(", "\"block_end\"", ")", ":", "# Close block setting any valid state as needed", "# and return to parent block to continue", "if", "not", "block", ".", "parent", ":", "raise", "Exception", "(", "\"Too many `]`\"", ")", "block", "=", "block", ".", "parent", "elif", "token", ".", "group", "(", "\"switch\"", ")", ":", "# a new option has been created", "block", "=", "block", ".", "switch", "(", ")", "elif", "token", ".", "group", "(", "\"placeholder\"", ")", ":", "# Found a {placeholder}", "key", "=", "token", ".", "group", "(", "\"key\"", ")", "format", "=", "token", ".", "group", "(", "\"format\"", ")", "block", ".", "add", "(", "Placeholder", "(", "key", ",", "format", ")", ")", "elif", "token", ".", "group", "(", "\"literal\"", ")", ":", "block", ".", "add", "(", "Literal", "(", "value", ")", ")", "elif", "token", ".", "group", "(", "\"lost_brace\"", ")", ":", "# due to how parsing happens we can get a lonesome }", "# eg in format_string '{{something}' this fixes that issue", "block", ".", "add", "(", "Literal", "(", "value", ")", ")", "elif", "token", ".", "group", "(", "\"command\"", ")", ":", "# a block command has been found", "block", ".", "set_commands", "(", "token", ".", "group", "(", "\"command\"", ")", ")", "elif", "token", ".", "group", "(", "\"escaped\"", ")", ":", "# escaped characters add unescaped values", "if", "value", "[", "0", "]", "in", "[", "\"\\\\\"", ",", "\"{\"", ",", "\"}\"", "]", ":", "value", "=", "value", "[", "1", ":", "]", "block", ".", "add", "(", "Literal", "(", "value", ")", ")", "if", "block", ".", "parent", ":", "raise", "Exception", "(", "\"Block not closed\"", ")", "# add to the cache", "self", ".", "block_cache", "[", "format_string", "]", "=", "first_block" ]
Parse the format string into blocks containing Literals, Placeholders etc that we can cache and reuse.
[ "Parse", "the", "format", "string", "into", "blocks", "containing", "Literals", "Placeholders", "etc", "that", "we", "can", "cache", "and", "reuse", "." ]
python
train
saltstack/salt
salt/modules/zypperpkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zypperpkg.py#L2660-L2723
def resolve_capabilities(pkgs, refresh=False, root=None, **kwargs):
    '''
    .. versionadded:: 2018.3.0

    Convert name provides in ``pkgs`` into real package names if the
    ``resolve_capabilities`` parameter is set to True. In case
    ``resolve_capabilities`` is set to False the package list
    is returned unchanged.

    refresh
        force a refresh if set to True.
        If set to False (default) it depends on zypper if a refresh is
        executed.

    root
        operate on a different root directory.

    resolve_capabilities
        If this option is set to True the input will be checked if
        a package with this name exists. If not, this function will
        search for a package which provides this name. If one is found
        the output is exchanged with the real package name.
        In case this option is set to False (Default) the input will
        be returned unchanged.

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.resolve_capabilities resolve_capabilities=True w3m_ssl
    '''
    if refresh:
        refresh_db(root)

    ret = list()
    for pkg in pkgs:
        if isinstance(pkg, dict):
            name = next(iter(pkg))
            version = pkg[name]
        else:
            name = pkg
            version = None

        if kwargs.get('resolve_capabilities', False):
            try:
                search(name, root=root, match='exact')
            except CommandExecutionError:
                # no package with such a name found
                # search for a package which provides this name
                try:
                    result = search(name, root=root, provides=True, match='exact')
                    if len(result) == 1:
                        name = next(iter(result.keys()))
                    elif len(result) > 1:
                        log.warning("Found ambiguous match for capability '%s'.", pkg)
                except CommandExecutionError as exc:
                    # when search throws an exception stay with original name and version
                    log.debug("Search failed with: %s", exc)

        if version:
            ret.append({name: version})
        else:
            ret.append(name)
    return ret
[ "def", "resolve_capabilities", "(", "pkgs", ",", "refresh", "=", "False", ",", "root", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "refresh", ":", "refresh_db", "(", "root", ")", "ret", "=", "list", "(", ")", "for", "pkg", "in", "pkgs", ":", "if", "isinstance", "(", "pkg", ",", "dict", ")", ":", "name", "=", "next", "(", "iter", "(", "pkg", ")", ")", "version", "=", "pkg", "[", "name", "]", "else", ":", "name", "=", "pkg", "version", "=", "None", "if", "kwargs", ".", "get", "(", "'resolve_capabilities'", ",", "False", ")", ":", "try", ":", "search", "(", "name", ",", "root", "=", "root", ",", "match", "=", "'exact'", ")", "except", "CommandExecutionError", ":", "# no package this such a name found", "# search for a package which provides this name", "try", ":", "result", "=", "search", "(", "name", ",", "root", "=", "root", ",", "provides", "=", "True", ",", "match", "=", "'exact'", ")", "if", "len", "(", "result", ")", "==", "1", ":", "name", "=", "next", "(", "iter", "(", "result", ".", "keys", "(", ")", ")", ")", "elif", "len", "(", "result", ")", ">", "1", ":", "log", ".", "warning", "(", "\"Found ambiguous match for capability '%s'.\"", ",", "pkg", ")", "except", "CommandExecutionError", "as", "exc", ":", "# when search throws an exception stay with original name and version", "log", ".", "debug", "(", "\"Search failed with: %s\"", ",", "exc", ")", "if", "version", ":", "ret", ".", "append", "(", "{", "name", ":", "version", "}", ")", "else", ":", "ret", ".", "append", "(", "name", ")", "return", "ret" ]
.. versionadded:: 2018.3.0

Convert name provides in ``pkgs`` into real package names if the
``resolve_capabilities`` parameter is set to True. In case
``resolve_capabilities`` is set to False the package list
is returned unchanged.

refresh
    force a refresh if set to True.
    If set to False (default) it depends on zypper if a refresh is
    executed.

root
    operate on a different root directory.

resolve_capabilities
    If this option is set to True the input will be checked if
    a package with this name exists. If not, this function will
    search for a package which provides this name. If one is found
    the output is exchanged with the real package name.
    In case this option is set to False (Default) the input will
    be returned unchanged.

CLI Examples:

.. code-block:: bash

    salt '*' pkg.resolve_capabilities resolve_capabilities=True w3m_ssl
[ "..", "versionadded", "::", "2018", ".", "3", ".", "0" ]
python
train
inasafe/inasafe
safe/report/expressions/map_report.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/expressions/map_report.py#L491-L495
def unknown_source_text_element(feature, parent): """Retrieve reference title header string from definitions.""" _ = feature, parent # NOQA header = unknown_source_text['string_format'] return header.capitalize()
[ "def", "unknown_source_text_element", "(", "feature", ",", "parent", ")", ":", "_", "=", "feature", ",", "parent", "# NOQA", "header", "=", "unknown_source_text", "[", "'string_format'", "]", "return", "header", ".", "capitalize", "(", ")" ]
Retrieve reference title header string from definitions.
[ "Retrieve", "reference", "title", "header", "string", "from", "definitions", "." ]
python
train
learningequality/ricecooker
ricecooker/config.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/config.py#L184-L192
def init_file_mapping_store(): """ init_file_mapping_store: creates log to keep track of downloaded files Args: None Returns: None """ # Make storage directory for restore files if it doesn't already exist path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION) if not os.path.exists(path): os.makedirs(path)
[ "def", "init_file_mapping_store", "(", ")", ":", "# Make storage directory for restore files if it doesn't already exist", "path", "=", "os", ".", "path", ".", "join", "(", "RESTORE_DIRECTORY", ",", "FILE_STORE_LOCATION", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "(", "path", ")" ]
init_file_mapping_store: creates log to keep track of downloaded files Args: None Returns: None
[ "init_file_mapping_store", ":", "creates", "log", "to", "keep", "track", "of", "downloaded", "files", "Args", ":", "None", "Returns", ":", "None" ]
python
train
deshima-dev/decode
decode/core/__init__.py
https://github.com/deshima-dev/decode/blob/e789e174cd316e7ec8bc55be7009ad35baced3c0/decode/core/__init__.py#L22-L24
def scalarcoords(self): """A dictionary of values that don't label any axes (point-like).""" return {k: v.values for k, v in self.coords.items() if v.dims==()}
[ "def", "scalarcoords", "(", "self", ")", ":", "return", "{", "k", ":", "v", ".", "values", "for", "k", ",", "v", "in", "self", ".", "coords", ".", "items", "(", ")", "if", "v", ".", "dims", "==", "(", ")", "}" ]
A dictionary of values that don't label any axes (point-like).
[ "A", "dictionary", "of", "values", "that", "don", "t", "label", "any", "axes", "(", "point", "-", "like", ")", "." ]
python
train
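Scalar (point-like) coordinates are exactly those with empty `dims`; a small demonstration of the same filter, assuming xarray is available and using a made-up scalar coordinate:

import xarray as xr

da = xr.DataArray([1.0, 2.0], dims="t", coords={"t": [0, 1], "scan_id": 42})
scalar = {k: v.values for k, v in da.coords.items() if v.dims == ()}
print(scalar)  # {'scan_id': array(42)}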
micolous/python-slackrealtime
src/slackrealtime/session.py
https://github.com/micolous/python-slackrealtime/blob/e9c94416f979a6582110ebba09c147de2bfe20a1/src/slackrealtime/session.py#L208-L221
def request_session(token, url=None): """ Requests a WebSocket session for the Real-Time Messaging API. Returns a SessionMetadata object containing the information retrieved from the API call. """ if url is None: api = SlackApi() else: api = SlackApi(url) response = api.rtm.start(token=token) return SessionMetadata(response, api, token)
[ "def", "request_session", "(", "token", ",", "url", "=", "None", ")", ":", "if", "url", "is", "None", ":", "api", "=", "SlackApi", "(", ")", "else", ":", "api", "=", "SlackApi", "(", "url", ")", "response", "=", "api", ".", "rtm", ".", "start", "(", "token", "=", "token", ")", "return", "SessionMetadata", "(", "response", ",", "api", ",", "token", ")" ]
Requests a WebSocket session for the Real-Time Messaging API. Returns a SessionMetadata object containing the information retrieved from the API call.
[ "Requests", "a", "WebSocket", "session", "for", "the", "Real", "-", "Time", "Messaging", "API", ".", "Returns", "a", "SessionMetadata", "object", "containing", "the", "information", "retrieved", "from", "the", "API", "call", "." ]
python
train
googlefonts/ufo2ft
Lib/ufo2ft/featureCompiler.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/featureCompiler.py#L30-L58
def parseLayoutFeatures(font): """ Parse OpenType layout features in the UFO and return a feaLib.ast.FeatureFile instance. """ featxt = tounicode(font.features.text or "", "utf-8") if not featxt: return ast.FeatureFile() buf = UnicodeIO(featxt) # the path is used by the lexer to resolve 'include' statements # and print filename in error messages. For the UFO spec, this # should be the path of the UFO, not the inner features.fea: # https://github.com/unified-font-object/ufo-spec/issues/55 ufoPath = font.path if ufoPath is not None: buf.name = ufoPath glyphNames = set(font.keys()) try: parser = Parser(buf, glyphNames) doc = parser.parse() except IncludedFeaNotFound as e: if ufoPath and os.path.exists(os.path.join(ufoPath, e.args[0])): logger.warning( "Please change the file name in the include(...); " "statement to be relative to the UFO itself, " "instead of relative to the 'features.fea' file " "contained in it." ) raise return doc
[ "def", "parseLayoutFeatures", "(", "font", ")", ":", "featxt", "=", "tounicode", "(", "font", ".", "features", ".", "text", "or", "\"\"", ",", "\"utf-8\"", ")", "if", "not", "featxt", ":", "return", "ast", ".", "FeatureFile", "(", ")", "buf", "=", "UnicodeIO", "(", "featxt", ")", "# the path is used by the lexer to resolve 'include' statements", "# and print filename in error messages. For the UFO spec, this", "# should be the path of the UFO, not the inner features.fea:", "# https://github.com/unified-font-object/ufo-spec/issues/55", "ufoPath", "=", "font", ".", "path", "if", "ufoPath", "is", "not", "None", ":", "buf", ".", "name", "=", "ufoPath", "glyphNames", "=", "set", "(", "font", ".", "keys", "(", ")", ")", "try", ":", "parser", "=", "Parser", "(", "buf", ",", "glyphNames", ")", "doc", "=", "parser", ".", "parse", "(", ")", "except", "IncludedFeaNotFound", "as", "e", ":", "if", "ufoPath", "and", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "ufoPath", ",", "e", ".", "args", "[", "0", "]", ")", ")", ":", "logger", ".", "warning", "(", "\"Please change the file name in the include(...); \"", "\"statement to be relative to the UFO itself, \"", "\"instead of relative to the 'features.fea' file \"", "\"contained in it.\"", ")", "raise", "return", "doc" ]
Parse OpenType layout features in the UFO and return a feaLib.ast.FeatureFile instance.
[ "Parse", "OpenType", "layout", "features", "in", "the", "UFO", "and", "return", "a", "feaLib", ".", "ast", ".", "FeatureFile", "instance", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L405-L417
def show_fibrechannel_interface_info_output_show_fibrechannel_interface_portsgroup_rbridgeid(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fibrechannel_interface_info = ET.Element("show_fibrechannel_interface_info") config = show_fibrechannel_interface_info output = ET.SubElement(show_fibrechannel_interface_info, "output") show_fibrechannel_interface = ET.SubElement(output, "show-fibrechannel-interface") portsgroup_rbridgeid = ET.SubElement(show_fibrechannel_interface, "portsgroup-rbridgeid") portsgroup_rbridgeid.text = kwargs.pop('portsgroup_rbridgeid') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_fibrechannel_interface_info_output_show_fibrechannel_interface_portsgroup_rbridgeid", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_fibrechannel_interface_info", "=", "ET", ".", "Element", "(", "\"show_fibrechannel_interface_info\"", ")", "config", "=", "show_fibrechannel_interface_info", "output", "=", "ET", ".", "SubElement", "(", "show_fibrechannel_interface_info", ",", "\"output\"", ")", "show_fibrechannel_interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-fibrechannel-interface\"", ")", "portsgroup_rbridgeid", "=", "ET", ".", "SubElement", "(", "show_fibrechannel_interface", ",", "\"portsgroup-rbridgeid\"", ")", "portsgroup_rbridgeid", ".", "text", "=", "kwargs", ".", "pop", "(", "'portsgroup_rbridgeid'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
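The auto-generated method boils down to assembling a small XML request document; the same structure built standalone with `xml.etree.ElementTree` and a made-up rbridge id:

import xml.etree.ElementTree as ET

info = ET.Element("show_fibrechannel_interface_info")
output = ET.SubElement(info, "output")
iface = ET.SubElement(output, "show-fibrechannel-interface")
rbridge = ET.SubElement(iface, "portsgroup-rbridgeid")
rbridge.text = "1"  # hypothetical rbridge id

print(ET.tostring(info).decode())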
PyCQA/pylint
pylint/checkers/variables.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/variables.py#L67-L77
def _is_from_future_import(stmt, name): """Check if the name is a future import from another module.""" try: module = stmt.do_import_module(stmt.modname) except astroid.AstroidBuildingException: return None for local_node in module.locals.get(name, []): if isinstance(local_node, astroid.ImportFrom) and local_node.modname == FUTURE: return True return None
[ "def", "_is_from_future_import", "(", "stmt", ",", "name", ")", ":", "try", ":", "module", "=", "stmt", ".", "do_import_module", "(", "stmt", ".", "modname", ")", "except", "astroid", ".", "AstroidBuildingException", ":", "return", "None", "for", "local_node", "in", "module", ".", "locals", ".", "get", "(", "name", ",", "[", "]", ")", ":", "if", "isinstance", "(", "local_node", ",", "astroid", ".", "ImportFrom", ")", "and", "local_node", ".", "modname", "==", "FUTURE", ":", "return", "True", "return", "None" ]
Check if the name is a future import from another module.
[ "Check", "if", "the", "name", "is", "a", "future", "import", "from", "another", "module", "." ]
python
test
Diaoul/subliminal
subliminal/utils.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L93-L122
def sanitize(string, ignore_characters=None): """Sanitize a string to strip special characters. :param str string: the string to sanitize. :param set ignore_characters: characters to ignore. :return: the sanitized string. :rtype: str """ # only deal with strings if string is None: return ignore_characters = ignore_characters or set() # replace some characters with one space characters = {'-', ':', '(', ')', '.'} - ignore_characters if characters: string = re.sub(r'[%s]' % re.escape(''.join(characters)), ' ', string) # remove some characters characters = {'\''} - ignore_characters if characters: string = re.sub(r'[%s]' % re.escape(''.join(characters)), '', string) # replace multiple spaces with one string = re.sub(r'\s+', ' ', string) # strip and lower case return string.strip().lower()
[ "def", "sanitize", "(", "string", ",", "ignore_characters", "=", "None", ")", ":", "# only deal with strings", "if", "string", "is", "None", ":", "return", "ignore_characters", "=", "ignore_characters", "or", "set", "(", ")", "# replace some characters with one space", "characters", "=", "{", "'-'", ",", "':'", ",", "'('", ",", "')'", ",", "'.'", "}", "-", "ignore_characters", "if", "characters", ":", "string", "=", "re", ".", "sub", "(", "r'[%s]'", "%", "re", ".", "escape", "(", "''", ".", "join", "(", "characters", ")", ")", ",", "' '", ",", "string", ")", "# remove some characters", "characters", "=", "{", "'\\''", "}", "-", "ignore_characters", "if", "characters", ":", "string", "=", "re", ".", "sub", "(", "r'[%s]'", "%", "re", ".", "escape", "(", "''", ".", "join", "(", "characters", ")", ")", ",", "''", ",", "string", ")", "# replace multiple spaces with one", "string", "=", "re", ".", "sub", "(", "r'\\s+'", ",", "' '", ",", "string", ")", "# strip and lower case", "return", "string", ".", "strip", "(", ")", ".", "lower", "(", ")" ]
Sanitize a string to strip special characters. :param str string: the string to sanitize. :param set ignore_characters: characters to ignore. :return: the sanitized string. :rtype: str
[ "Sanitize", "a", "string", "to", "strip", "special", "characters", "." ]
python
train
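A worked example of what `sanitize` produces, as a condensed runnable restatement of the function above:

import re

def sanitize(string, ignore_characters=None):
    if string is None:
        return None
    ignore = ignore_characters or set()
    spaced = {'-', ':', '(', ')', '.'} - ignore  # these become a space
    removed = {"'"} - ignore                     # these are dropped entirely
    if spaced:
        string = re.sub(r'[%s]' % re.escape(''.join(spaced)), ' ', string)
    if removed:
        string = re.sub(r'[%s]' % re.escape(''.join(removed)), '', string)
    return re.sub(r'\s+', ' ', string).strip().lower()

print(sanitize("Marvel's Agents of S.H.I.E.L.D."))
# marvels agents of s h i e l d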
xtrementl/focus
focus/plugin/modules/tasks.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/tasks.py#L377-L412
def execute(self, env, args): """ Removes a task. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser. """ # extract args task_name = args.task_name force = args.force if env.task.active and env.task.name == task_name: raise errors.ActiveTask if not env.task.exists(task_name): raise errors.TaskNotFound(task_name) if force: env.task.remove(task_name) else: try: while True: prompt = ('Are you sure you want to delete "{0}" (y/n)? ' .format(task_name)) resp = env.io.prompt(prompt, newline=False).lower() if resp in ('y', 'n'): if resp == 'y': env.task.remove(task_name) break except KeyboardInterrupt: pass
[ "def", "execute", "(", "self", ",", "env", ",", "args", ")", ":", "# extract args", "task_name", "=", "args", ".", "task_name", "force", "=", "args", ".", "force", "if", "env", ".", "task", ".", "active", "and", "env", ".", "task", ".", "name", "==", "task_name", ":", "raise", "errors", ".", "ActiveTask", "if", "not", "env", ".", "task", ".", "exists", "(", "task_name", ")", ":", "raise", "errors", ".", "TaskNotFound", "(", "task_name", ")", "if", "force", ":", "env", ".", "task", ".", "remove", "(", "task_name", ")", "else", ":", "try", ":", "while", "True", ":", "prompt", "=", "(", "'Are you sure you want to delete \"{0}\" (y/n)? '", ".", "format", "(", "task_name", ")", ")", "resp", "=", "env", ".", "io", ".", "prompt", "(", "prompt", ",", "newline", "=", "False", ")", ".", "lower", "(", ")", "if", "resp", "in", "(", "'y'", ",", "'n'", ")", ":", "if", "resp", "==", "'y'", ":", "env", ".", "task", ".", "remove", "(", "task_name", ")", "break", "except", "KeyboardInterrupt", ":", "pass" ]
Removes a task. `env` Runtime ``Environment`` instance. `args` Arguments object from arg parser.
[ "Removes", "a", "task", "." ]
python
train
bunq/sdk_python
bunq/sdk/model/generated/endpoint.py
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/model/generated/endpoint.py#L16081-L16101
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._year is not None: return False if self._alias_user is not None: return False return True
[ "def", "is_all_field_none", "(", "self", ")", ":", "if", "self", ".", "_id_", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_created", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_updated", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_year", "is", "not", "None", ":", "return", "False", "if", "self", ".", "_alias_user", "is", "not", "None", ":", "return", "False", "return", "True" ]
:rtype: bool
[ ":", "rtype", ":", "bool" ]
python
train
UCL-INGI/INGInious
inginious/frontend/accessible_time.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/accessible_time.py#L98-L100
def is_always_accessible(self): """ Returns true if the course/task is always accessible """ return self._val[0] == datetime.min and self._val[1] == datetime.max
[ "def", "is_always_accessible", "(", "self", ")", ":", "return", "self", ".", "_val", "[", "0", "]", "==", "datetime", ".", "min", "and", "self", ".", "_val", "[", "1", "]", "==", "datetime", ".", "max" ]
Returns true if the course/task is always accessible
[ "Returns", "true", "if", "the", "course", "/", "task", "is", "always", "accessible" ]
python
train
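The `datetime.min` / `datetime.max` pair acts as a sentinel interval meaning "no restriction"; the same check in isolation:

from datetime import datetime

def is_always_accessible(val):
    return val[0] == datetime.min and val[1] == datetime.max

print(is_always_accessible((datetime.min, datetime.max)))          # True
print(is_always_accessible((datetime(2019, 1, 1), datetime.max)))  # False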
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_output.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_output.py#L71-L88
def cmd_output_sysid(self, args): '''add new output for a specific MAVLink sysID''' sysid = int(args[0]) device = args[1] print("Adding output %s for sysid %u" % (device, sysid)) try: conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system) conn.mav.srcComponent = self.settings.source_component except Exception: print("Failed to connect to %s" % device) return try: mp_util.child_fd_list_add(conn.port.fileno()) except Exception: pass if sysid in self.mpstate.sysid_outputs: self.mpstate.sysid_outputs[sysid].close() self.mpstate.sysid_outputs[sysid] = conn
[ "def", "cmd_output_sysid", "(", "self", ",", "args", ")", ":", "sysid", "=", "int", "(", "args", "[", "0", "]", ")", "device", "=", "args", "[", "1", "]", "print", "(", "\"Adding output %s for sysid %u\"", "%", "(", "device", ",", "sysid", ")", ")", "try", ":", "conn", "=", "mavutil", ".", "mavlink_connection", "(", "device", ",", "input", "=", "False", ",", "source_system", "=", "self", ".", "settings", ".", "source_system", ")", "conn", ".", "mav", ".", "srcComponent", "=", "self", ".", "settings", ".", "source_component", "except", "Exception", ":", "print", "(", "\"Failed to connect to %s\"", "%", "device", ")", "return", "try", ":", "mp_util", ".", "child_fd_list_add", "(", "conn", ".", "port", ".", "fileno", "(", ")", ")", "except", "Exception", ":", "pass", "if", "sysid", "in", "self", ".", "mpstate", ".", "sysid_outputs", ":", "self", ".", "mpstate", ".", "sysid_outputs", "[", "sysid", "]", ".", "close", "(", ")", "self", ".", "mpstate", ".", "sysid_outputs", "[", "sysid", "]", "=", "conn" ]
add new output for a specific MAVLink sysID
[ "add", "new", "output", "for", "a", "specific", "MAVLink", "sysID" ]
python
train
Qiskit/qiskit-terra
qiskit/dagcircuit/dagcircuit.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/dagcircuit/dagcircuit.py#L930-L937
def edges(self, nodes=None):
    """Iterator for edge values.

    Yield:
        edge: the edge as a (source node, destination node, edge data) triple.
    """
    for source_node, dest_node, edge_data in self._multi_graph.edges(nodes, data=True):
        yield source_node, dest_node, edge_data
[ "def", "edges", "(", "self", ",", "nodes", "=", "None", ")", ":", "for", "source_node", ",", "dest_node", ",", "edge_data", "in", "self", ".", "_multi_graph", ".", "edges", "(", "nodes", ",", "data", "=", "True", ")", ":", "yield", "source_node", ",", "dest_node", ",", "edge_data" ]
Iterator for node values. Yield: node: the node.
[ "Iterator", "for", "node", "values", "." ]
python
test
bcbio/bcbio-nextgen
bcbio/ngsalign/tophat.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/tophat.py#L173-L187
def _fix_mates(orig_file, out_file, ref_file, config): """Fix problematic unmapped mate pairs in TopHat output. TopHat 2.0.9 appears to have issues with secondary reads: https://groups.google.com/forum/#!topic/tuxedo-tools-users/puLfDNbN9bo This cleans the input file to only keep properly mapped pairs, providing a general fix that will handle correctly mapped secondary reads as well. """ if not file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: samtools = config_utils.get_program("samtools", config) cmd = "{samtools} view -bS -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}" do.run(cmd.format(**locals()), "Fix mate pairs in TopHat output", {}) return out_file
[ "def", "_fix_mates", "(", "orig_file", ",", "out_file", ",", "ref_file", ",", "config", ")", ":", "if", "not", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", "samtools", "=", "config_utils", ".", "get_program", "(", "\"samtools\"", ",", "config", ")", "cmd", "=", "\"{samtools} view -bS -h -t {ref_file}.fai -F 8 {orig_file} > {tx_out_file}\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Fix mate pairs in TopHat output\"", ",", "{", "}", ")", "return", "out_file" ]
Fix problematic unmapped mate pairs in TopHat output. TopHat 2.0.9 appears to have issues with secondary reads: https://groups.google.com/forum/#!topic/tuxedo-tools-users/puLfDNbN9bo This cleans the input file to only keep properly mapped pairs, providing a general fix that will handle correctly mapped secondary reads as well.
[ "Fix", "problematic", "unmapped", "mate", "pairs", "in", "TopHat", "output", "." ]
python
train
refindlyllc/rets
rets/parsers/get_object.py
https://github.com/refindlyllc/rets/blob/c615dfc272cff0825fd3b50863c46afc3e33916f/rets/parsers/get_object.py#L39-L73
def _get_multiparts(response): """ From this 'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8' get this --874e43d27ec6d83f30f37841bdaf90c7 """ boundary = None for part in response.headers.get('Content-Type', '').split(';'): if 'boundary=' in part: boundary = '--{}'.format(part.split('=', 1)[1].strip('\"')) break if not boundary: raise ParseError("Was not able to find the boundary between objects in a multipart response") if response.content is None: return [] response_string = response.content if six.PY3: # Python3 returns bytes, decode for string operations response_string = response_string.decode('latin-1') # help bad responses be more multipart compliant whole_body = response_string.strip('\r\n') no_front_boundary = whole_body.strip(boundary) # The boundary comes with some characters multi_parts = [] for part in no_front_boundary.split(boundary): multi_parts.append(part.strip('\r\n')) return multi_parts
[ "def", "_get_multiparts", "(", "response", ")", ":", "boundary", "=", "None", "for", "part", "in", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", ".", "split", "(", "';'", ")", ":", "if", "'boundary='", "in", "part", ":", "boundary", "=", "'--{}'", ".", "format", "(", "part", ".", "split", "(", "'='", ",", "1", ")", "[", "1", "]", ".", "strip", "(", "'\\\"'", ")", ")", "break", "if", "not", "boundary", ":", "raise", "ParseError", "(", "\"Was not able to find the boundary between objects in a multipart response\"", ")", "if", "response", ".", "content", "is", "None", ":", "return", "[", "]", "response_string", "=", "response", ".", "content", "if", "six", ".", "PY3", ":", "# Python3 returns bytes, decode for string operations", "response_string", "=", "response_string", ".", "decode", "(", "'latin-1'", ")", "# help bad responses be more multipart compliant", "whole_body", "=", "response_string", ".", "strip", "(", "'\\r\\n'", ")", "no_front_boundary", "=", "whole_body", ".", "strip", "(", "boundary", ")", "# The boundary comes with some characters", "multi_parts", "=", "[", "]", "for", "part", "in", "no_front_boundary", ".", "split", "(", "boundary", ")", ":", "multi_parts", ".", "append", "(", "part", ".", "strip", "(", "'\\r\\n'", ")", ")", "return", "multi_parts" ]
From this 'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8' get this --874e43d27ec6d83f30f37841bdaf90c7
[ "From", "this", "multipart", "/", "parallel", ";", "boundary", "=", "874e43d27ec6d83f30f37841bdaf90c7", ";", "charset", "=", "utf", "-", "8", "get", "this", "--", "874e43d27ec6d83f30f37841bdaf90c7" ]
python
train
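The boundary extraction step alone, run against the hypothetical Content-Type value quoted in the docstring:

content_type = 'multipart/parallel; boundary="874e43d27ec6d83f30f37841bdaf90c7"; charset=utf-8'

boundary = None
for part in content_type.split(';'):
    if 'boundary=' in part:
        # strip the surrounding quotes, then prefix the two dashes used in the body
        boundary = '--{}'.format(part.split('=', 1)[1].strip('"'))
        break

print(boundary)  # --874e43d27ec6d83f30f37841bdaf90c7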
PyProphet/pyprophet
pyprophet/main.py
https://github.com/PyProphet/pyprophet/blob/f546ad171750cd7685afbde6785fe71f82cadb35/pyprophet/main.py#L112-L122
def ipf(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep): """ Infer peptidoforms after scoring of MS1, MS2 and transition-level data. """ if outfile is None: outfile = infile else: outfile = outfile infer_peptidoforms(infile, outfile, ipf_ms1_scoring, ipf_ms2_scoring, ipf_h0, ipf_grouped_fdr, ipf_max_precursor_pep, ipf_max_peakgroup_pep, ipf_max_precursor_peakgroup_pep, ipf_max_transition_pep)
[ "def", "ipf", "(", "infile", ",", "outfile", ",", "ipf_ms1_scoring", ",", "ipf_ms2_scoring", ",", "ipf_h0", ",", "ipf_grouped_fdr", ",", "ipf_max_precursor_pep", ",", "ipf_max_peakgroup_pep", ",", "ipf_max_precursor_peakgroup_pep", ",", "ipf_max_transition_pep", ")", ":", "if", "outfile", "is", "None", ":", "outfile", "=", "infile", "else", ":", "outfile", "=", "outfile", "infer_peptidoforms", "(", "infile", ",", "outfile", ",", "ipf_ms1_scoring", ",", "ipf_ms2_scoring", ",", "ipf_h0", ",", "ipf_grouped_fdr", ",", "ipf_max_precursor_pep", ",", "ipf_max_peakgroup_pep", ",", "ipf_max_precursor_peakgroup_pep", ",", "ipf_max_transition_pep", ")" ]
Infer peptidoforms after scoring of MS1, MS2 and transition-level data.
[ "Infer", "peptidoforms", "after", "scoring", "of", "MS1", "MS2", "and", "transition", "-", "level", "data", "." ]
python
test
google/transitfeed
merge.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/merge.py#L265-L281
def _GenerateNotices(self): """Generate a summary of any notices. Returns: The generated HTML as a string. """ items = [] for e in self._notices: d = e.GetDictToFormat() if 'url' in d.keys(): d['url'] = '<a href="%(url)s">%(url)s</a>' % d items.append('<li class="notice">%s</li>' % e.FormatProblem(d).replace('\n', '<br>')) if items: return '<h2>Notices:</h2>\n<ul>%s</ul>\n' % '\n'.join(items) else: return ''
[ "def", "_GenerateNotices", "(", "self", ")", ":", "items", "=", "[", "]", "for", "e", "in", "self", ".", "_notices", ":", "d", "=", "e", ".", "GetDictToFormat", "(", ")", "if", "'url'", "in", "d", ".", "keys", "(", ")", ":", "d", "[", "'url'", "]", "=", "'<a href=\"%(url)s\">%(url)s</a>'", "%", "d", "items", ".", "append", "(", "'<li class=\"notice\">%s</li>'", "%", "e", ".", "FormatProblem", "(", "d", ")", ".", "replace", "(", "'\\n'", ",", "'<br>'", ")", ")", "if", "items", ":", "return", "'<h2>Notices:</h2>\\n<ul>%s</ul>\\n'", "%", "'\\n'", ".", "join", "(", "items", ")", "else", ":", "return", "''" ]
Generate a summary of any notices. Returns: The generated HTML as a string.
[ "Generate", "a", "summary", "of", "any", "notices", "." ]
python
train
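The same linkify-then-list rendering with two dummy notice dictionaries standing in for the `transitfeed` problem objects:

notices = [
    {"template": "Duplicate stop near %(url)s", "url": "http://example.com/stops"},
    {"template": "Agency name mismatch"},
]

items = []
for d in notices:
    if "url" in d:
        d["url"] = '<a href="%(url)s">%(url)s</a>' % d  # linkify in place
    items.append('<li class="notice">%s</li>' % (d["template"] % d))

print('<h2>Notices:</h2>\n<ul>%s</ul>\n' % '\n'.join(items))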
genialis/resolwe
resolwe/flow/serializers/data.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/serializers/data.py#L91-L98
def _serialize_items(self, serializer, kind, items): """Return serialized items or list of ids, depending on `hydrate_XXX` query param.""" if self.request and self.request.query_params.get('hydrate_{}'.format(kind), False): serializer = serializer(items, many=True, read_only=True) serializer.bind(kind, self) return serializer.data else: return [item.id for item in items]
[ "def", "_serialize_items", "(", "self", ",", "serializer", ",", "kind", ",", "items", ")", ":", "if", "self", ".", "request", "and", "self", ".", "request", ".", "query_params", ".", "get", "(", "'hydrate_{}'", ".", "format", "(", "kind", ")", ",", "False", ")", ":", "serializer", "=", "serializer", "(", "items", ",", "many", "=", "True", ",", "read_only", "=", "True", ")", "serializer", ".", "bind", "(", "kind", ",", "self", ")", "return", "serializer", ".", "data", "else", ":", "return", "[", "item", ".", "id", "for", "item", "in", "items", "]" ]
Return serialized items or list of ids, depending on `hydrate_XXX` query param.
[ "Return", "serialized", "items", "or", "list", "of", "ids", "depending", "on", "hydrate_XXX", "query", "param", "." ]
python
train
trevisanj/f311
f311/explorer/util.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/explorer/util.py#L14-L34
def cut_spectrum(sp, l0, lf):
    """
    Cuts spectrum given a wavelength interval, leaving original intact

    Args:
        sp: Spectrum instance
        l0: initial wavelength
        lf: final wavelength

    Returns:
        Spectrum: cut spectrum
    """

    if l0 >= lf:
        raise ValueError("l0 must be lower than lf")

    idx0 = np.argmin(np.abs(sp.x - l0))
    idx1 = np.argmin(np.abs(sp.x - lf))
    out = copy.deepcopy(sp)
    out.x = out.x[idx0:idx1]
    out.y = out.y[idx0:idx1]

    return out
[ "def", "cut_spectrum", "(", "sp", ",", "l0", ",", "lf", ")", ":", "if", "l0", ">=", "lf", ":", "raise", "ValueError", "(", "\"l0 must be lower than lf\"", ")", "idx0", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "sp", ".", "x", "-", "l0", ")", ")", "idx1", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "sp", ".", "x", "-", "lf", ")", ")", "out", "=", "copy", ".", "deepcopy", "(", "sp", ")", "out", ".", "x", "=", "out", ".", "x", "[", "idx0", ":", "idx1", "]", "out", ".", "y", "=", "out", ".", "y", "[", "idx0", ":", "idx1", "]", "return", "out" ]
Cuts spectrum given a wavelength interval, leaving original intact

Args:
    sp: Spectrum instance
    l0: initial wavelength
    lf: final wavelength

Returns:
    Spectrum: cut spectrum
[ "Cuts", "spectrum", "given", "a", "wavelength", "interval", "leaving", "origina", "intact" ]
python
train
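The core of the cut is nearest-sample lookup via `np.argmin(np.abs(x - value))`; a standalone demonstration on a hypothetical wavelength grid (requires NumPy):

import numpy as np

x = np.linspace(4000.0, 7000.0, 7)  # hypothetical wavelength grid
l0, lf = 4600.0, 6400.0

idx0 = np.argmin(np.abs(x - l0))  # index of the sample closest to l0
idx1 = np.argmin(np.abs(x - lf))  # index of the sample closest to lf
print(x[idx0:idx1])  # [4500. 5000. 5500. 6000.]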
scopus-api/scopus
scopus/abstract_retrieval.py
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/abstract_retrieval.py#L38-L47
def authkeywords(self): """List of author-provided keywords of the abstract.""" keywords = self._json['authkeywords'] if keywords is None: return None else: try: return [d['$'] for d in keywords['author-keyword']] except TypeError: # Singleton keyword return [keywords['author-keyword']['$']]
[ "def", "authkeywords", "(", "self", ")", ":", "keywords", "=", "self", ".", "_json", "[", "'authkeywords'", "]", "if", "keywords", "is", "None", ":", "return", "None", "else", ":", "try", ":", "return", "[", "d", "[", "'$'", "]", "for", "d", "in", "keywords", "[", "'author-keyword'", "]", "]", "except", "TypeError", ":", "# Singleton keyword", "return", "[", "keywords", "[", "'author-keyword'", "]", "[", "'$'", "]", "]" ]
List of author-provided keywords of the abstract.
[ "List", "of", "author", "-", "provided", "keywords", "of", "the", "abstract", "." ]
python
train
chrisrink10/basilisp
src/basilisp/lang/runtime.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/runtime.py#L838-L866
def nth(coll, i, notfound=__nth_sentinel): """Returns the ith element of coll (0-indexed), if it exists. None otherwise. If i is out of bounds, throws an IndexError unless notfound is specified.""" if coll is None: return None try: return coll[i] except IndexError as ex: if notfound is not __nth_sentinel: return notfound raise ex except TypeError as ex: # Log these at TRACE so they don't gum up the DEBUG logs since most # cases where this exception occurs are not bugs. logger.log(TRACE, "Ignored %s: %s", type(ex).__name__, ex) try: for j, e in enumerate(coll): if i == j: return e if notfound is not __nth_sentinel: return notfound raise IndexError(f"Index {i} out of bounds") except TypeError: pass raise TypeError(f"nth not supported on object of type {type(coll)}")
[ "def", "nth", "(", "coll", ",", "i", ",", "notfound", "=", "__nth_sentinel", ")", ":", "if", "coll", "is", "None", ":", "return", "None", "try", ":", "return", "coll", "[", "i", "]", "except", "IndexError", "as", "ex", ":", "if", "notfound", "is", "not", "__nth_sentinel", ":", "return", "notfound", "raise", "ex", "except", "TypeError", "as", "ex", ":", "# Log these at TRACE so they don't gum up the DEBUG logs since most", "# cases where this exception occurs are not bugs.", "logger", ".", "log", "(", "TRACE", ",", "\"Ignored %s: %s\"", ",", "type", "(", "ex", ")", ".", "__name__", ",", "ex", ")", "try", ":", "for", "j", ",", "e", "in", "enumerate", "(", "coll", ")", ":", "if", "i", "==", "j", ":", "return", "e", "if", "notfound", "is", "not", "__nth_sentinel", ":", "return", "notfound", "raise", "IndexError", "(", "f\"Index {i} out of bounds\"", ")", "except", "TypeError", ":", "pass", "raise", "TypeError", "(", "f\"nth not supported on object of type {type(coll)}\"", ")" ]
Returns the ith element of coll (0-indexed), if it exists. None otherwise. If i is out of bounds, throws an IndexError unless notfound is specified.
[ "Returns", "the", "ith", "element", "of", "coll", "(", "0", "-", "indexed", ")", "if", "it", "exists", ".", "None", "otherwise", ".", "If", "i", "is", "out", "of", "bounds", "throws", "an", "IndexError", "unless", "notfound", "is", "specified", "." ]
python
test
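The `__nth_sentinel` default is the standard sentinel-object pattern for telling "no default supplied" apart from "default is None"; in miniature:

_SENTINEL = object()

def nth(coll, i, notfound=_SENTINEL):
    try:
        return coll[i]
    except IndexError:
        if notfound is not _SENTINEL:
            return notfound
        raise

print(nth([10, 20, 30], 1))        # 20
print(nth([10, 20, 30], 9, None))  # None, an explicit default even though it is None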
dcwatson/drill
drill.py
https://github.com/dcwatson/drill/blob/b8a30ec0fd5b5bf55154bd44c1c75f5f5945691b/drill.py#L328-L375
def _match(self, pred): """ Helper function to determine if this node matches the given predicate. """ if not pred: return True # Strip off the [ and ] pred = pred[1:-1] if pred.startswith('@'): # An attribute predicate checks the existence (and optionally value) of an attribute on this tag. pred = pred[1:] if '=' in pred: attr, value = pred.split('=', 1) if value[0] in ('"', "'"): value = value[1:] if value[-1] in ('"', "'"): value = value[:-1] return self.attrs.get(attr) == value else: return pred in self.attrs elif num_re.match(pred): # An index predicate checks whether we are the n-th child of our parent (0-based). index = int(pred) if index < 0: if self.parent: # For negative indexes, count from the end of the list. return self.index == (len(self.parent._children) + index) else: # If we're the root node, the only index we could be is 0. return index == 0 else: return index == self.index else: if '=' in pred: tag, value = pred.split('=', 1) if value[0] in ('"', "'"): value = value[1:] if value[-1] in ('"', "'"): value = value[:-1] for c in self._children: if c.tagname == tag and c.data == value: return True else: # A plain [tag] predicate means we match if we have a child with tagname "tag". for c in self._children: if c.tagname == pred: return True return False
[ "def", "_match", "(", "self", ",", "pred", ")", ":", "if", "not", "pred", ":", "return", "True", "# Strip off the [ and ]", "pred", "=", "pred", "[", "1", ":", "-", "1", "]", "if", "pred", ".", "startswith", "(", "'@'", ")", ":", "# An attribute predicate checks the existence (and optionally value) of an attribute on this tag.", "pred", "=", "pred", "[", "1", ":", "]", "if", "'='", "in", "pred", ":", "attr", ",", "value", "=", "pred", ".", "split", "(", "'='", ",", "1", ")", "if", "value", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "value", "=", "value", "[", "1", ":", "]", "if", "value", "[", "-", "1", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "value", "=", "value", "[", ":", "-", "1", "]", "return", "self", ".", "attrs", ".", "get", "(", "attr", ")", "==", "value", "else", ":", "return", "pred", "in", "self", ".", "attrs", "elif", "num_re", ".", "match", "(", "pred", ")", ":", "# An index predicate checks whether we are the n-th child of our parent (0-based).", "index", "=", "int", "(", "pred", ")", "if", "index", "<", "0", ":", "if", "self", ".", "parent", ":", "# For negative indexes, count from the end of the list.", "return", "self", ".", "index", "==", "(", "len", "(", "self", ".", "parent", ".", "_children", ")", "+", "index", ")", "else", ":", "# If we're the root node, the only index we could be is 0.", "return", "index", "==", "0", "else", ":", "return", "index", "==", "self", ".", "index", "else", ":", "if", "'='", "in", "pred", ":", "tag", ",", "value", "=", "pred", ".", "split", "(", "'='", ",", "1", ")", "if", "value", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "value", "=", "value", "[", "1", ":", "]", "if", "value", "[", "-", "1", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "value", "=", "value", "[", ":", "-", "1", "]", "for", "c", "in", "self", ".", "_children", ":", "if", "c", ".", "tagname", "==", "tag", "and", "c", ".", "data", "==", "value", ":", "return", "True", "else", ":", "# A plain [tag] predicate means we match if we have a child with tagname \"tag\".", "for", "c", "in", "self", ".", "_children", ":", "if", "c", ".", "tagname", "==", "pred", ":", "return", "True", "return", "False" ]
Helper function to determine if this node matches the given predicate.
[ "Helper", "function", "to", "determine", "if", "this", "node", "matches", "the", "given", "predicate", "." ]
python
valid
SITools2/pySitools2_1.0
sitools2/core/query.py
https://github.com/SITools2/pySitools2_1.0/blob/acd13198162456ba401a0b923af989bb29feb3b6/sitools2/core/query.py#L255-L262
def __validInputs(self): """Validates the inputs of the constructor.""" #if not isinstance(self.__column, Column): # raise Sitools2Exception("column must be an instance of Column") if self.__type not in self.__TYPE: raise Sitools2Exception("Type must be one of these values : numeric, string, boolean") if self.__comparison not in self.__COMPARISON: raise Sitools2Exception("Comparison must be one of these values : LT, GT, EQ, LIKE, IN, NOTIN")
[ "def", "__validInputs", "(", "self", ")", ":", "#if not isinstance(self.__column, Column):", "# raise Sitools2Exception(\"column must be an instance of Column\") ", "if", "self", ".", "__type", "not", "in", "self", ".", "__TYPE", ":", "raise", "Sitools2Exception", "(", "\"Type must be one of these values : numeric, string, boolean\"", ")", "if", "self", ".", "__comparison", "not", "in", "self", ".", "__COMPARISON", ":", "raise", "Sitools2Exception", "(", "\"Comparison must be one of these values : LT, GT, EQ, LIKE, IN, NOTIN\"", ")" ]
Validates the inputs of the constructor.
[ "Validates", "the", "inputs", "of", "the", "constructor", "." ]
python
train
idmillington/layout
layout/datatypes/parse_dimensions.py
https://github.com/idmillington/layout/blob/c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5/layout/datatypes/parse_dimensions.py#L87-L116
def _split_dimension(text): """ Returns the number and unit from the given piece of text as a pair. >>> _split_dimension('1pt') (1, 'pt') >>> _split_dimension('1 pt') (1, 'pt') >>> _split_dimension('1 \tpt') (1, 'pt') >>> _split_dimension('1 \tpt ') (1, 'pt') >>> _split_dimension(' 1 \tpt ') (1, 'pt') >>> _split_dimension('3') (3, None) >>> _split_dimension('-12.43mm') (-12.43, 'mm') >>> _split_dimension('-12.43"') (-12.43, '"') """ match = _dimension_finder.match(text) if not match: raise DimensionError("Can't parse dimension '%s'." % text) number = match.group(1) unit = match.group(4) if '.' in number: return (float(number), unit) else: return (int(number), unit)
[ "def", "_split_dimension", "(", "text", ")", ":", "match", "=", "_dimension_finder", ".", "match", "(", "text", ")", "if", "not", "match", ":", "raise", "DimensionError", "(", "\"Can't parse dimension '%s'.\"", "%", "text", ")", "number", "=", "match", ".", "group", "(", "1", ")", "unit", "=", "match", ".", "group", "(", "4", ")", "if", "'.'", "in", "number", ":", "return", "(", "float", "(", "number", ")", ",", "unit", ")", "else", ":", "return", "(", "int", "(", "number", ")", ",", "unit", ")" ]
Returns the number and unit from the given piece of text as a pair. >>> _split_dimension('1pt') (1, 'pt') >>> _split_dimension('1 pt') (1, 'pt') >>> _split_dimension('1 \tpt') (1, 'pt') >>> _split_dimension('1 \tpt ') (1, 'pt') >>> _split_dimension(' 1 \tpt ') (1, 'pt') >>> _split_dimension('3') (3, None) >>> _split_dimension('-12.43mm') (-12.43, 'mm') >>> _split_dimension('-12.43"') (-12.43, '"')
[ "Returns", "the", "number", "and", "unit", "from", "the", "given", "piece", "of", "text", "as", "a", "pair", "." ]
python
train
ggravlingen/pytradfri
pytradfri/resource.py
https://github.com/ggravlingen/pytradfri/blob/63750fa8fb27158c013d24865cdaa7fb82b3ab53/pytradfri/resource.py#L67-L75
def update(self): """ Update the group. Returns a Command. """ def process_result(result): self.raw = result return Command('get', self.path, process_result=process_result)
[ "def", "update", "(", "self", ")", ":", "def", "process_result", "(", "result", ")", ":", "self", ".", "raw", "=", "result", "return", "Command", "(", "'get'", ",", "self", ".", "path", ",", "process_result", "=", "process_result", ")" ]
Update the group. Returns a Command.
[ "Update", "the", "group", "." ]
python
train
JHowell45/helium-cli
helium/helium_functions/convert_youtube_url.py
https://github.com/JHowell45/helium-cli/blob/8decc2f410a17314440eeed411a4b19dd4b4e780/helium/helium_functions/convert_youtube_url.py#L9-L37
def convert_youtube_url(youtube_url, no_controls, autoplay):
    """Use this function to convert the youtube URL.

    This function is used for converting the youtube URL so that it can be
    used correctly with Helium. It means that Helium will know the next
    video in the playlist.

    :param youtube_url: the URL of the youtube playlist video.
    :type youtube_url: str
    :param no_controls: whether or not to show controls in the Helium app.
    :type no_controls: bool
    :param autoplay: whether or not to play the next video in the playlist
        after the current video finishes.
    :type autoplay: bool
    :return: the new correct youtube URL.
    :rtype: str
    """
    for section in youtube_url.split('&'):
        if 'list' in section:
            playlist_id = section.split('list=')[1]
            break
    return (
        'https://www.youtube.com/embed/videoseries?{0}&{1}&'
        'loop=1&html5=1&showinfo=0&listType=playlist&list={2}'.format(
            'autoplay=1' if autoplay else '',
            'controls=0' if no_controls else '',
            str(playlist_id)
        )
    )
[ "def", "convert_youtube_url", "(", "youtube_url", ",", "no_controls", ",", "autoplay", ")", ":", "for", "section", "in", "youtube_url", ".", "split", "(", "'&'", ")", ":", "if", "'list'", "in", "section", ":", "playlist_id", "=", "section", ".", "split", "(", "'list='", ")", "[", "1", "]", "break", "return", "(", "'https://www.youtube.com/embed/videoseries?{0}&{1}&'", "'loop=1&html5=1&showinfo=0&listType=playlist&list={2}'", ".", "format", "(", "''", "if", "autoplay", "else", "'autoplay=1'", ",", "'controls=0'", "if", "no_controls", "else", "''", ",", "str", "(", "playlist_id", ")", ")", ")" ]
Use this function to convert the youtube URL. This function is used for converting the youtube URL so that it can be used correctly with Helium. It means that Helium will know the next video in the playlist. :param youtube_url: the URL of the youtube playlist video. :type youtube_url: str :param no_controls: whether or not to show controls in the Helium app. :type no_controls: bool :param autoplay: whether or not to play the next video in the playlist after the current video finishes. :type autoplay: bool :return: the new correct youtube URL. :rtype: str
[ "Use", "this", "function", "to", "convert", "the", "youtube", "URL", "." ]
python
train
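A usage sketch for the converter above. The playlist URL is invented; note that the function requires a list= query parameter (otherwise playlist_id is never assigned), and as written it fills the first format slot with 'autoplay=1' when autoplay is False, which reads as inverted relative to the docstring.

# Hypothetical playlist URL; the list= parameter must be present.
url = "https://www.youtube.com/watch?v=dQw4w9WgXcQ&list=PLxyz123"

embed = convert_youtube_url(url, no_controls=True, autoplay=False)
print(embed)
# https://www.youtube.com/embed/videoseries?autoplay=1&controls=0&loop=1&html5=1&showinfo=0&listType=playlist&list=PLxyz123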
Asana/python-asana
asana/resources/gen/workspaces.py
https://github.com/Asana/python-asana/blob/6deb7a34495db23f44858e53b6bb2c9eccff7872/asana/resources/gen/workspaces.py#L99-L112
def remove_user(self, workspace, params={}, **options):
        """The user making this call must be an admin in the workspace. Returns an empty data record.

        Parameters
        ----------
        workspace : {Id} The workspace or organization to remove the user from.
        [data] : {Object} Data for the request
          - user : {String} An identifier for the user. Can be one of an email address,
          the globally unique identifier for the user, or the keyword `me`
          to indicate the current user making the request.
        """
        path = "/workspaces/%s/removeUser" % (workspace)
        return self.client.post(path, params, **options)
[ "def", "remove_user", "(", "self", ",", "workspace", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/workspaces/%s/removeUser\"", "%", "(", "workspace", ")", "return", "self", ".", "client", ".", "post", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
The user making this call must be an admin in the workspace. Returns an empty data record.

        Parameters
        ----------
        workspace : {Id} The workspace or organization to remove the user from.
        [data] : {Object} Data for the request
          - user : {String} An identifier for the user. Can be one of an email address,
          the globally unique identifier for the user, or the keyword `me`
          to indicate the current user making the request.
[ "The", "user", "making", "this", "call", "must", "be", "an", "admin", "in", "the", "workspace", ".", "Returns", "an", "empty", "data", "record", "." ]
python
train
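A usage sketch for the generated client method above; the access token and workspace id are placeholders.

import asana

client = asana.Client.access_token("0/abc123...")  # placeholder token
# Remove a user from a workspace; 'user' may be an email, a gid, or 'me'.
client.workspaces.remove_user(12345, {"user": "me"})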
resync/resync
resync/sitemap.py
https://github.com/resync/resync/blob/98292c17b2c00f2d6f5191c6ab51fef8c292a018/resync/sitemap.py#L85-L138
def resources_as_xml(self, resources, sitemapindex=False, fh=None):
        """Write or return XML for a set of resources in sitemap format.

        Arguments:
        - resources - either an iterable or iterator of Resource objects;
                      if there is an md attribute this will go to <rs:md>
                      if there is an ln attribute this will go to <rs:ln>
        - sitemapindex - set True to write sitemapindex instead of sitemap
        - fh - write to filehandle fh instead of returning string
        """
        # element names depending on sitemapindex or not
        root_element = ('sitemapindex' if (sitemapindex) else 'urlset')
        item_element = ('sitemap' if (sitemapindex) else 'url')
        # namespaces and other settings
        namespaces = {'xmlns': SITEMAP_NS, 'xmlns:rs': RS_NS}
        root = Element(root_element, namespaces)
        if (self.pretty_xml):
            root.text = "\n"
        # <rs:ln>
        if (hasattr(resources, 'ln')):
            for ln in resources.ln:
                self.add_element_with_atts_to_etree(root, 'rs:ln', ln)
        # <rs:md>
        if (hasattr(resources, 'md')):
            self.add_element_with_atts_to_etree(root, 'rs:md', resources.md)
        # <url> entries from either an iterable or an iterator
        for r in resources:
            e = self.resource_etree_element(r, element_name=item_element)
            root.append(e)
        # have tree, now serialize
        tree = ElementTree(root)
        xml_buf = None
        if (fh is None):
            xml_buf = io.StringIO()
            fh = xml_buf
        if (sys.version_info >= (3, 0)):
            tree.write(
                fh,
                encoding='unicode',
                xml_declaration=True,
                method='xml')
        elif (sys.version_info >= (2, 7)):
            tree.write(
                fh,
                encoding='UTF-8',
                xml_declaration=True,
                method='xml')
        else:  # python2.6
            tree.write(fh, encoding='UTF-8')
        if (xml_buf is not None):
            if (sys.version_info >= (3, 0)):
                return(xml_buf.getvalue())
            else:
                return(xml_buf.getvalue().decode('utf-8'))
[ "def", "resources_as_xml", "(", "self", ",", "resources", ",", "sitemapindex", "=", "False", ",", "fh", "=", "None", ")", ":", "# element names depending on sitemapindex or not", "root_element", "=", "(", "'sitemapindex'", "if", "(", "sitemapindex", ")", "else", "'urlset'", ")", "item_element", "=", "(", "'sitemap'", "if", "(", "sitemapindex", ")", "else", "'url'", ")", "# namespaces and other settings", "namespaces", "=", "{", "'xmlns'", ":", "SITEMAP_NS", ",", "'xmlns:rs'", ":", "RS_NS", "}", "root", "=", "Element", "(", "root_element", ",", "namespaces", ")", "if", "(", "self", ".", "pretty_xml", ")", ":", "root", ".", "text", "=", "\"\\n\"", "# <rs:ln>", "if", "(", "hasattr", "(", "resources", ",", "'ln'", ")", ")", ":", "for", "ln", "in", "resources", ".", "ln", ":", "self", ".", "add_element_with_atts_to_etree", "(", "root", ",", "'rs:ln'", ",", "ln", ")", "# <rs:md>", "if", "(", "hasattr", "(", "resources", ",", "'md'", ")", ")", ":", "self", ".", "add_element_with_atts_to_etree", "(", "root", ",", "'rs:md'", ",", "resources", ".", "md", ")", "# <url> entries from either an iterable or an iterator", "for", "r", "in", "resources", ":", "e", "=", "self", ".", "resource_etree_element", "(", "r", ",", "element_name", "=", "item_element", ")", "root", ".", "append", "(", "e", ")", "# have tree, now serialize", "tree", "=", "ElementTree", "(", "root", ")", "xml_buf", "=", "None", "if", "(", "fh", "is", "None", ")", ":", "xml_buf", "=", "io", ".", "StringIO", "(", ")", "fh", "=", "xml_buf", "if", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ")", ":", "tree", ".", "write", "(", "fh", ",", "encoding", "=", "'unicode'", ",", "xml_declaration", "=", "True", ",", "method", "=", "'xml'", ")", "elif", "(", "sys", ".", "version_info", ">=", "(", "2", ",", "7", ")", ")", ":", "tree", ".", "write", "(", "fh", ",", "encoding", "=", "'UTF-8'", ",", "xml_declaration", "=", "True", ",", "method", "=", "'xml'", ")", "else", ":", "# python2.6", "tree", ".", "write", "(", "fh", ",", "encoding", "=", "'UTF-8'", ")", "if", "(", "xml_buf", "is", "not", "None", ")", ":", "if", "(", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ")", ":", "return", "(", "xml_buf", ".", "getvalue", "(", ")", ")", "else", ":", "return", "(", "xml_buf", ".", "getvalue", "(", ")", ".", "decode", "(", "'utf-8'", ")", ")" ]
Write or return XML for a set of resources in sitemap format.

        Arguments:
        - resources - either an iterable or iterator of Resource objects;
                      if there is an md attribute this will go to <rs:md>
                      if there is an ln attribute this will go to <rs:ln>
        - sitemapindex - set True to write sitemapindex instead of sitemap
        - fh - write to filehandle fh instead of returning string
[ "Write", "or", "return", "XML", "for", "a", "set", "of", "resources", "in", "sitemap", "format", "." ]
python
train
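A usage sketch, assuming the resync module layout implied by the record's path; with fh left as None the method returns the serialized <urlset> document as a string.

from resync.resource import Resource
from resync.resource_list import ResourceList
from resync.sitemap import Sitemap

rl = ResourceList()
rl.add(Resource(uri="http://example.com/res1", lastmod="2017-01-01"))
rl.add(Resource(uri="http://example.com/res2"))

# fh=None, so the XML comes back as a string instead of being written out.
print(Sitemap().resources_as_xml(rl))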
saltstack/salt
salt/modules/win_file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L515-L552
def get_uid(path, follow_symlinks=True): ''' Return the id of the user that owns a given file Symlinks are followed by default to mimic Unix behavior. Specify `follow_symlinks=False` to turn off this behavior. Args: path (str): The path to the file or directory follow_symlinks (bool): If the object specified by ``path`` is a symlink, get attributes of the linked file instead of the symlink itself. Default is True Returns: str: The uid of the owner CLI Example: .. code-block:: bash salt '*' file.get_uid c:\\temp\\test.txt salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False ''' if not os.path.exists(path): raise CommandExecutionError('Path not found: {0}'.format(path)) # Under Windows, if the path is a symlink, the user that owns the symlink is # returned, not the user that owns the file/directory the symlink is # pointing to. This behavior is *different* to *nix, therefore the symlink # is first resolved manually if necessary. Remember symlinks are only # supported on Windows Vista or later. if follow_symlinks and sys.getwindowsversion().major >= 6: path = _resolve_symlink(path) owner_sid = salt.utils.win_dacl.get_owner(path) return salt.utils.win_dacl.get_sid_string(owner_sid)
[ "def", "get_uid", "(", "path", ",", "follow_symlinks", "=", "True", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "CommandExecutionError", "(", "'Path not found: {0}'", ".", "format", "(", "path", ")", ")", "# Under Windows, if the path is a symlink, the user that owns the symlink is", "# returned, not the user that owns the file/directory the symlink is", "# pointing to. This behavior is *different* to *nix, therefore the symlink", "# is first resolved manually if necessary. Remember symlinks are only", "# supported on Windows Vista or later.", "if", "follow_symlinks", "and", "sys", ".", "getwindowsversion", "(", ")", ".", "major", ">=", "6", ":", "path", "=", "_resolve_symlink", "(", "path", ")", "owner_sid", "=", "salt", ".", "utils", ".", "win_dacl", ".", "get_owner", "(", "path", ")", "return", "salt", ".", "utils", ".", "win_dacl", ".", "get_sid_string", "(", "owner_sid", ")" ]
Return the id of the user that owns a given file Symlinks are followed by default to mimic Unix behavior. Specify `follow_symlinks=False` to turn off this behavior. Args: path (str): The path to the file or directory follow_symlinks (bool): If the object specified by ``path`` is a symlink, get attributes of the linked file instead of the symlink itself. Default is True Returns: str: The uid of the owner CLI Example: .. code-block:: bash salt '*' file.get_uid c:\\temp\\test.txt salt '*' file.get_uid c:\\temp\\test.txt follow_symlinks=False
[ "Return", "the", "id", "of", "the", "user", "that", "owns", "a", "given", "file" ]
python
train
tuomas2/automate
src/automate/system.py
https://github.com/tuomas2/automate/blob/d8a8cd03cd0da047e033a2d305f3f260f8c4e017/src/automate/system.py#L398-L429
def cleanup(self): """ Clean up before quitting """ self.pre_exit_trigger = True self.logger.info("Shutting down %s, please wait a moment.", self.name) for t in threading.enumerate(): if isinstance(t, TimerClass): t.cancel() self.logger.debug('Timers cancelled') for i in self.objects: i.cleanup() self.logger.debug('Sensors etc cleanups done') for ser in (i for i in self.services if isinstance(i, AbstractUserService)): ser.cleanup_system() self.logger.debug('User services cleaned up') if self.worker_thread.is_alive(): self.worker_thread.stop() self.logger.debug('Worker thread really stopped') for ser in (i for i in self.services if isinstance(i, AbstractSystemService)): ser.cleanup_system() self.logger.debug('System services cleaned up') threads = list(t.name for t in threading.enumerate() if t.is_alive() and not t.daemon) if threads: self.logger.info('After cleanup, we have still the following threads ' 'running: %s', ', '.join(threads))
[ "def", "cleanup", "(", "self", ")", ":", "self", ".", "pre_exit_trigger", "=", "True", "self", ".", "logger", ".", "info", "(", "\"Shutting down %s, please wait a moment.\"", ",", "self", ".", "name", ")", "for", "t", "in", "threading", ".", "enumerate", "(", ")", ":", "if", "isinstance", "(", "t", ",", "TimerClass", ")", ":", "t", ".", "cancel", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Timers cancelled'", ")", "for", "i", "in", "self", ".", "objects", ":", "i", ".", "cleanup", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Sensors etc cleanups done'", ")", "for", "ser", "in", "(", "i", "for", "i", "in", "self", ".", "services", "if", "isinstance", "(", "i", ",", "AbstractUserService", ")", ")", ":", "ser", ".", "cleanup_system", "(", ")", "self", ".", "logger", ".", "debug", "(", "'User services cleaned up'", ")", "if", "self", ".", "worker_thread", ".", "is_alive", "(", ")", ":", "self", ".", "worker_thread", ".", "stop", "(", ")", "self", ".", "logger", ".", "debug", "(", "'Worker thread really stopped'", ")", "for", "ser", "in", "(", "i", "for", "i", "in", "self", ".", "services", "if", "isinstance", "(", "i", ",", "AbstractSystemService", ")", ")", ":", "ser", ".", "cleanup_system", "(", ")", "self", ".", "logger", ".", "debug", "(", "'System services cleaned up'", ")", "threads", "=", "list", "(", "t", ".", "name", "for", "t", "in", "threading", ".", "enumerate", "(", ")", "if", "t", ".", "is_alive", "(", ")", "and", "not", "t", ".", "daemon", ")", "if", "threads", ":", "self", ".", "logger", ".", "info", "(", "'After cleanup, we have still the following threads '", "'running: %s'", ",", "', '", ".", "join", "(", "threads", ")", ")" ]
Clean up before quitting
[ "Clean", "up", "before", "quitting" ]
python
train
zomux/deepy
deepy/trainers/trainers.py
https://github.com/zomux/deepy/blob/090fbad22a08a809b12951cd0d4984f5bd432698/deepy/trainers/trainers.py#L65-L72
def optimization_updates(self, params, gradients): """ Return updates from optimization. """ updates, free_parameters = optimize_updates(params, gradients, self.config) self.network.free_parameters.extend(free_parameters) logging.info("Added %d free parameters for optimization" % len(free_parameters)) return updates
[ "def", "optimization_updates", "(", "self", ",", "params", ",", "gradients", ")", ":", "updates", ",", "free_parameters", "=", "optimize_updates", "(", "params", ",", "gradients", ",", "self", ".", "config", ")", "self", ".", "network", ".", "free_parameters", ".", "extend", "(", "free_parameters", ")", "logging", ".", "info", "(", "\"Added %d free parameters for optimization\"", "%", "len", "(", "free_parameters", ")", ")", "return", "updates" ]
Return updates from optimization.
[ "Return", "updates", "from", "optimization", "." ]
python
test
wummel/patool
patoolib/__init__.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/__init__.py#L749-L758
def repack_archive (archive, archive_new, verbosity=0, interactive=True): """Repack archive to different file and/or format.""" util.check_existing_filename(archive) util.check_new_filename(archive_new) if verbosity >= 0: util.log_info("Repacking %s to %s ..." % (archive, archive_new)) res = _repack_archive(archive, archive_new, verbosity=verbosity, interactive=interactive) if verbosity >= 0: util.log_info("... repacking successful.") return res
[ "def", "repack_archive", "(", "archive", ",", "archive_new", ",", "verbosity", "=", "0", ",", "interactive", "=", "True", ")", ":", "util", ".", "check_existing_filename", "(", "archive", ")", "util", ".", "check_new_filename", "(", "archive_new", ")", "if", "verbosity", ">=", "0", ":", "util", ".", "log_info", "(", "\"Repacking %s to %s ...\"", "%", "(", "archive", ",", "archive_new", ")", ")", "res", "=", "_repack_archive", "(", "archive", ",", "archive_new", ",", "verbosity", "=", "verbosity", ",", "interactive", "=", "interactive", ")", "if", "verbosity", ">=", "0", ":", "util", ".", "log_info", "(", "\"... repacking successful.\"", ")", "return", "res" ]
Repack archive to different file and/or format.
[ "Repack", "archive", "to", "different", "file", "and", "/", "or", "format", "." ]
python
train
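repack_archive is part of patool's public Python API, so a direct call like the one below should work; the archive names are placeholders.

import patoolib

# Convert a gzipped tarball into a zip file in one call; the target
# format is inferred from the new file's extension.
patoolib.repack_archive("project.tar.gz", "project.zip", verbosity=1)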
MartinThoma/hwrt
hwrt/utils.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L699-L740
def evaluate_model(recording, model_folder, verbose=False): """Evaluate model for a single recording.""" from . import preprocess_dataset from . import features for target_folder in get_recognizer_folders(model_folder): # The source is later than the target. That means we need to # refresh the target if "preprocessed" in target_folder: logging.info("Start applying preprocessing methods...") t = target_folder _, _, preprocessing_queue = preprocess_dataset.get_parameters(t) handwriting = handwritten_data.HandwrittenData(recording) if verbose: handwriting.show() handwriting.preprocessing(preprocessing_queue) if verbose: logging.debug("After preprocessing: %s", handwriting.get_sorted_pointlist()) handwriting.show() elif "feature-files" in target_folder: logging.info("Create feature file...") infofile_path = os.path.join(target_folder, "info.yml") with open(infofile_path, 'r') as ymlfile: feature_description = yaml.load(ymlfile) feature_str_list = feature_description['features'] feature_list = features.get_features(feature_str_list) feature_count = sum(map(lambda n: n.get_dimension(), feature_list)) x = handwriting.feature_extraction(feature_list) # Create hdf5 _, output_filename = tempfile.mkstemp(suffix='.hdf5', text=True) create_hdf5(output_filename, feature_count, [(x, 0)]) elif "model" in target_folder: logfile, model_use = _evaluate_model_single_file(target_folder, output_filename) return logfile else: logging.info("'%s' not found", target_folder) os.remove(output_filename) os.remove(model_use)
[ "def", "evaluate_model", "(", "recording", ",", "model_folder", ",", "verbose", "=", "False", ")", ":", "from", ".", "import", "preprocess_dataset", "from", ".", "import", "features", "for", "target_folder", "in", "get_recognizer_folders", "(", "model_folder", ")", ":", "# The source is later than the target. That means we need to", "# refresh the target", "if", "\"preprocessed\"", "in", "target_folder", ":", "logging", ".", "info", "(", "\"Start applying preprocessing methods...\"", ")", "t", "=", "target_folder", "_", ",", "_", ",", "preprocessing_queue", "=", "preprocess_dataset", ".", "get_parameters", "(", "t", ")", "handwriting", "=", "handwritten_data", ".", "HandwrittenData", "(", "recording", ")", "if", "verbose", ":", "handwriting", ".", "show", "(", ")", "handwriting", ".", "preprocessing", "(", "preprocessing_queue", ")", "if", "verbose", ":", "logging", ".", "debug", "(", "\"After preprocessing: %s\"", ",", "handwriting", ".", "get_sorted_pointlist", "(", ")", ")", "handwriting", ".", "show", "(", ")", "elif", "\"feature-files\"", "in", "target_folder", ":", "logging", ".", "info", "(", "\"Create feature file...\"", ")", "infofile_path", "=", "os", ".", "path", ".", "join", "(", "target_folder", ",", "\"info.yml\"", ")", "with", "open", "(", "infofile_path", ",", "'r'", ")", "as", "ymlfile", ":", "feature_description", "=", "yaml", ".", "load", "(", "ymlfile", ")", "feature_str_list", "=", "feature_description", "[", "'features'", "]", "feature_list", "=", "features", ".", "get_features", "(", "feature_str_list", ")", "feature_count", "=", "sum", "(", "map", "(", "lambda", "n", ":", "n", ".", "get_dimension", "(", ")", ",", "feature_list", ")", ")", "x", "=", "handwriting", ".", "feature_extraction", "(", "feature_list", ")", "# Create hdf5", "_", ",", "output_filename", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'.hdf5'", ",", "text", "=", "True", ")", "create_hdf5", "(", "output_filename", ",", "feature_count", ",", "[", "(", "x", ",", "0", ")", "]", ")", "elif", "\"model\"", "in", "target_folder", ":", "logfile", ",", "model_use", "=", "_evaluate_model_single_file", "(", "target_folder", ",", "output_filename", ")", "return", "logfile", "else", ":", "logging", ".", "info", "(", "\"'%s' not found\"", ",", "target_folder", ")", "os", ".", "remove", "(", "output_filename", ")", "os", ".", "remove", "(", "model_use", ")" ]
Evaluate model for a single recording.
[ "Evaluate", "model", "for", "a", "single", "recording", "." ]
python
train
rodluger/everest
everest/missions/k2/pipelines.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pipelines.py#L137-L193
def get_cdpp(campaign, pipeline='everest2'): ''' Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". ''' # Imports from .k2 import CDPP from .utils import GetK2Campaign # Check pipeline assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline # Create file if it doesn't exist file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(campaign), pipeline)) if not os.path.exists(file): open(file, 'a').close() # Get all EPIC stars stars = GetK2Campaign(campaign, epics_only=True) nstars = len(stars) # Remove ones we've done with warnings.catch_warnings(): warnings.simplefilter("ignore") done = np.loadtxt(file, dtype=float) if len(done): done = [int(s) for s in done[:, 0]] stars = list(set(stars) - set(done)) n = len(done) + 1 # Open the output file with open(file, 'a', 1) as outfile: # Loop over all to get the CDPP for EPIC in stars: # Progress sys.stdout.write('\rRunning target %d/%d...' % (n, nstars)) sys.stdout.flush() n += 1 # Get the CDPP try: _, flux = get(EPIC, pipeline=pipeline, campaign=campaign) mask = np.where(np.isnan(flux))[0] flux = np.delete(flux, mask) cdpp = CDPP(flux) except (urllib.error.HTTPError, urllib.error.URLError, TypeError, ValueError, IndexError): print("{:>09d} {:>15.3f}".format(EPIC, 0), file=outfile) continue # Log to file print("{:>09d} {:>15.3f}".format(EPIC, cdpp), file=outfile)
[ "def", "get_cdpp", "(", "campaign", ",", "pipeline", "=", "'everest2'", ")", ":", "# Imports", "from", ".", "k2", "import", "CDPP", "from", ".", "utils", "import", "GetK2Campaign", "# Check pipeline", "assert", "pipeline", ".", "lower", "(", ")", "in", "Pipelines", ",", "'Invalid pipeline: `%s`.'", "%", "pipeline", "# Create file if it doesn't exist", "file", "=", "os", ".", "path", ".", "join", "(", "EVEREST_SRC", ",", "'missions'", ",", "'k2'", ",", "'tables'", ",", "'c%02d_%s.cdpp'", "%", "(", "int", "(", "campaign", ")", ",", "pipeline", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "open", "(", "file", ",", "'a'", ")", ".", "close", "(", ")", "# Get all EPIC stars", "stars", "=", "GetK2Campaign", "(", "campaign", ",", "epics_only", "=", "True", ")", "nstars", "=", "len", "(", "stars", ")", "# Remove ones we've done", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "done", "=", "np", ".", "loadtxt", "(", "file", ",", "dtype", "=", "float", ")", "if", "len", "(", "done", ")", ":", "done", "=", "[", "int", "(", "s", ")", "for", "s", "in", "done", "[", ":", ",", "0", "]", "]", "stars", "=", "list", "(", "set", "(", "stars", ")", "-", "set", "(", "done", ")", ")", "n", "=", "len", "(", "done", ")", "+", "1", "# Open the output file", "with", "open", "(", "file", ",", "'a'", ",", "1", ")", "as", "outfile", ":", "# Loop over all to get the CDPP", "for", "EPIC", "in", "stars", ":", "# Progress", "sys", ".", "stdout", ".", "write", "(", "'\\rRunning target %d/%d...'", "%", "(", "n", ",", "nstars", ")", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "n", "+=", "1", "# Get the CDPP", "try", ":", "_", ",", "flux", "=", "get", "(", "EPIC", ",", "pipeline", "=", "pipeline", ",", "campaign", "=", "campaign", ")", "mask", "=", "np", ".", "where", "(", "np", ".", "isnan", "(", "flux", ")", ")", "[", "0", "]", "flux", "=", "np", ".", "delete", "(", "flux", ",", "mask", ")", "cdpp", "=", "CDPP", "(", "flux", ")", "except", "(", "urllib", ".", "error", ".", "HTTPError", ",", "urllib", ".", "error", ".", "URLError", ",", "TypeError", ",", "ValueError", ",", "IndexError", ")", ":", "print", "(", "\"{:>09d} {:>15.3f}\"", ".", "format", "(", "EPIC", ",", "0", ")", ",", "file", "=", "outfile", ")", "continue", "# Log to file", "print", "(", "\"{:>09d} {:>15.3f}\"", ".", "format", "(", "EPIC", ",", "cdpp", ")", ",", "file", "=", "outfile", ")" ]
Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/".
[ "Computes", "the", "CDPP", "for", "a", "given", "campaign", "and", "a", "given", "pipeline", ".", "Stores", "the", "results", "in", "a", "file", "under", "/", "missions", "/", "k2", "/", "tables", "/", "." ]
python
train
coghost/izen
izen/crawler.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/crawler.py#L115-L128
def fmt_cookies(self, ck): """ :param ck: :type ck: :return: :rtype: """ cks = {} for c in ck.split(';'): a = c.split('=') if len(a) != 2: continue cks[a[0].replace(' ', '')] = a[1].replace(' ', '') self.cookies = cks
[ "def", "fmt_cookies", "(", "self", ",", "ck", ")", ":", "cks", "=", "{", "}", "for", "c", "in", "ck", ".", "split", "(", "';'", ")", ":", "a", "=", "c", ".", "split", "(", "'='", ")", "if", "len", "(", "a", ")", "!=", "2", ":", "continue", "cks", "[", "a", "[", "0", "]", ".", "replace", "(", "' '", ",", "''", ")", "]", "=", "a", "[", "1", "]", ".", "replace", "(", "' '", ",", "''", ")", "self", ".", "cookies", "=", "cks" ]
:param ck: :type ck: :return: :rtype:
[ ":", "param", "ck", ":", ":", "type", "ck", ":", ":", "return", ":", ":", "rtype", ":" ]
python
train
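The parsing logic above lifts cleanly into a standalone helper; this sketch mirrors it exactly, including skipping any fragment that does not contain exactly one '='.

def parse_cookie_header(ck):
    """Turn a raw 'k1=v1; k2=v2' cookie string into a dict."""
    cks = {}
    for c in ck.split(';'):
        a = c.split('=')
        if len(a) != 2:  # skip flags and malformed fragments
            continue
        cks[a[0].replace(' ', '')] = a[1].replace(' ', '')
    return cks

print(parse_cookie_header("sessionid=abc123; theme=dark; Secure"))
# {'sessionid': 'abc123', 'theme': 'dark'}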
wolfhong/formic
build.py
https://github.com/wolfhong/formic/blob/0d81eb88dcbb6fa705194fc6ccf2993f4abbaa76/build.py#L134-L155
def configure_google_analytics():
    """An optional task; if run, this will switch on Google Analytics,
    reporting documentation usage to Aviser. This is meant to be
    run only by Aviser when producing HTML for the main web site.
    """
    f = open(os.path.join("doc", "_templates", "google-analytics.html"), "w")
    f.write("""<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-31981784-1']);
_gaq.push(['_setDomainName', 'aviser.asia']);
_gaq.push(['_setAllowLinker', true]);
_gaq.push(['_trackPageview']);

(function() {
  var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
  ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
  var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>""")
    f.close()
[ "def", "configure_google_analytics", "(", ")", ":", "f", "=", "open", "(", "os", ".", "path", ".", "join", "(", "\"doc\"", ",", "\"_templates\"", ",", "\"google-analytics.html\"", ")", ",", "\"w\"", ")", "f", ".", "write", "(", "\"\"\"<script type=\"text/javascript\">\nvar _gaq = _gaq || [];\n_gaq.push(['_setAccount', 'UA-31981784-1']);\n_gaq.push(['_setDomainName', 'aviser.asia']);\n_gaq.push(['_setAllowLinker', true]);\n_gaq.push(['_trackPageview']);\n\n(function() {\n var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;\n ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';\n var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);\n})();\n</script>\"\"\"", ")", "f", ".", "close", "(", ")" ]
An optional task; if run, this will switch on Google Analytics,
    reporting documentation usage to Aviser. This is meant to be
    run only by Aviser when producing HTML for the main web site.
[ "An", "optional", "task", ";", "if", "run", "this", "will", "switch", "on", "Google", "Analystics", "reporting", "documentation", "usage", "to", "Aviser", "." ]
python
train
pybel/pybel
src/pybel/canonicalize.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/canonicalize.py#L35-L52
def postpend_location(bel_string: str, location_model) -> str:
    """Rips off the closing parentheses and adds canonicalized modification.

    I did this because writing a whole new parsing model for the data would be sad and difficult

    :param bel_string: BEL string representing node
    :param dict location_model: A dictionary containing keys :code:`pybel.constants.TO_LOC` and
                                :code:`pybel.constants.FROM_LOC`
    :return: A part of a BEL string representing the location
    """
    if not all(k in location_model for k in {NAMESPACE, NAME}):
        raise ValueError('Location model missing namespace and/or name keys: {}'.format(location_model))

    return "{}, loc({}:{}))".format(
        bel_string[:-1],
        location_model[NAMESPACE],
        ensure_quotes(location_model[NAME])
    )
[ "def", "postpend_location", "(", "bel_string", ":", "str", ",", "location_model", ")", "->", "str", ":", "if", "not", "all", "(", "k", "in", "location_model", "for", "k", "in", "{", "NAMESPACE", ",", "NAME", "}", ")", ":", "raise", "ValueError", "(", "'Location model missing namespace and/or name keys: {}'", ".", "format", "(", "location_model", ")", ")", "return", "\"{}, loc({}:{}))\"", ".", "format", "(", "bel_string", "[", ":", "-", "1", "]", ",", "location_model", "[", "NAMESPACE", "]", ",", "ensure_quotes", "(", "location_model", "[", "NAME", "]", ")", ")" ]
Rips off the closing parentheses and adds canonicalized modification.

    I did this because writing a whole new parsing model for the data would be sad and difficult

    :param bel_string: BEL string representing node
    :param dict location_model: A dictionary containing keys :code:`pybel.constants.TO_LOC` and
                                :code:`pybel.constants.FROM_LOC`
    :return: A part of a BEL string representing the location
[ "Rip", "off", "the", "closing", "parentheses", "and", "adds", "canonicalized", "modification", "." ]
python
train
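A usage sketch, assuming postpend_location is in scope; NAMESPACE and NAME are the pybel.constants keys the function checks for, and the node string and location model are invented.

from pybel.constants import NAME, NAMESPACE

bel = 'p(HGNC:YFG)'  # hypothetical protein node string
location = {NAMESPACE: 'GO', NAME: 'extracellular space'}

print(postpend_location(bel, location))
# p(HGNC:YFG, loc(GO:"extracellular space")), assuming ensure_quotes
# wraps names that contain spaces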
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L2092-L2111
def Network_setBlockedURLs(self, urls): """ Function path: Network.setBlockedURLs Domain: Network Method name: setBlockedURLs WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'urls' (type: array) -> URL patterns to block. Wildcards ('*') are allowed. No return value. Description: Blocks URLs from loading. """ assert isinstance(urls, (list, tuple) ), "Argument 'urls' must be of type '['list', 'tuple']'. Received type: '%s'" % type( urls) subdom_funcs = self.synchronous_command('Network.setBlockedURLs', urls=urls) return subdom_funcs
[ "def", "Network_setBlockedURLs", "(", "self", ",", "urls", ")", ":", "assert", "isinstance", "(", "urls", ",", "(", "list", ",", "tuple", ")", ")", ",", "\"Argument 'urls' must be of type '['list', 'tuple']'. Received type: '%s'\"", "%", "type", "(", "urls", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'Network.setBlockedURLs'", ",", "urls", "=", "urls", ")", "return", "subdom_funcs" ]
Function path: Network.setBlockedURLs Domain: Network Method name: setBlockedURLs WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'urls' (type: array) -> URL patterns to block. Wildcards ('*') are allowed. No return value. Description: Blocks URLs from loading.
[ "Function", "path", ":", "Network", ".", "setBlockedURLs", "Domain", ":", "Network", "Method", "name", ":", "setBlockedURLs", "WARNING", ":", "This", "function", "is", "marked", "Experimental", "!", "Parameters", ":", "Required", "arguments", ":", "urls", "(", "type", ":", "array", ")", "-", ">", "URL", "patterns", "to", "block", ".", "Wildcards", "(", "*", ")", "are", "allowed", ".", "No", "return", "value", ".", "Description", ":", "Blocks", "URLs", "from", "loading", "." ]
python
train
nathankw/pulsarpy
pulsarpy/models.py
https://github.com/nathankw/pulsarpy/blob/359b040c0f2383b88c0b5548715fefd35f5f634c/pulsarpy/models.py#L634-L648
def write_response_html_to_file(response,filename):
    """
    An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly
    beneficial when developing the server-side API. This method will write the response HTML
    for viewing the error details in the browser.

    Args:
        response: `requests.models.Response` instance.
        filename: `str`. The output file name.
    """
    fout = open(filename,'w')
    if not str(response.status_code).startswith("2"):
        Model.debug_logger.debug(response.text)
    fout.write(response.text)
    fout.close()
[ "def", "write_response_html_to_file", "(", "response", ",", "filename", ")", ":", "fout", "=", "open", "(", "filename", ",", "'w'", ")", "if", "not", "str", "(", "response", ".", "status_code", ")", ".", "startswith", "(", "\"2\"", ")", ":", "Model", ".", "debug_logger", ".", "debug", "(", "response", ".", "text", ")", "fout", ".", "write", "(", "response", ".", "text", ")", "fout", ".", "close", "(", ")" ]
An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly
    beneficial when developing the server-side API. This method will write the response HTML
    for viewing the error details in the browser.

    Args:
        response: `requests.models.Response` instance.
        filename: `str`. The output file name.
[ "An", "aid", "in", "troubleshooting", "internal", "application", "errors", "i", ".", "e", ".", "<Response", "[", "500", "]", ">", "to", "be", "mainly", "beneficial", "when", "developing", "the", "server", "-", "side", "API", ".", "This", "method", "will", "write", "the", "response", "HTML", "for", "viewing", "the", "error", "details", "in", "the", "browesr", "." ]
python
train
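A usage sketch with the requests library, assuming the function above is in scope; the endpoint is a placeholder. Note that the body is written unconditionally, and additionally logged for non-2xx statuses.

import requests

resp = requests.get("https://example.com/api/records")  # placeholder URL
write_response_html_to_file(resp, "last_response.html")
# Open last_response.html in a browser to inspect a <Response [500]> page.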
hhromic/python-oslom-runner
oslom/runner.py
https://github.com/hhromic/python-oslom-runner/blob/f5991bd5014c65d0a9852641d51cbc344407d6a2/oslom/runner.py#L117-L133
def read_clusters(self, min_cluster_size): """Read and parse OSLOM clusters output file.""" num_found = 0 clusters = [] with open(self.get_path(OslomRunner.OUTPUT_FILE), "r") as reader: # Read the output file every two lines for line1, line2 in itertools.izip_longest(*[reader] * 2): info = OslomRunner.RE_INFOLINE.match(line1.strip()).groups() nodes = line2.strip().split(" ") if len(nodes) >= min_cluster_size: # Apply min_cluster_size clusters.append({ "id": int(info[0]), "bs": float(info[2]), "nodes": [{"id": self.id_remapper.get_str_id(int(n))} for n in nodes], }) num_found += 1 return {"num_found": num_found, "clusters": clusters}
[ "def", "read_clusters", "(", "self", ",", "min_cluster_size", ")", ":", "num_found", "=", "0", "clusters", "=", "[", "]", "with", "open", "(", "self", ".", "get_path", "(", "OslomRunner", ".", "OUTPUT_FILE", ")", ",", "\"r\"", ")", "as", "reader", ":", "# Read the output file every two lines", "for", "line1", ",", "line2", "in", "itertools", ".", "izip_longest", "(", "*", "[", "reader", "]", "*", "2", ")", ":", "info", "=", "OslomRunner", ".", "RE_INFOLINE", ".", "match", "(", "line1", ".", "strip", "(", ")", ")", ".", "groups", "(", ")", "nodes", "=", "line2", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "len", "(", "nodes", ")", ">=", "min_cluster_size", ":", "# Apply min_cluster_size", "clusters", ".", "append", "(", "{", "\"id\"", ":", "int", "(", "info", "[", "0", "]", ")", ",", "\"bs\"", ":", "float", "(", "info", "[", "2", "]", ")", ",", "\"nodes\"", ":", "[", "{", "\"id\"", ":", "self", ".", "id_remapper", ".", "get_str_id", "(", "int", "(", "n", ")", ")", "}", "for", "n", "in", "nodes", "]", ",", "}", ")", "num_found", "+=", "1", "return", "{", "\"num_found\"", ":", "num_found", ",", "\"clusters\"", ":", "clusters", "}" ]
Read and parse OSLOM clusters output file.
[ "Read", "and", "parse", "OSLOM", "clusters", "output", "file", "." ]
python
train
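The two-lines-at-a-time trick above relies on Python 2's itertools.izip_longest; the same idiom on Python 3 uses zip_longest over two references to one iterator, sketched here with a hypothetical output file name.

import itertools

with open("tp", "r") as reader:  # hypothetical OSLOM output file
    # Both elements of [reader] * 2 are the same iterator, so each
    # zip_longest step consumes two consecutive lines of the file.
    for line1, line2 in itertools.zip_longest(*[reader] * 2):
        header = line1.strip()            # '#module ...' info line
        nodes = line2.strip().split(" ")  # node ids of the cluster
        print(header, len(nodes))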
pyamg/pyamg
pyamg/gallery/elasticity.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/elasticity.py#L319-L331
def p12d_local(vertices, lame, mu): """Local stiffness matrix for P1 elements in 2d.""" assert(vertices.shape == (3, 2)) A = np.vstack((np.ones((1, 3)), vertices.T)) PhiGrad = inv(A)[:, 1:] # gradients of basis functions R = np.zeros((3, 6)) R[[[0], [2]], [0, 2, 4]] = PhiGrad.T R[[[2], [1]], [1, 3, 5]] = PhiGrad.T C = mu*np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]]) +\ lame*np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]]) K = det(A)/2.0*np.dot(np.dot(R.T, C), R) return K
[ "def", "p12d_local", "(", "vertices", ",", "lame", ",", "mu", ")", ":", "assert", "(", "vertices", ".", "shape", "==", "(", "3", ",", "2", ")", ")", "A", "=", "np", ".", "vstack", "(", "(", "np", ".", "ones", "(", "(", "1", ",", "3", ")", ")", ",", "vertices", ".", "T", ")", ")", "PhiGrad", "=", "inv", "(", "A", ")", "[", ":", ",", "1", ":", "]", "# gradients of basis functions", "R", "=", "np", ".", "zeros", "(", "(", "3", ",", "6", ")", ")", "R", "[", "[", "[", "0", "]", ",", "[", "2", "]", "]", ",", "[", "0", ",", "2", ",", "4", "]", "]", "=", "PhiGrad", ".", "T", "R", "[", "[", "[", "2", "]", ",", "[", "1", "]", "]", ",", "[", "1", ",", "3", ",", "5", "]", "]", "=", "PhiGrad", ".", "T", "C", "=", "mu", "*", "np", ".", "array", "(", "[", "[", "2", ",", "0", ",", "0", "]", ",", "[", "0", ",", "2", ",", "0", "]", ",", "[", "0", ",", "0", ",", "1", "]", "]", ")", "+", "lame", "*", "np", ".", "array", "(", "[", "[", "1", ",", "1", ",", "0", "]", ",", "[", "1", ",", "1", ",", "0", "]", ",", "[", "0", ",", "0", ",", "0", "]", "]", ")", "K", "=", "det", "(", "A", ")", "/", "2.0", "*", "np", ".", "dot", "(", "np", ".", "dot", "(", "R", ".", "T", ",", "C", ")", ",", "R", ")", "return", "K" ]
Local stiffness matrix for P1 elements in 2d.
[ "Local", "stiffness", "matrix", "for", "P1", "elements", "in", "2d", "." ]
python
train
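A numeric smoke test, assuming p12d_local is importable from pyamg.gallery.elasticity; the reference triangle and Lamé parameters are arbitrary.

import numpy as np

from pyamg.gallery.elasticity import p12d_local  # assumed import path

# Unit reference triangle; lame and mu chosen arbitrarily.
vertices = np.array([[0.0, 0.0],
                     [1.0, 0.0],
                     [0.0, 1.0]])
K = p12d_local(vertices, lame=1.0, mu=1.0)

print(K.shape)              # (6, 6): two displacement dofs per vertex
print(np.allclose(K, K.T))  # True: local stiffness matrices are symmetric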
gc3-uzh-ch/elasticluster
elasticluster/providers/gce.py
https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L213-L246
def _wait_until_done(self, response, wait=30): """Blocks until the operation status is done for the given operation. :param response: The response object used in a previous GCE call. :param int wait: Wait up to this number of seconds in between successive polling of the GCE status. """ gce = self._connect() status = response['status'] while status != 'DONE' and response: # wait a random amount of time (up to `wait` seconds) if wait: time.sleep(1 + random.randrange(wait)) operation_id = response['name'] # Identify if this is a per-zone resource if 'zone' in response: zone_name = response['zone'].split('/')[-1] request = gce.zoneOperations().get( project=self._project_id, operation=operation_id, zone=zone_name) else: request = gce.globalOperations().get( project=self._project_id, operation=operation_id) response = self._execute_request(request) if response: status = response['status'] return response
[ "def", "_wait_until_done", "(", "self", ",", "response", ",", "wait", "=", "30", ")", ":", "gce", "=", "self", ".", "_connect", "(", ")", "status", "=", "response", "[", "'status'", "]", "while", "status", "!=", "'DONE'", "and", "response", ":", "# wait a random amount of time (up to `wait` seconds)", "if", "wait", ":", "time", ".", "sleep", "(", "1", "+", "random", ".", "randrange", "(", "wait", ")", ")", "operation_id", "=", "response", "[", "'name'", "]", "# Identify if this is a per-zone resource", "if", "'zone'", "in", "response", ":", "zone_name", "=", "response", "[", "'zone'", "]", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", "request", "=", "gce", ".", "zoneOperations", "(", ")", ".", "get", "(", "project", "=", "self", ".", "_project_id", ",", "operation", "=", "operation_id", ",", "zone", "=", "zone_name", ")", "else", ":", "request", "=", "gce", ".", "globalOperations", "(", ")", ".", "get", "(", "project", "=", "self", ".", "_project_id", ",", "operation", "=", "operation_id", ")", "response", "=", "self", ".", "_execute_request", "(", "request", ")", "if", "response", ":", "status", "=", "response", "[", "'status'", "]", "return", "response" ]
Blocks until the operation status is done for the given operation. :param response: The response object used in a previous GCE call. :param int wait: Wait up to this number of seconds in between successive polling of the GCE status.
[ "Blocks", "until", "the", "operation", "status", "is", "done", "for", "the", "given", "operation", "." ]
python
train
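The polling pattern above generalizes beyond GCE; a minimal standalone sketch, keeping the randomized sleep that spreads out concurrent pollers.

import random
import time

def wait_until_done(get_status, wait=30):
    """Poll get_status() until it reports 'DONE', sleeping a random
    1..wait seconds between polls, as in _wait_until_done above."""
    status = get_status()
    while status != 'DONE':
        time.sleep(1 + random.randrange(wait))
        status = get_status()
    return status

# Toy status source that finishes on the third poll.
polls = iter(['PENDING', 'RUNNING', 'DONE'])
print(wait_until_done(lambda: next(polls), wait=1))  # DONE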
google/dotty
efilter/transforms/solve.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L543-L560
def solve_each(expr, vars):
    """Return True if RHS evaluates to a true value with each state of LHS.

    If LHS evaluates to a normal IAssociative object then this is the same
    as a regular let-form, except the return value is always a boolean.

    If LHS evaluates to a repeated var (see efilter.protocols.repeated) of
    IAssociative objects then RHS will be evaluated with each state and
    True will be returned only if each result is true.
    """
    lhs_values, _ = __solve_for_repeated(expr.lhs, vars)

    for lhs_value in repeated.getvalues(lhs_values):
        result = solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value))
        if not result.value:
            # Each is required to return an actual boolean.
            return result._replace(value=False)

    return Result(True, ())
[ "def", "solve_each", "(", "expr", ",", "vars", ")", ":", "lhs_values", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "for", "lhs_value", "in", "repeated", ".", "getvalues", "(", "lhs_values", ")", ":", "result", "=", "solve", "(", "expr", ".", "rhs", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "lhs_value", ")", ")", "if", "not", "result", ".", "value", ":", "# Each is required to return an actual boolean.", "return", "result", ".", "_replace", "(", "value", "=", "False", ")", "return", "Result", "(", "True", ",", "(", ")", ")" ]
Return True if RHS evaluates to a true value with each state of LHS.

    If LHS evaluates to a normal IAssociative object then this is the same
    as a regular let-form, except the return value is always a boolean.

    If LHS evaluates to a repeated var (see efilter.protocols.repeated) of
    IAssociative objects then RHS will be evaluated with each state and
    True will be returned only if each result is true.
[ "Return", "True", "if", "RHS", "evaluates", "to", "a", "true", "value", "with", "each", "state", "of", "LHS", "." ]
python
train
helixyte/everest
everest/resources/descriptors.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/resources/descriptors.py#L143-L160
def make_relationship(self, relator, direction= RELATIONSHIP_DIRECTIONS.BIDIRECTIONAL): """ Create a relationship object for this attribute from the given relator and relationship direction. """ if IEntity.providedBy(relator): # pylint:disable=E1101 rel = DomainRelationship(relator, self, direction=direction) elif IResource.providedBy(relator): # pylint:disable=E1101 rel = ResourceRelationship(relator, self, direction=direction) else: raise ValueError('Invalid relator argument "%s" for ' 'relationship; must provide IEntity or ' 'IResource.' % relator) return rel
[ "def", "make_relationship", "(", "self", ",", "relator", ",", "direction", "=", "RELATIONSHIP_DIRECTIONS", ".", "BIDIRECTIONAL", ")", ":", "if", "IEntity", ".", "providedBy", "(", "relator", ")", ":", "# pylint:disable=E1101", "rel", "=", "DomainRelationship", "(", "relator", ",", "self", ",", "direction", "=", "direction", ")", "elif", "IResource", ".", "providedBy", "(", "relator", ")", ":", "# pylint:disable=E1101", "rel", "=", "ResourceRelationship", "(", "relator", ",", "self", ",", "direction", "=", "direction", ")", "else", ":", "raise", "ValueError", "(", "'Invalid relator argument \"%s\" for '", "'relationship; must provide IEntity or '", "'IResource.'", "%", "relator", ")", "return", "rel" ]
Create a relationship object for this attribute from the given relator and relationship direction.
[ "Create", "a", "relationship", "object", "for", "this", "attribute", "from", "the", "given", "relator", "and", "relationship", "direction", "." ]
python
train
openstack/quark
quark/plugin_modules/floating_ips.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/plugin_modules/floating_ips.py#L139-L170
def _create_flip(context, flip, port_fixed_ips): """Associates the flip with ports and creates it with the flip driver :param context: neutron api request context. :param flip: quark.db.models.IPAddress object representing a floating IP :param port_fixed_ips: dictionary of the structure: {"<id of port>": {"port": <quark.db.models.Port>, "fixed_ip": "<fixed ip address>"}} :return: None """ if port_fixed_ips: context.session.begin() try: ports = [val['port'] for val in port_fixed_ips.values()] flip = db_api.port_associate_ip(context, ports, flip, port_fixed_ips.keys()) for port_id in port_fixed_ips: fixed_ip = port_fixed_ips[port_id]['fixed_ip'] flip = db_api.floating_ip_associate_fixed_ip(context, flip, fixed_ip) flip_driver = registry.DRIVER_REGISTRY.get_driver() flip_driver.register_floating_ip(flip, port_fixed_ips) context.session.commit() except Exception: context.session.rollback() raise # alexm: Notify from this method for consistency with _delete_flip billing.notify(context, billing.IP_ASSOC, flip)
[ "def", "_create_flip", "(", "context", ",", "flip", ",", "port_fixed_ips", ")", ":", "if", "port_fixed_ips", ":", "context", ".", "session", ".", "begin", "(", ")", "try", ":", "ports", "=", "[", "val", "[", "'port'", "]", "for", "val", "in", "port_fixed_ips", ".", "values", "(", ")", "]", "flip", "=", "db_api", ".", "port_associate_ip", "(", "context", ",", "ports", ",", "flip", ",", "port_fixed_ips", ".", "keys", "(", ")", ")", "for", "port_id", "in", "port_fixed_ips", ":", "fixed_ip", "=", "port_fixed_ips", "[", "port_id", "]", "[", "'fixed_ip'", "]", "flip", "=", "db_api", ".", "floating_ip_associate_fixed_ip", "(", "context", ",", "flip", ",", "fixed_ip", ")", "flip_driver", "=", "registry", ".", "DRIVER_REGISTRY", ".", "get_driver", "(", ")", "flip_driver", ".", "register_floating_ip", "(", "flip", ",", "port_fixed_ips", ")", "context", ".", "session", ".", "commit", "(", ")", "except", "Exception", ":", "context", ".", "session", ".", "rollback", "(", ")", "raise", "# alexm: Notify from this method for consistency with _delete_flip", "billing", ".", "notify", "(", "context", ",", "billing", ".", "IP_ASSOC", ",", "flip", ")" ]
Associates the flip with ports and creates it with the flip driver :param context: neutron api request context. :param flip: quark.db.models.IPAddress object representing a floating IP :param port_fixed_ips: dictionary of the structure: {"<id of port>": {"port": <quark.db.models.Port>, "fixed_ip": "<fixed ip address>"}} :return: None
[ "Associates", "the", "flip", "with", "ports", "and", "creates", "it", "with", "the", "flip", "driver" ]
python
valid
materialsproject/pymatgen
pymatgen/core/lattice.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/lattice.py#L1274-L1330
def get_integer_index(
    miller_index: bool, round_dp: int = 4, verbose: bool = True
) -> Tuple[int, int, int]:
    """
    Attempt to convert a vector of floats to whole numbers.

    Args:
        miller_index (list of float): A list of Miller indexes.
        round_dp (int, optional): The number of decimal places to round the
            miller index to.
        verbose (bool, optional): Whether to print warnings.

    Returns:
        (tuple): The Miller index.
    """
    miller_index = np.asarray(miller_index)

    # deal with the case we have small irregular floats
    # that are all equal or factors of each other
    miller_index /= min([m for m in miller_index if m != 0])
    miller_index /= np.max(np.abs(miller_index))

    # deal with the case we have nice fractions
    md = [Fraction(n).limit_denominator(12).denominator for n in miller_index]
    miller_index *= reduce(lambda x, y: x * y, md)
    int_miller_index = np.int_(np.round(miller_index, 1))
    miller_index /= np.abs(reduce(gcd, int_miller_index))

    # round to a reasonable precision
    miller_index = np.array([round(h, round_dp) for h in miller_index])

    # need to recalculate this after rounding as values may have changed
    int_miller_index = np.int_(np.round(miller_index, 1))
    if np.any(np.abs(miller_index - int_miller_index) > 1e-6) and verbose:
        warnings.warn("Non-integer encountered in Miller index")
    else:
        miller_index = int_miller_index

    # minimise the number of negative indexes
    miller_index += 0  # converts -0 to 0

    def n_minus(index):
        return len([h for h in index if h < 0])

    if n_minus(miller_index) > n_minus(miller_index * -1):
        miller_index *= -1

    # if only one index is negative, make sure it is the smallest
    # e.g. (-2 1 0) -> (2 -1 0)
    if (
        sum(miller_index != 0) == 2
        and n_minus(miller_index) == 1
        and abs(min(miller_index)) > max(miller_index)
    ):
        miller_index *= -1

    return tuple(miller_index)
[ "def", "get_integer_index", "(", "miller_index", ":", "bool", ",", "round_dp", ":", "int", "=", "4", ",", "verbose", ":", "bool", "=", "True", ")", "->", "Tuple", "[", "int", ",", "int", ",", "int", "]", ":", "miller_index", "=", "np", ".", "asarray", "(", "miller_index", ")", "# deal with the case we have small irregular floats", "# that are all equal or factors of each other", "miller_index", "/=", "min", "(", "[", "m", "for", "m", "in", "miller_index", "if", "m", "!=", "0", "]", ")", "miller_index", "/=", "np", ".", "max", "(", "np", ".", "abs", "(", "miller_index", ")", ")", "# deal with the case we have nice fractions", "md", "=", "[", "Fraction", "(", "n", ")", ".", "limit_denominator", "(", "12", ")", ".", "denominator", "for", "n", "in", "miller_index", "]", "miller_index", "*=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "*", "y", ",", "md", ")", "int_miller_index", "=", "np", ".", "int_", "(", "np", ".", "round", "(", "miller_index", ",", "1", ")", ")", "miller_index", "/=", "np", ".", "abs", "(", "reduce", "(", "gcd", ",", "int_miller_index", ")", ")", "# round to a reasonable precision", "miller_index", "=", "np", ".", "array", "(", "[", "round", "(", "h", ",", "round_dp", ")", "for", "h", "in", "miller_index", "]", ")", "# need to recalculate this after rounding as values may have changed", "int_miller_index", "=", "np", ".", "int_", "(", "np", ".", "round", "(", "miller_index", ",", "1", ")", ")", "if", "np", ".", "any", "(", "np", ".", "abs", "(", "miller_index", "-", "int_miller_index", ")", ">", "1e-6", ")", "and", "verbose", ":", "warnings", ".", "warn", "(", "\"Non-integer encountered in Miller index\"", ")", "else", ":", "miller_index", "=", "int_miller_index", "# minimise the number of negative indexes", "miller_index", "+=", "0", "# converts -0 to 0", "def", "n_minus", "(", "index", ")", ":", "return", "len", "(", "[", "h", "for", "h", "in", "index", "if", "h", "<", "0", "]", ")", "if", "n_minus", "(", "miller_index", ")", ">", "n_minus", "(", "miller_index", "*", "-", "1", ")", ":", "miller_index", "*=", "-", "1", "# if only one index is negative, make sure it is the smallest", "# e.g. (-2 1 0) -> (2 -1 0)", "if", "(", "sum", "(", "miller_index", "!=", "0", ")", "==", "2", "and", "n_minus", "(", "miller_index", ")", "==", "1", "and", "abs", "(", "min", "(", "miller_index", ")", ")", ">", "max", "(", "miller_index", ")", ")", ":", "miller_index", "*=", "-", "1", "return", "tuple", "(", "miller_index", ")" ]
Attempt to convert a vector of floats to whole numbers.

    Args:
        miller_index (list of float): A list of Miller indexes.
        round_dp (int, optional): The number of decimal places to round the
            miller index to.
        verbose (bool, optional): Whether to print warnings.

    Returns:
        (tuple): The Miller index.
[ "Attempt", "to", "convert", "a", "vector", "of", "floats", "to", "whole", "numbers", "." ]
python
train
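Two quick illustrations, assuming the function is importable from pymatgen.core.lattice as the record's path suggests.

from pymatgen.core.lattice import get_integer_index  # assumed import path

# Scaled and fractional float vectors reduce to integer Miller indices.
print(get_integer_index([0.25, 0.25, 0.0]))    # (1, 1, 0)
print(get_integer_index([1 / 3, 2 / 3, 0.0]))  # (1, 2, 0)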
tanghaibao/jcvi
jcvi/annotation/reformat.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/reformat.py#L922-L931
def parse_prefix(identifier): """ Parse identifier such as a|c|le|d|li|re|or|AT4G00480.1 and return tuple of prefix string (separated at '|') and suffix (AGI identifier) """ pf, id = (), identifier if "|" in identifier: pf, id = tuple(identifier.split('|')[:-1]), identifier.split('|')[-1] return pf, id
[ "def", "parse_prefix", "(", "identifier", ")", ":", "pf", ",", "id", "=", "(", ")", ",", "identifier", "if", "\"|\"", "in", "identifier", ":", "pf", ",", "id", "=", "tuple", "(", "identifier", ".", "split", "(", "'|'", ")", "[", ":", "-", "1", "]", ")", ",", "identifier", ".", "split", "(", "'|'", ")", "[", "-", "1", "]", "return", "pf", ",", "id" ]
Parse identifier such as a|c|le|d|li|re|or|AT4G00480.1 and return tuple of prefix string (separated at '|') and suffix (AGI identifier)
[ "Parse", "identifier", "such", "as", "a|c|le|d|li|re|or|AT4G00480", ".", "1", "and", "return", "tuple", "of", "prefix", "string", "(", "separated", "at", "|", ")", "and", "suffix", "(", "AGI", "identifier", ")" ]
python
train
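A self-contained copy of the tiny helper above, exercising the docstring's own example.

def parse_prefix(identifier):
    """Split 'a|b|suffix' into (('a', 'b'), 'suffix'); no '|' means an empty prefix."""
    pf, id = (), identifier
    if "|" in identifier:
        pf, id = tuple(identifier.split('|')[:-1]), identifier.split('|')[-1]
    return pf, id

print(parse_prefix("a|c|le|d|li|re|or|AT4G00480.1"))
# (('a', 'c', 'le', 'd', 'li', 're', 'or'), 'AT4G00480.1')
print(parse_prefix("AT4G00480.1"))
# ((), 'AT4G00480.1')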
hyperledger/indy-sdk
wrappers/python/indy/did.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/did.py#L576-L605
async def abbreviate_verkey(did: str, full_verkey: str) -> str:
    """
    Retrieves abbreviated verkey if it is possible otherwise return full verkey.

    :param did: The DID.
    :param full_verkey: The DID's verification key.

    :return: metadata: Either abbreviated or full verkey.
    """

    logger = logging.getLogger(__name__)
    logger.debug("abbreviate_verkey: >>> did: %r, full_verkey: %r",
                 did,
                 full_verkey)

    if not hasattr(abbreviate_verkey, "cb"):
        logger.debug("abbreviate_verkey: Creating callback")
        abbreviate_verkey.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    c_did = c_char_p(did.encode('utf-8'))
    c_full_verkey = c_char_p(full_verkey.encode('utf-8'))

    metadata = await do_call('indy_abbreviate_verkey',
                             c_did,
                             c_full_verkey,
                             abbreviate_verkey.cb)

    res = metadata.decode()

    logger.debug("abbreviate_verkey: <<< res: %r", res)
    return res
[ "async", "def", "abbreviate_verkey", "(", "did", ":", "str", ",", "full_verkey", ":", "str", ")", "->", "str", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"abbreviate_verkey: >>> did: %r, full_verkey: %r\"", ",", "did", ",", "full_verkey", ")", "if", "not", "hasattr", "(", "abbreviate_verkey", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"abbreviate_verkey: Creating callback\"", ")", "abbreviate_verkey", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_char_p", ")", ")", "c_did", "=", "c_char_p", "(", "did", ".", "encode", "(", "'utf-8'", ")", ")", "c_full_verkey", "=", "c_char_p", "(", "full_verkey", ".", "encode", "(", "'utf-8'", ")", ")", "metadata", "=", "await", "do_call", "(", "'indy_abbreviate_verkey'", ",", "c_did", ",", "c_full_verkey", ",", "abbreviate_verkey", ".", "cb", ")", "res", "=", "metadata", ".", "decode", "(", ")", "logger", ".", "debug", "(", "\"abbreviate_verkey: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Retrieves abbreviated verkey if it is possible otherwise return full verkey.

    :param did: The DID.
    :param full_verkey: The DID's verification key.

    :return: metadata: Either abbreviated or full verkey.
[ "Retrieves", "abbreviated", "verkey", "if", "it", "is", "possible", "otherwise", "return", "full", "verkey", "." ]
python
train
elifesciences/elife-tools
elifetools/json_rewrite.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/json_rewrite.py#L54-L60
def rewrite_references_json(json_content, rewrite_json): """ general purpose references json rewriting by matching the id value """ for ref in json_content: if ref.get("id") and ref.get("id") in rewrite_json: for key, value in iteritems(rewrite_json.get(ref.get("id"))): ref[key] = value return json_content
[ "def", "rewrite_references_json", "(", "json_content", ",", "rewrite_json", ")", ":", "for", "ref", "in", "json_content", ":", "if", "ref", ".", "get", "(", "\"id\"", ")", "and", "ref", ".", "get", "(", "\"id\"", ")", "in", "rewrite_json", ":", "for", "key", ",", "value", "in", "iteritems", "(", "rewrite_json", ".", "get", "(", "ref", ".", "get", "(", "\"id\"", ")", ")", ")", ":", "ref", "[", "key", "]", "=", "value", "return", "json_content" ]
general purpose references json rewriting by matching the id value
[ "general", "purpose", "references", "json", "rewriting", "by", "matching", "the", "id", "value" ]
python
train
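A self-contained sketch of the rewrite loop above, substituting dict.items() for six's iteritems so it runs on Python 3; the reference records are invented.

def rewrite_references_json(json_content, rewrite_json):
    """Overwrite fields of any reference whose id has an entry in rewrite_json."""
    for ref in json_content:
        if ref.get("id") and ref.get("id") in rewrite_json:
            for key, value in rewrite_json[ref["id"]].items():
                ref[key] = value
    return json_content

references = [{"id": "bib1", "title": "Old title"},
              {"id": "bib2", "title": "Untouched"}]
rewrites = {"bib1": {"title": "Corrected title", "year": "2012"}}

print(rewrite_references_json(references, rewrites))
# [{'id': 'bib1', 'title': 'Corrected title', 'year': '2012'},
#  {'id': 'bib2', 'title': 'Untouched'}]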
fermiPy/fermipy
fermipy/castro.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/castro.py#L896-L968
def fit_spectrum(self, specFunc, initPars, freePars=None): """ Fit for the free parameters of a spectral function Parameters ---------- specFunc : `~fermipy.spectrum.SpectralFunction` The Spectral Function initPars : `~numpy.ndarray` The initial values of the parameters freePars : `~numpy.ndarray` Boolean array indicating which parameters should be free in the fit. Returns ------- params : `~numpy.ndarray` Best-fit parameters. spec_vals : `~numpy.ndarray` The values of the best-fit spectral model in each energy bin. ts_spec : float The TS of the best-fit spectrum chi2_vals : `~numpy.ndarray` Array of chi-squared values for each energy bin. chi2_spec : float Global chi-squared value for the sum of all energy bins. pval_spec : float p-value of chi-squared for the best-fit spectrum. """ if not isinstance(specFunc, SEDFunctor): specFunc = self.create_functor(specFunc, initPars, scale=specFunc.scale) if freePars is None: freePars = np.empty(len(initPars), dtype=bool) freePars.fill(True) initPars = np.array(initPars) freePars = np.array(freePars) def fToMin(x): xp = np.array(specFunc.params) xp[freePars] = x return self.__call__(specFunc(xp)) result = fmin(fToMin, initPars[freePars], disp=False, xtol=1e-6) out_pars = specFunc.params out_pars[freePars] = np.array(result) spec_vals = specFunc(out_pars) spec_npred = np.zeros(len(spec_vals)) if isinstance(specFunc, spectrum.SEDFluxFunctor): spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_flux elif isinstance(specFunc, spectrum.SEDEFluxFunctor): spec_npred = spec_vals * self.refSpec.ref_npred / self.refSpec.ref_eflux ts_spec = self.TS_spectrum(spec_vals) chi2_vals = self.chi2_vals(spec_vals) chi2_spec = np.sum(chi2_vals) pval_spec = stats.distributions.chi2.sf(chi2_spec, len(spec_vals)) return dict(params=out_pars, spec_vals=spec_vals, spec_npred=spec_npred, ts_spec=ts_spec, chi2_spec=chi2_spec, chi2_vals=chi2_vals, pval_spec=pval_spec)
[ "def", "fit_spectrum", "(", "self", ",", "specFunc", ",", "initPars", ",", "freePars", "=", "None", ")", ":", "if", "not", "isinstance", "(", "specFunc", ",", "SEDFunctor", ")", ":", "specFunc", "=", "self", ".", "create_functor", "(", "specFunc", ",", "initPars", ",", "scale", "=", "specFunc", ".", "scale", ")", "if", "freePars", "is", "None", ":", "freePars", "=", "np", ".", "empty", "(", "len", "(", "initPars", ")", ",", "dtype", "=", "bool", ")", "freePars", ".", "fill", "(", "True", ")", "initPars", "=", "np", ".", "array", "(", "initPars", ")", "freePars", "=", "np", ".", "array", "(", "freePars", ")", "def", "fToMin", "(", "x", ")", ":", "xp", "=", "np", ".", "array", "(", "specFunc", ".", "params", ")", "xp", "[", "freePars", "]", "=", "x", "return", "self", ".", "__call__", "(", "specFunc", "(", "xp", ")", ")", "result", "=", "fmin", "(", "fToMin", ",", "initPars", "[", "freePars", "]", ",", "disp", "=", "False", ",", "xtol", "=", "1e-6", ")", "out_pars", "=", "specFunc", ".", "params", "out_pars", "[", "freePars", "]", "=", "np", ".", "array", "(", "result", ")", "spec_vals", "=", "specFunc", "(", "out_pars", ")", "spec_npred", "=", "np", ".", "zeros", "(", "len", "(", "spec_vals", ")", ")", "if", "isinstance", "(", "specFunc", ",", "spectrum", ".", "SEDFluxFunctor", ")", ":", "spec_npred", "=", "spec_vals", "*", "self", ".", "refSpec", ".", "ref_npred", "/", "self", ".", "refSpec", ".", "ref_flux", "elif", "isinstance", "(", "specFunc", ",", "spectrum", ".", "SEDEFluxFunctor", ")", ":", "spec_npred", "=", "spec_vals", "*", "self", ".", "refSpec", ".", "ref_npred", "/", "self", ".", "refSpec", ".", "ref_eflux", "ts_spec", "=", "self", ".", "TS_spectrum", "(", "spec_vals", ")", "chi2_vals", "=", "self", ".", "chi2_vals", "(", "spec_vals", ")", "chi2_spec", "=", "np", ".", "sum", "(", "chi2_vals", ")", "pval_spec", "=", "stats", ".", "distributions", ".", "chi2", ".", "sf", "(", "chi2_spec", ",", "len", "(", "spec_vals", ")", ")", "return", "dict", "(", "params", "=", "out_pars", ",", "spec_vals", "=", "spec_vals", ",", "spec_npred", "=", "spec_npred", ",", "ts_spec", "=", "ts_spec", ",", "chi2_spec", "=", "chi2_spec", ",", "chi2_vals", "=", "chi2_vals", ",", "pval_spec", "=", "pval_spec", ")" ]
Fit for the free parameters of a spectral function

Parameters
----------
specFunc : `~fermipy.spectrum.SpectralFunction`
    The Spectral Function

initPars : `~numpy.ndarray`
    The initial values of the parameters

freePars : `~numpy.ndarray`
    Boolean array indicating which parameters should be free in
    the fit.

Returns
-------
params : `~numpy.ndarray`
    Best-fit parameters.

spec_vals : `~numpy.ndarray`
    The values of the best-fit spectral model in each energy bin.

ts_spec : float
    The TS of the best-fit spectrum

chi2_vals : `~numpy.ndarray`
    Array of chi-squared values for each energy bin.

chi2_spec : float
    Global chi-squared value for the sum of all energy bins.

pval_spec : float
    p-value of chi-squared for the best-fit spectrum.
[ "Fit", "for", "the", "free", "parameters", "of", "a", "spectral", "function" ]
python
train
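The key pattern in fit_spectrum, letting the optimizer vary only the free parameters by scattering its vector into the full parameter array, can be shown standalone. This toy sketch swaps fermipy's likelihood for a quadratic objective; everything in it is illustrative.

import numpy as np
from scipy.optimize import fmin

full_pars = np.array([1.0, -2.0, 0.5])   # e.g. norm, index, curvature
free = np.array([True, False, True])     # hold the second parameter fixed
target = np.array([3.0, 7.0, 1.5])       # toy optimum (unreachable for the fixed entry)

def objective(x):
    xp = full_pars.copy()
    xp[free] = x                              # scatter free values into the full vector
    return float(np.sum((xp - target) ** 2))  # stand-in for the negative log-likelihood

best_free = fmin(objective, full_pars[free], disp=False, xtol=1e-6)
full_pars[free] = best_free
# full_pars -> approximately [3.0, -2.0, 1.5]; the frozen parameter never moved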
aio-libs/aioftp
aioftp/client.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/client.py#L812-L829
async def exists(self, path):
    """
    :py:func:`asyncio.coroutine`

    Check path for existence.

    :param path: path to check
    :type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`

    :rtype: :py:class:`bool`
    """
    try:
        await self.stat(path)
        return True
    except errors.StatusCodeError as e:
        if e.received_codes[-1].matches("550"):
            return False
        raise
[ "async", "def", "exists", "(", "self", ",", "path", ")", ":", "try", ":", "await", "self", ".", "stat", "(", "path", ")", "return", "True", "except", "errors", ".", "StatusCodeError", "as", "e", ":", "if", "e", ".", "received_codes", "[", "-", "1", "]", ".", "matches", "(", "\"550\"", ")", ":", "return", "False", "raise" ]
:py:func:`asyncio.coroutine`

Check path for existence.

:param path: path to check
:type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`

:rtype: :py:class:`bool`
[ ":", "py", ":", "func", ":", "asyncio", ".", "coroutine" ]
python
valid
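A usage sketch for the coroutine above; the host, credentials and path are placeholders, and Client.context is aioftp's connect-login-quit helper.

import asyncio
import aioftp

async def main():
    async with aioftp.Client.context("ftp.example.com", user="anonymous") as client:
        if await client.exists("pub/README"):
            print("found it")
        # A 550 reply from STAT is mapped to False; other errors still raise.

asyncio.run(main())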
intake/intake
intake/gui/catalog/gui.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/gui/catalog/gui.py#L119-L130
def callback(self, cats):
    """When a catalog is selected, enable widgets that
    depend on that condition and do done_callback"""
    enable = bool(cats)
    if not enable:
        # close search if it is visible
        self.search.visible = False
    enable_widget(self.search_widget, enable)
    enable_widget(self.remove_widget, enable)
    if self.done_callback:
        self.done_callback(cats)
[ "def", "callback", "(", "self", ",", "cats", ")", ":", "enable", "=", "bool", "(", "cats", ")", "if", "not", "enable", ":", "# close search if it is visible", "self", ".", "search", ".", "visible", "=", "False", "enable_widget", "(", "self", ".", "search_widget", ",", "enable", ")", "enable_widget", "(", "self", ".", "remove_widget", ",", "enable", ")", "if", "self", ".", "done_callback", ":", "self", ".", "done_callback", "(", "cats", ")" ]
When a catalog is selected, enable widgets that depend on that condition and do done_callback
[ "When", "a", "catalog", "is", "selected", "enable", "widgets", "that", "depend", "on", "that", "condition", "and", "do", "done_callback" ]
python
train
nicolargo/glances
glances/config.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/config.py#L297-L302
def get_int_value(self, section, option, default=0):
    """Get the int value of an option, if it exists."""
    try:
        return self.parser.getint(section, option)
    except NoOptionError:
        return int(default)
[ "def", "get_int_value", "(", "self", ",", "section", ",", "option", ",", "default", "=", "0", ")", ":", "try", ":", "return", "self", ".", "parser", ".", "getint", "(", "section", ",", "option", ")", "except", "NoOptionError", ":", "return", "int", "(", "default", ")" ]
Get the int value of an option, if it exists.
[ "Get", "the", "int", "value", "of", "an", "option", "if", "it", "exists", "." ]
python
train
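The same fallback pattern, shown standalone with the standard library (this is not Glances code). As in the method above, only a missing option is absorbed; a missing section would still raise NoSectionError.

from configparser import ConfigParser, NoOptionError

parser = ConfigParser()
parser.read_string("[network]\ntimeout = 15\n")

def get_int_value(section, option, default=0):
    try:
        return parser.getint(section, option)
    except NoOptionError:
        return int(default)

print(get_int_value("network", "timeout"))     # 15, parsed from the config
print(get_int_value("network", "retries", 3))  # 3, the fallback default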
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1241-L1247
def system_resolve_data_objects(input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /system/resolveDataObjects API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/resolveDataObjects
    """
    return DXHTTPRequest('/system/resolveDataObjects', input_params, always_retry=always_retry, **kwargs)
[ "def", "system_resolve_data_objects", "(", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/system/resolveDataObjects'", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /system/resolveDataObjects API method.

For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/System-Methods#API-method:-/system/resolveDataObjects
[ "Invokes", "the", "/", "system", "/", "resolveDataObjects", "API", "method", "." ]
python
train
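A hedged usage sketch: it assumes dxpy is installed with an authenticated DNAnexus session, the project ID and object name are placeholders, and the payload shape follows the API spec linked above.

import dxpy

result = dxpy.api.system_resolve_data_objects({
    "objects": [{"name": "reads.fastq", "project": "project-xxxx"}]  # placeholders
})
# result["results"] should contain one entry per requested object,
# each listing the matches that were resolved.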
Clinical-Genomics/scout
scout/adapter/mongo/hgnc.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/adapter/mongo/hgnc.py#L497-L513
def to_hgnc(self, hgnc_alias, build='37'):
    """Check if a hgnc symbol is an alias

    Return the correct hgnc symbol, or None if it does not exist

    Args:
        hgnc_alias(str)

    Returns:
        hgnc_symbol(str)
    """
    result = self.hgnc_genes(hgnc_symbol=hgnc_alias, build=build)
    if result:
        for gene in result:
            return gene['hgnc_symbol']
    else:
        return None
[ "def", "to_hgnc", "(", "self", ",", "hgnc_alias", ",", "build", "=", "'37'", ")", ":", "result", "=", "self", ".", "hgnc_genes", "(", "hgnc_symbol", "=", "hgnc_alias", ",", "build", "=", "build", ")", "if", "result", ":", "for", "gene", "in", "result", ":", "return", "gene", "[", "'hgnc_symbol'", "]", "else", ":", "return", "None" ]
Check if a hgnc symbol is an alias

Return the correct hgnc symbol, or None if it does not exist

Args:
    hgnc_alias(str)

Returns:
    hgnc_symbol(str)
[ "Check", "if", "a", "hgnc", "symbol", "is", "an", "alias" ]
python
test
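A hedged usage sketch; the import path and constructor are assumed from scout's layout, a local mongod with a loaded gene build is required, and ADCK3/COQ8A is a real alias pair used only as an example.

from pymongo import MongoClient
from scout.adapter import MongoAdapter  # assumed import path

adapter = MongoAdapter(MongoClient()['scout'])  # database name assumed

symbol = adapter.to_hgnc('ADCK3', build='37')
print(symbol)  # 'COQ8A' if the alias is in the loaded build, otherwise None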
wtolson/gnsq
gnsq/nsqd.py
https://github.com/wtolson/gnsq/blob/0fd02578b2c9c5fa30626d78579db2a46c10edac/gnsq/nsqd.py#L205-L218
def connect(self):
    """Initialize connection to the nsqd."""
    if self.state == DISCONNECTED:
        raise errors.NSQException('connection already closed')

    if self.is_connected:
        return

    stream = Stream(self.address, self.port, self.timeout)
    stream.connect()

    self.stream = stream
    self.state = CONNECTED
    self.send(nsq.MAGIC_V2)
[ "def", "connect", "(", "self", ")", ":", "if", "self", ".", "state", "==", "DISCONNECTED", ":", "raise", "errors", ".", "NSQException", "(", "'connection already closed'", ")", "if", "self", ".", "is_connected", ":", "return", "stream", "=", "Stream", "(", "self", ".", "address", ",", "self", ".", "port", ",", "self", ".", "timeout", ")", "stream", ".", "connect", "(", ")", "self", ".", "stream", "=", "stream", "self", ".", "state", "=", "CONNECTED", "self", ".", "send", "(", "nsq", ".", "MAGIC_V2", ")" ]
Initialize connection to the nsqd.
[ "Initialize", "connection", "to", "the", "nsqd", "." ]
python
train
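A usage sketch against a local nsqd; the names follow the older gnsq API that this snippet comes from (newer releases split the client into NsqdTCPClient/NsqdHTTPClient), so treat the exact calls as assumptions.

from gnsq import Nsqd  # older gnsq API, matching the snippet above

conn = Nsqd(address='localhost', tcp_port=4150)
conn.connect()                  # sends the MAGIC_V2 handshake shown above
conn.identify()                 # negotiate features before producing
conn.publish('topic', b'hello')
conn.close_stream()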
Contraz/demosys-py
demosys/conf/__init__.py
https://github.com/Contraz/demosys-py/blob/6466128a3029c4d09631420ccce73024025bd5b6/demosys/conf/__init__.py#L69-L73
def add_texture_dir(self, directory):
    """Hack in texture directory"""
    dirs = list(self.TEXTURE_DIRS)
    dirs.append(directory)
    self.TEXTURE_DIRS = dirs
[ "def", "add_texture_dir", "(", "self", ",", "directory", ")", ":", "dirs", "=", "list", "(", "self", ".", "TEXTURE_DIRS", ")", "dirs", ".", "append", "(", "directory", ")", "self", ".", "TEXTURE_DIRS", "=", "dirs" ]
Hack in texture directory
[ "Hack", "in", "texture", "directory" ]
python
valid
googleapis/oauth2client
oauth2client/crypt.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/crypt.py#L154-L204
def _verify_time_range(payload_dict):
    """Verifies the issued at and expiration from a JWT payload.

    Makes sure the current time (in UTC) falls between the issued at and
    expiration for the JWT (with some skew allowed for via
    ``CLOCK_SKEW_SECS``).

    Args:
        payload_dict: dict, A dictionary containing a JWT payload.

    Raises:
        AppIdentityError: If there is no ``'iat'`` field in the payload
                          dictionary.
        AppIdentityError: If there is no ``'exp'`` field in the payload
                          dictionary.
        AppIdentityError: If the JWT expiration is too far in the future (i.e.
                          if the expiration would imply a token lifetime
                          longer than what is allowed.)
        AppIdentityError: If the token appears to have been issued in the
                          future (up to clock skew).
        AppIdentityError: If the token appears to have expired in the past
                          (up to clock skew).
    """
    # Get the current time to use throughout.
    now = int(time.time())

    # Make sure issued at and expiration are in the payload.
    issued_at = payload_dict.get('iat')
    if issued_at is None:
        raise AppIdentityError(
            'No iat field in token: {0}'.format(payload_dict))
    expiration = payload_dict.get('exp')
    if expiration is None:
        raise AppIdentityError(
            'No exp field in token: {0}'.format(payload_dict))

    # Make sure the expiration gives an acceptable token lifetime.
    if expiration >= now + MAX_TOKEN_LIFETIME_SECS:
        raise AppIdentityError(
            'exp field too far in future: {0}'.format(payload_dict))

    # Make sure (up to clock skew) that the token wasn't issued in the future.
    earliest = issued_at - CLOCK_SKEW_SECS
    if now < earliest:
        raise AppIdentityError('Token used too early, {0} < {1}: {2}'.format(
            now, earliest, payload_dict))
    # Make sure (up to clock skew) that the token isn't already expired.
    latest = expiration + CLOCK_SKEW_SECS
    if now > latest:
        raise AppIdentityError('Token used too late, {0} > {1}: {2}'.format(
            now, latest, payload_dict))
[ "def", "_verify_time_range", "(", "payload_dict", ")", ":", "# Get the current time to use throughout.", "now", "=", "int", "(", "time", ".", "time", "(", ")", ")", "# Make sure issued at and expiration are in the payload.", "issued_at", "=", "payload_dict", ".", "get", "(", "'iat'", ")", "if", "issued_at", "is", "None", ":", "raise", "AppIdentityError", "(", "'No iat field in token: {0}'", ".", "format", "(", "payload_dict", ")", ")", "expiration", "=", "payload_dict", ".", "get", "(", "'exp'", ")", "if", "expiration", "is", "None", ":", "raise", "AppIdentityError", "(", "'No exp field in token: {0}'", ".", "format", "(", "payload_dict", ")", ")", "# Make sure the expiration gives an acceptable token lifetime.", "if", "expiration", ">=", "now", "+", "MAX_TOKEN_LIFETIME_SECS", ":", "raise", "AppIdentityError", "(", "'exp field too far in future: {0}'", ".", "format", "(", "payload_dict", ")", ")", "# Make sure (up to clock skew) that the token wasn't issued in the future.", "earliest", "=", "issued_at", "-", "CLOCK_SKEW_SECS", "if", "now", "<", "earliest", ":", "raise", "AppIdentityError", "(", "'Token used too early, {0} < {1}: {2}'", ".", "format", "(", "now", ",", "earliest", ",", "payload_dict", ")", ")", "# Make sure (up to clock skew) that the token isn't already expired.", "latest", "=", "expiration", "+", "CLOCK_SKEW_SECS", "if", "now", ">", "latest", ":", "raise", "AppIdentityError", "(", "'Token used too late, {0} > {1}: {2}'", ".", "format", "(", "now", ",", "latest", ",", "payload_dict", ")", ")" ]
Verifies the issued at and expiration from a JWT payload.

Makes sure the current time (in UTC) falls between the issued at and
expiration for the JWT (with some skew allowed for via
``CLOCK_SKEW_SECS``).

Args:
    payload_dict: dict, A dictionary containing a JWT payload.

Raises:
    AppIdentityError: If there is no ``'iat'`` field in the payload
                      dictionary.
    AppIdentityError: If there is no ``'exp'`` field in the payload
                      dictionary.
    AppIdentityError: If the JWT expiration is too far in the future (i.e.
                      if the expiration would imply a token lifetime longer
                      than what is allowed.)
    AppIdentityError: If the token appears to have been issued in the future
                      (up to clock skew).
    AppIdentityError: If the token appears to have expired in the past
                      (up to clock skew).
[ "Verifies", "the", "issued", "at", "and", "expiration", "from", "a", "JWT", "payload", "." ]
python
valid
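A standalone demonstration of the window these checks enforce. _verify_time_range is a private helper, imported here purely for illustration.

import time
from oauth2client.crypt import AppIdentityError, _verify_time_range

now = int(time.time())
_verify_time_range({'iat': now - 10, 'exp': now + 3600})  # in range: no exception

try:
    _verify_time_range({'iat': now - 7200, 'exp': now - 3600})  # already expired
except AppIdentityError as exc:
    print(exc)  # "Token used too late, ..."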
spyder-ide/spyder
spyder/plugins/help/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/help/plugin.py#L218-L225
def register_plugin(self):
    """Register plugin in Spyder's main window"""
    self.focus_changed.connect(self.main.plugin_focus_changed)
    self.main.add_dockwidget(self)
    self.main.console.set_help(self)

    self.internal_shell = self.main.console.shell
    self.console = self.main.console
[ "def", "register_plugin", "(", "self", ")", ":", "self", ".", "focus_changed", ".", "connect", "(", "self", ".", "main", ".", "plugin_focus_changed", ")", "self", ".", "main", ".", "add_dockwidget", "(", "self", ")", "self", ".", "main", ".", "console", ".", "set_help", "(", "self", ")", "self", ".", "internal_shell", "=", "self", ".", "main", ".", "console", ".", "shell", "self", ".", "console", "=", "self", ".", "main", ".", "console" ]
Register plugin in Spyder's main window
[ "Register", "plugin", "in", "Spyder", "s", "main", "window" ]
python
train
log2timeline/plaso
plaso/output/tln.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/tln.py#L173-L190
def _FormatNotes(self, event):
    """Formats the notes.

    Args:
      event (EventObject): event.

    Returns:
      str: formatted notes field.
    """
    inode = event.inode
    if inode is None:
      inode = '-'

    notes = getattr(event, 'notes', '')
    if not notes:
      display_name = getattr(event, 'display_name', '')
      notes = 'File: {0:s} inode: {1!s}'.format(display_name, inode)

    return self._SanitizeField(notes)
[ "def", "_FormatNotes", "(", "self", ",", "event", ")", ":", "inode", "=", "event", ".", "inode", "if", "inode", "is", "None", ":", "inode", "=", "'-'", "notes", "=", "getattr", "(", "event", ",", "'notes'", ",", "''", ")", "if", "not", "notes", ":", "display_name", "=", "getattr", "(", "event", ",", "'display_name'", ",", "''", ")", "notes", "=", "'File: {0:s} inode: {1!s}'", ".", "format", "(", "display_name", ",", "inode", ")", "return", "self", ".", "_SanitizeField", "(", "notes", ")" ]
Formats the notes.

Args:
  event (EventObject): event.

Returns:
  str: formatted notes field.
[ "Formats", "the", "notes", "." ]
python
train
chrisspen/burlap
burlap/deploy.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/deploy.py#L32-L43
def get_component_order(component_names):
    """
    Given a list of components, re-orders them according to inter-component dependencies
    so the most depended upon are first.
    """
    assert isinstance(component_names, (tuple, list))
    component_dependences = {}
    for _name in component_names:
        deps = set(manifest_deployers_befores.get(_name, []))
        deps = deps.intersection(component_names)
        component_dependences[_name] = deps
    component_order = list(topological_sort(component_dependences.items()))
    return component_order
[ "def", "get_component_order", "(", "component_names", ")", ":", "assert", "isinstance", "(", "component_names", ",", "(", "tuple", ",", "list", ")", ")", "component_dependences", "=", "{", "}", "for", "_name", "in", "component_names", ":", "deps", "=", "set", "(", "manifest_deployers_befores", ".", "get", "(", "_name", ",", "[", "]", ")", ")", "deps", "=", "deps", ".", "intersection", "(", "component_names", ")", "component_dependences", "[", "_name", "]", "=", "deps", "component_order", "=", "list", "(", "topological_sort", "(", "component_dependences", ".", "items", "(", ")", ")", ")", "return", "component_order" ]
Given a list of components, re-orders them according to inter-component dependencies so the most depended upon are first.
[ "Given", "a", "list", "of", "components", "re", "-", "orders", "them", "according", "to", "inter", "-", "component", "dependencies", "so", "the", "most", "depended", "upon", "are", "first", "." ]
python
valid
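The ordering idea in isolation: with the stdlib's graphlib standing in for burlap's topological_sort (their tie-breaking may differ), dependencies always come out before their dependents. The component names are hypothetical.

from graphlib import TopologicalSorter  # Python 3.9+

# component -> set of components it depends on (hypothetical names)
deps = {
    'nginx': {'user', 'packager'},
    'user': {'packager'},
    'packager': set(),
}
order = list(TopologicalSorter(deps).static_order())
# e.g. ['packager', 'user', 'nginx']: the most depended upon come first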
Jaymon/prom
prom/interface/sqlite.py
https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/interface/sqlite.py#L553-L574
def _normalize_sort_SQL(self, field_name, field_vals, sort_dir_str):
    """
    allow sorting by a set of values

    http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by
    """
    fvi = None
    if sort_dir_str == 'ASC':
        fvi = (t for t in enumerate(field_vals))
    else:
        fvi = (t for t in enumerate(reversed(field_vals)))

    query_sort_str = [' CASE {}'.format(self._normalize_name(field_name))]
    query_args = []
    for i, v in fvi:
        query_sort_str.append(' WHEN {} THEN {}'.format(self.val_placeholder, i))
        query_args.append(v)

    query_sort_str.append(' END')
    query_sort_str = "\n".join(query_sort_str)
    return query_sort_str, query_args
[ "def", "_normalize_sort_SQL", "(", "self", ",", "field_name", ",", "field_vals", ",", "sort_dir_str", ")", ":", "fvi", "=", "None", "if", "sort_dir_str", "==", "'ASC'", ":", "fvi", "=", "(", "t", "for", "t", "in", "enumerate", "(", "field_vals", ")", ")", "else", ":", "fvi", "=", "(", "t", "for", "t", "in", "enumerate", "(", "reversed", "(", "field_vals", ")", ")", ")", "query_sort_str", "=", "[", "' CASE {}'", ".", "format", "(", "self", ".", "_normalize_name", "(", "field_name", ")", ")", "]", "query_args", "=", "[", "]", "for", "i", ",", "v", "in", "fvi", ":", "query_sort_str", ".", "append", "(", "' WHEN {} THEN {}'", ".", "format", "(", "self", ".", "val_placeholder", ",", "i", ")", ")", "query_args", ".", "append", "(", "v", ")", "query_sort_str", ".", "append", "(", "' END'", ")", "query_sort_str", "=", "\"\\n\"", ".", "join", "(", "query_sort_str", ")", "return", "query_sort_str", ",", "query_args" ]
allow sorting by a set of values

http://stackoverflow.com/questions/3303851/sqlite-and-custom-order-by
[ "allow", "sorting", "by", "a", "set", "of", "values" ]
python
train
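What the generated CASE clause does, shown directly against sqlite3; the table and values are invented, and the placeholder syntax is sqlite3's '?' rather than prom's val_placeholder.

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (color TEXT)')
conn.executemany('INSERT INTO t VALUES (?)', [('red',), ('blue',), ('green',)])

field_vals = ['green', 'red', 'blue']  # the custom sort order we want
case_sql = 'CASE color ' + ' '.join(
    'WHEN ? THEN {}'.format(i) for i in range(len(field_vals))) + ' END'

rows = conn.execute(
    'SELECT color FROM t ORDER BY {}'.format(case_sql), field_vals).fetchall()
# [('green',), ('red',), ('blue',)]: rows follow field_vals, not alphabetical order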
arlyon/hyperion
hyperion/api/util.py
https://github.com/arlyon/hyperion/blob/d8de0388ba98b85ce472e0f49ac18fecb14d3343/hyperion/api/util.py#L16-L35
async def normalize_postcode_middleware(request, handler):
    """
    If there is a postcode in the url it validates and normalizes it.
    """
    postcode: Optional[str] = request.match_info.get('postcode', None)

    if postcode is None or postcode == "random":
        return await handler(request)
    elif not is_uk_postcode(postcode):
        raise web.HTTPNotFound(text="Invalid Postcode")

    postcode_processed = postcode.upper().replace(" ", "")
    if postcode_processed == postcode:
        return await handler(request)
    else:
        url_name = request.match_info.route.name
        url = request.app.router[url_name]
        params = dict(request.match_info)
        params['postcode'] = postcode_processed
        raise web.HTTPMovedPermanently(str(url.url_for(**params)))
[ "async", "def", "normalize_postcode_middleware", "(", "request", ",", "handler", ")", ":", "postcode", ":", "Optional", "[", "str", "]", "=", "request", ".", "match_info", ".", "get", "(", "'postcode'", ",", "None", ")", "if", "postcode", "is", "None", "or", "postcode", "==", "\"random\"", ":", "return", "await", "handler", "(", "request", ")", "elif", "not", "is_uk_postcode", "(", "postcode", ")", ":", "raise", "web", ".", "HTTPNotFound", "(", "text", "=", "\"Invalid Postcode\"", ")", "postcode_processed", "=", "postcode", ".", "upper", "(", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "if", "postcode_processed", "==", "postcode", ":", "return", "await", "handler", "(", "request", ")", "else", ":", "url_name", "=", "request", ".", "match_info", ".", "route", ".", "name", "url", "=", "request", ".", "app", ".", "router", "[", "url_name", "]", "params", "=", "dict", "(", "request", ".", "match_info", ")", "params", "[", "'postcode'", "]", "=", "postcode_processed", "raise", "web", ".", "HTTPMovedPermanently", "(", "str", "(", "url", ".", "url_for", "(", "*", "*", "params", ")", ")", ")" ]
If there is a postcode in the url it validates and normalizes it.
[ "If", "there", "is", "a", "postcode", "in", "the", "url", "it", "validates", "and", "normalizes", "it", "." ]
python
test
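The normalization step in isolation. is_uk_postcode is project-specific, so a loose regex stands in here; the real check is stricter.

import re

POSTCODE_RE = re.compile(r'^[A-Za-z]{1,2}\d[A-Za-z\d]?\s*\d[A-Za-z]{2}$')  # rough stand-in

def normalize(postcode: str) -> str:
    if not POSTCODE_RE.match(postcode):
        raise ValueError('Invalid Postcode')
    return postcode.upper().replace(' ', '')

print(normalize('sw1a 1aa'))  # 'SW1A1AA': this is the form the 301 redirect targets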
facelessuser/backrefs
tools/unipropgen.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/tools/unipropgen.py#L220-L266
def gen_ccc(output, ascii_props=False, append=False, prefix=""):
    """Generate `canonical combining class` property."""

    obj = {}
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'DerivedCombiningClass.txt'), 'r', 'utf-8') as uf:
        for line in uf:
            if not line.startswith('#'):
                data = line.split('#')[0].split(';')
                if len(data) < 2:
                    continue
                span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
                if span is None:
                    continue
                name = format_name(data[1])

                if name not in obj:
                    obj[name] = []
                obj[name].extend(span)

    for x in range(0, 256):
        key = str(x)
        if key not in obj:
            obj[key] = []

    for name in list(obj.keys()):
        s = set(obj[name])
        obj[name] = sorted(s)

    not_explicitly_defined(obj, '0', is_bytes=ascii_props)

    # Convert characters values to ranges
    char2range(obj, is_bytes=ascii_props)

    with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
        if not append:
            f.write(HEADER)
        # Write out the Unicode properties
        f.write('%s_canonical_combining_class = {\n' % prefix)
        count = len(obj) - 1
        i = 0
        for k1, v1 in sorted(obj.items()):
            f.write(' "%s": "%s"' % (k1, v1))
            if i == count:
                f.write('\n}\n')
            else:
                f.write(',\n')
            i += 1
[ "def", "gen_ccc", "(", "output", ",", "ascii_props", "=", "False", ",", "append", "=", "False", ",", "prefix", "=", "\"\"", ")", ":", "obj", "=", "{", "}", "with", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "HOME", ",", "'unicodedata'", ",", "UNIVERSION", ",", "'DerivedCombiningClass.txt'", ")", ",", "'r'", ",", "'utf-8'", ")", "as", "uf", ":", "for", "line", "in", "uf", ":", "if", "not", "line", ".", "startswith", "(", "'#'", ")", ":", "data", "=", "line", ".", "split", "(", "'#'", ")", "[", "0", "]", ".", "split", "(", "';'", ")", "if", "len", "(", "data", ")", "<", "2", ":", "continue", "span", "=", "create_span", "(", "[", "int", "(", "i", ",", "16", ")", "for", "i", "in", "data", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "'..'", ")", "]", ",", "is_bytes", "=", "ascii_props", ")", "if", "span", "is", "None", ":", "continue", "name", "=", "format_name", "(", "data", "[", "1", "]", ")", "if", "name", "not", "in", "obj", ":", "obj", "[", "name", "]", "=", "[", "]", "obj", "[", "name", "]", ".", "extend", "(", "span", ")", "for", "x", "in", "range", "(", "0", ",", "256", ")", ":", "key", "=", "str", "(", "x", ")", "if", "key", "not", "in", "obj", ":", "obj", "[", "key", "]", "=", "[", "]", "for", "name", "in", "list", "(", "obj", ".", "keys", "(", ")", ")", ":", "s", "=", "set", "(", "obj", "[", "name", "]", ")", "obj", "[", "name", "]", "=", "sorted", "(", "s", ")", "not_explicitly_defined", "(", "obj", ",", "'0'", ",", "is_bytes", "=", "ascii_props", ")", "# Convert characters values to ranges", "char2range", "(", "obj", ",", "is_bytes", "=", "ascii_props", ")", "with", "codecs", ".", "open", "(", "output", ",", "'a'", "if", "append", "else", "'w'", ",", "'utf-8'", ")", "as", "f", ":", "if", "not", "append", ":", "f", ".", "write", "(", "HEADER", ")", "# Write out the Unicode properties", "f", ".", "write", "(", "'%s_canonical_combining_class = {\\n'", "%", "prefix", ")", "count", "=", "len", "(", "obj", ")", "-", "1", "i", "=", "0", "for", "k1", ",", "v1", "in", "sorted", "(", "obj", ".", "items", "(", ")", ")", ":", "f", ".", "write", "(", "' \"%s\": \"%s\"'", "%", "(", "k1", ",", "v1", ")", ")", "if", "i", "==", "count", ":", "f", ".", "write", "(", "'\\n}\\n'", ")", "else", ":", "f", ".", "write", "(", "',\\n'", ")", "i", "+=", "1" ]
Generate `canonical combining class` property.
[ "Generate", "canonical", "combining", "class", "property", "." ]
python
train
drkjam/pydba
pydba/postgres.py
https://github.com/drkjam/pydba/blob/986c4b1315d6b128947c3bc3494513d8e5380ff0/pydba/postgres.py#L276-L291
def shell(self, expect=pexpect):
    """
    Connects the database client shell to the database.

    Parameters
    ----------
    expect: module
        pexpect-compatible module used to spawn and drive the
        interactive psql session (the default is pexpect itself).
    """
    dsn = self.connection_dsn()
    log.debug('connection string: %s' % dsn)
    child = expect.spawn('psql "%s"' % dsn)
    if self._connect_args['password'] is not None:
        child.expect('Password: ')
        child.sendline(self._connect_args['password'])
    child.interact()
[ "def", "shell", "(", "self", ",", "expect", "=", "pexpect", ")", ":", "dsn", "=", "self", ".", "connection_dsn", "(", ")", "log", ".", "debug", "(", "'connection string: %s'", "%", "dsn", ")", "child", "=", "expect", ".", "spawn", "(", "'psql \"%s\"'", "%", "dsn", ")", "if", "self", ".", "_connect_args", "[", "'password'", "]", "is", "not", "None", ":", "child", ".", "expect", "(", "'Password: '", ")", "child", ".", "sendline", "(", "self", ".", "_connect_args", "[", "'password'", "]", ")", "child", ".", "interact", "(", ")" ]
Connects the database client shell to the database.

Parameters
----------
expect: module
    pexpect-compatible module used to spawn and drive the
    interactive psql session (the default is pexpect itself).
[ "Connects", "the", "database", "client", "shell", "to", "the", "database", "." ]
python
valid
twilio/twilio-python
twilio/jwt/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/jwt/__init__.py#L131-L152
def from_jwt(cls, jwt, key=''):
    """
    Decode a JWT string into a Jwt object
    :param str jwt: JWT string
    :param Optional[str] key: key used to verify JWT signature, if not provided then
                              validation is skipped.
    :raises JwtDecodeError if decoding JWT fails for any reason.
    :return: A DecodedJwt object containing the jwt information.
    """
    verify = True if key else False

    try:
        payload = jwt_lib.decode(bytes(jwt), key, options={
            'verify_signature': verify,
            'verify_exp': True,
            'verify_nbf': True,
        })
        headers = jwt_lib.get_unverified_header(jwt)
    except Exception as e:
        raise JwtDecodeError(getattr(e, 'message', str(e)))

    return cls._from_jwt(headers, payload, key)
[ "def", "from_jwt", "(", "cls", ",", "jwt", ",", "key", "=", "''", ")", ":", "verify", "=", "True", "if", "key", "else", "False", "try", ":", "payload", "=", "jwt_lib", ".", "decode", "(", "bytes", "(", "jwt", ")", ",", "key", ",", "options", "=", "{", "'verify_signature'", ":", "verify", ",", "'verify_exp'", ":", "True", ",", "'verify_nbf'", ":", "True", ",", "}", ")", "headers", "=", "jwt_lib", ".", "get_unverified_header", "(", "jwt", ")", "except", "Exception", "as", "e", ":", "raise", "JwtDecodeError", "(", "getattr", "(", "e", ",", "'message'", ",", "str", "(", "e", ")", ")", ")", "return", "cls", ".", "_from_jwt", "(", "headers", ",", "payload", ",", "key", ")" ]
Decode a JWT string into a Jwt object
:param str jwt: JWT string
:param Optional[str] key: key used to verify JWT signature, if not provided then
                          validation is skipped.
:raises JwtDecodeError if decoding JWT fails for any reason.
:return: A DecodedJwt object containing the jwt information.
[ "Decode", "a", "JWT", "string", "into", "a", "Jwt", "object", ":", "param", "str", "jwt", ":", "JWT", "string", ":", "param", "Optional", "[", "str", "]", "key", ":", "key", "used", "to", "verify", "JWT", "signature", "if", "not", "provided", "then", "validation", "is", "skipped", ".", ":", "raises", "JwtDecodeError", "if", "decoding", "JWT", "fails", "for", "any", "reason", ".", ":", "return", ":", "A", "DecodedJwt", "object", "containing", "the", "jwt", "information", "." ]
python
train
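A round-trip sketch. It assumes the Python 2-era PyJWT 1.x that this wrapper targets (note the bytes(jwt) call); the issuer, secret and expiry are illustrative.

import jwt as jwt_lib  # PyJWT 1.x, the library the wrapper above delegates to
from twilio.jwt import Jwt

token = jwt_lib.encode({'iss': 'AC123', 'exp': 4102444800}, 'secret',
                       algorithm='HS256')

decoded = Jwt.from_jwt(token, key='secret')  # signature verified because key is set
print(decoded.payload['iss'])                # 'AC123'

unverified = Jwt.from_jwt(token)             # empty key: signature check is skipped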